diff --git a/.chainlit/config.toml b/.chainlit/config.toml
new file mode 100644
index 0000000000000000000000000000000000000000..e747f277718a5f357faafcc002e8a0b8a4069400
--- /dev/null
+++ b/.chainlit/config.toml
@@ -0,0 +1,84 @@
+[project]
+# Whether to enable telemetry (default: true). No personal data is collected.
+enable_telemetry = true
+
+# List of environment variables to be provided by each user to use the app.
+user_env = []
+
+# Duration (in seconds) during which the session is saved when the connection is lost
+session_timeout = 3600
+
+# Enable third parties caching (e.g. LangChain cache)
+cache = false
+
+# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+# follow_symlink = false
+
+[features]
+# Show the prompt playground
+prompt_playground = true
+
+# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+unsafe_allow_html = false
+
+# Process and display mathematical expressions. This can clash with "$" characters in messages.
+latex = false
+
+# Authorize users to upload files with messages
+multi_modal = true
+
+# Allows user to use speech to text
+[features.speech_to_text]
+    enabled = false
+    # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
+    # language = "en-US"
+
+[UI]
+# Name of the app and chatbot.
+name = "Chatbot"
+
+# Show the readme while the conversation is empty.
+show_readme_as_default = true
+
+# Description of the app and chatbot. This is used for HTML tags.
+# description = ""
+
+# Large size content are by default collapsed for a cleaner ui
+default_collapse_content = true
+
+# The default value for the expand messages settings.
+default_expand_messages = false
+
+# Hide the chain of thought details from the user in the UI.
+hide_cot = false
+
+# Link to your github repo. This will add a github button in the UI's header.
+# github = ""
+
+# Specify a CSS file that can be used to customize the user interface.
+# The CSS file can be served from the public directory or via an external link.
+# custom_css = "/public/test.css"
+
+# Override default MUI light theme. (Check theme.ts)
+[UI.theme.light]
+    #background = "#FAFAFA"
+    #paper = "#FFFFFF"
+
+    [UI.theme.light.primary]
+        #main = "#F80061"
+        #dark = "#980039"
+        #light = "#FFE7EB"
+
+# Override default MUI dark theme. (Check theme.ts)
+[UI.theme.dark]
+    #background = "#FAFAFA"
+    #paper = "#FFFFFF"
+
+    [UI.theme.dark.primary]
+        #main = "#F80061"
+        #dark = "#980039"
+        #light = "#FFE7EB"
+
+
+[meta]
+generated_by = "0.7.700"
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e7e50308919a10de1020d8892d14226f77848711
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+venv/
+__pycache__/*
+.env
+download-hf-model.ipynb
+temp
+start_qdrant_services.sh
+requirements copy.txt
+Dockerfile copy
+.venv/
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..feb834eaecc4486dbeacd7fbeae310c7d1c6015c
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,11 @@
+FROM python:3.11.9
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+WORKDIR $HOME/app
+COPY --chown=user . $HOME/app
+COPY ./requirements.txt $HOME/app/requirements.txt
+RUN pip install -r requirements.txt
+COPY . .
+CMD ["chainlit", "run", "app.py", "--port", "7860"] \ No newline at end of file diff --git a/README.md b/README.md index c1d21bc5f418afb7fbdb9704c97d2442d44d74e4..f8562e7744fe6b1fce6492bafe3dbb8a37ab661d 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,73 @@ ---- -title: SafeGuardAI -emoji: šŸ  -colorFrom: yellow -colorTo: yellow -sdk: docker -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +# Midterm + +## Background and Context +The CEO and corporate, with permission of the board, have assembled a crack data science and engineering team to take advantage of RAG, agents, and all of the latest open-source technologies emerging in the industry. This time it's for real though. This time, the company is aiming squarely at some Return On Investment - some ROI - on its research and development dollars. + +## The Problem +You are an AI Solutions Engineer. You've worked directly with internal stakeholders to identify a problem: `people are concerned about the implications of AI, and no one seems to understand the right way to think about building ethical and useful AI applications for enterprises.` + +This is a big problem and one that is rapidly changing. Several people you interviewed said that *they could benefit from a chatbot that helped them understand how the AI industry is evolving, especially as it relates to politics.* Many are interested due to the current election cycle, but others feel that some of the best guidance is likely to come from the government. + +## Task 1: Dealing with the Data +You identify the following important documents that, if used for context, you believe will help people understand whatā€™s happening now: +1. 2022: [Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People](https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf) (PDF) +2. 2024: [National Institute of Standards and Technology (NIST) Artificial Intelligent Risk Management Framework](https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf) (PDF) +Your boss, the SVP of Technology, green-lighted this project to drive the adoption of AI throughout the enterprise. It will be a nice showpiece for the upcoming conference and the big AI initiative announcement the CEO is planning. + +> Task 1: Review the two PDFs and decide how best to chunk up the data with a single strategy to optimally answer the variety of questions you expect to receive from people. +> +> *Hint: Create a list of potential questions that people are likely to ask!* + +āœ…Deliverables: +1. Describe the default chunking strategy that you will use. +2. Articulate a chunking strategy that you would also like to test out. +3. Describe how and why you made these decisions + +## Task 2: Building a Quick End-to-End Prototype +**You are an AI Systems Engineer.** The SVP of Technology has tasked you with spinning up a quick RAG prototype for answering questions that internal stakeholders have about AI, using the data provided in Task 1. + +> Task 2: Build an end-to-end RAG application using an industry-standard open-source stack and your choice of commercial off-the-shelf models + +āœ…Deliverables: +1. Build a prototype and deploy to a Hugging Face Space, and create a short (< 2 min) loom video demonstrating some initial testing inputs and outputs. +2. How did you choose your stack, and why did you select each tool the way you did? 
+
+## Task 3: Creating a Golden Test Data Set
+**You are an AI Evaluation & Performance Engineer.** The AI Systems Engineer who built the initial RAG system has asked for your help and expertise in creating a "Golden Data Set."
+
+> Task 3: Generate a synthetic test data set and baseline an initial evaluation
+
+✅Deliverables:
+1. Assess your pipeline using the RAGAS framework, including the key metrics faithfulness, answer relevancy, context precision, and context recall. Provide a table of your output results.
+2. What conclusions can you draw about the performance and effectiveness of your pipeline with this information?
+
+## Task 4: Fine-Tuning Open-Source Embeddings
+**You are a Machine Learning Engineer.** The AI Evaluation and Performance Engineer has asked for your help in fine-tuning the embedding model used in their recent RAG application build.
+
+> Task 4: Generate synthetic fine-tuning data and complete fine-tuning of the open-source embedding model
+
+✅Deliverables:
+1. Swap out your existing embedding model for the new fine-tuned version. Provide a link to your fine-tuned embedding model on the Hugging Face Hub.
+2. How did you choose the embedding model for this application?
+
+## Task 5: Assessing Performance
+**You are the AI Evaluation & Performance Engineer.** It's time to assess all options for this product.
+
+> Task 5: Assess the performance of 1) the fine-tuned model, and 2) the two proposed chunking strategies
+
+✅Deliverables:
+1. Test the fine-tuned embedding model using the RAGAS framework to quantify any improvements. Provide results in a table.
+2. Test the two chunking strategies using the RAGAS framework to quantify any improvements. Provide results in a table.
+3. The AI Solutions Engineer asks you "Which one is the best to test with internal stakeholders next week, and why?"
+
+## Task 6: Managing Your Boss and User Expectations
+**You are the SVP of Technology.** Given the work done by your team so far, you're now sitting down with the AI Solutions Engineer. You have tasked the solutions engineer to test out the new application with at least 50 different internal stakeholders over the next month.
+1. What is the story that you will give to the CEO to tell the whole company at the launch next month?
+2. There appears to be important information not included in our build, for instance, the [270-day update](https://www.whitehouse.gov/briefing-room/statements-releases/2024/07/26/fact-sheet-biden-harris-administration-announces-new-ai-actions-and-receives-additional-major-voluntary-commitment-on-ai/) on the 2023 executive order on [Safe, Secure, and Trustworthy AI](https://www.whitehouse.gov/briefing-room/presidential-actions/2023/10/30/executive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-artificial-intelligence/). How might you incorporate relevant White House briefing information into future versions?
+
+## Your Final Submission
+Please include the following in your final submission:
+1. A public link to a **written report** addressing each deliverable and answering each question.
+2. A public link to any relevant **GitHub repo**
+3. A public link to the **final version of your application** on Hugging Face
+4. A public link to your **fine-tuned embedding model** on Hugging Face
\ No newline at end of file
diff --git a/Roles.md b/Roles.md
new file mode 100644
index 0000000000000000000000000000000000000000..e38d2c86ed961d2c179fb28f3b19888cd2705f57
--- /dev/null
+++ b/Roles.md
@@ -0,0 +1,16 @@
+# Roles
+
+My Role - AI Solutions Engineer
+
+Here are the condensed roles and responsibilities of an AI Solutions Engineer:
+
+1. **Design and Develop AI Models**: Analyze business needs, develop, and fine-tune machine learning and AI models.
+2. **System Integration**: Architect AI solutions that integrate with existing IT systems and platforms.
+3. **Data Management**: Preprocess, clean, and manage data for training AI models; perform feature engineering.
+4. **Deployment and Automation**: Implement CI/CD pipelines for deploying AI models and maintain automation.
+5. **Monitor and Optimize Models**: Continuously evaluate, monitor, and retrain models to ensure optimal performance.
+6. **Collaborate Cross-Functionally**: Work with data scientists, software engineers, and business teams for seamless integration.
+7. **Ensure Scalability and Performance**: Optimize AI solutions for scalability, efficiency, and low latency.
+8. **Security and Compliance**: Implement data privacy and security measures for AI solutions.
+9. **Stay Updated on AI Trends**: Continuously research and apply the latest AI technologies and methodologies.
+10. **User Training and Support**: Provide training and ongoing support to stakeholders using AI systems.
\ No newline at end of file
diff --git a/Tasks/Task 1/Task1.md b/Tasks/Task 1/Task1.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f88b0c01026d6438a5c2716ae63dfe2de2d3725
--- /dev/null
+++ b/Tasks/Task 1/Task1.md
@@ -0,0 +1,243 @@
+### Deliverable 1: Describe the Default Chunking Strategy You Will Use
+
+The default chunking strategy that I will use is based on the **`RecursiveCharacterTextSplitter`** method. This splitter divides text into manageable chunks while maintaining semantic coherence, aiming to ensure that chunks do not break in the middle of thoughts or sentences. It allows for flexible and dynamic chunking based on the nature of the document.
+
+#### Key Details of the Default Strategy:
+- **Adaptive Chunk Sizes**: The splitter first attempts to split the text into large sections (e.g., paragraphs). If a chunk exceeds a certain length, it recursively breaks it down into smaller units (sentences), ensuring each chunk remains within the ideal size for embedding (e.g., 1,000 tokens).
+- **Flexibility**: It works well for both structured and unstructured documents, making it suitable for a variety of AI-related documents like the *AI Bill of Rights* and *NIST RMF*.
+- **Context Preservation**: Since it operates recursively, the splitter minimizes the risk of breaking meaningful content, preserving important relationships between concepts.
+
+### Deliverable 2: Articulate a Chunking Strategy You Would Also Like to Test Out
+
+In addition to the default strategy, I would like to test out a **Section- and Topic-Based Chunking Strategy** combined with **SemanticChunker**. This strategy would involve splitting the documents based on predefined sections or topics, allowing the chunking process to align more closely with the structure and meaning of the document.
+
+#### Key Details of the Alternative Strategy:
+- **Section-Based Chunking**: This strategy would first divide the document into sections or sub-sections based on headers, topics, or principles (e.g., the five principles in the *AI Bill of Rights* or the different phases in the *NIST RMF*). This ensures that each chunk retains a logical structure.
+- **SemanticChunker Integration**: The SemanticChunker further refines chunking by considering the content's coherence, creating semantically meaningful segments rather than simply splitting based on length. This would work particularly well for documents like the *AI Bill of Rights*, where each principle is discussed with examples and cases.
+- **Adaptability**: The strategy allows adaptation based on the specific document, improving retrieval for highly structured documents while maintaining the flexibility to handle less-structured ones.
+
+### Deliverable 3: Describe How and Why You Made These Decisions
+
+#### 1. **Default Chunking Strategy**:
+   - **Rationale**: The decision to use `RecursiveCharacterTextSplitter` as the default is driven by its versatility and efficiency. It balances chunk size and coherence without relying on predefined structures, which makes it robust across various document types, including both structured (like the *AI Bill of Rights*) and unstructured (user-uploaded PDFs). It is particularly useful for retrieval systems where chunk size impacts the performance of embedding models.
+   - **Why It Works**: This strategy allows for better handling of document diversity and ensures that chunks remain contextually rich, which is crucial for accurate retrieval in a conversational AI system.
+
+#### 2. **Alternative Section-Based Chunking**:
+   - **Rationale**: The section-based chunking strategy is more targeted toward highly structured documents. For documents like the *NIST AI RMF*, which have clear sections and subsections, breaking the text down by these categories ensures that the system can retrieve contextually related chunks for more precise answers.
+   - **Why It's Worth Testing**: This strategy enhances retrieval relevance by aligning chunks with specific sections and principles, making it easier to answer detailed or multi-part questions. In combination with the SemanticChunker, it provides the benefit of preserving meaning across larger contexts.
+
+#### 3. **Combining Performance and Coherence**:
+   - **Decisions**: I made these decisions to ensure that both performance and coherence are maximized. The default method is fast, flexible, and works well across a variety of documents, while the section-based strategy is designed to improve the quality of responses in documents with clearly defined structures.
+   - **Efficiency Consideration**: By choosing a performant embedding model and efficient chunking strategies, I aimed to balance speed and relevance in the retrieval process, ensuring that the system remains scalable and responsive.
+
+### Summary:
+- **Default Strategy**: `RecursiveCharacterTextSplitter` for its adaptability across document types.
+- **Test Strategy**: Section-based chunking with SemanticChunker for enhancing the accuracy of retrieval from structured documents.
+- **Decision Rationale**: Both strategies were chosen to provide a balance between flexibility, coherence, and performance, ensuring that the system can effectively handle diverse document structures and retrieval needs.
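+
+To make the comparison concrete, here is a minimal sketch of how the two strategies could be instantiated with LangChain. The chunk size, overlap, and breakpoint settings are illustrative assumptions rather than tuned values:
+
+```python
+# Minimal sketch of the two candidate chunking strategies (LangChain).
+# Chunk size, overlap, and breakpoint values are illustrative assumptions.
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_experimental.text_splitter import SemanticChunker
+from langchain_huggingface import HuggingFaceEmbeddings
+
+embedding_model = HuggingFaceEmbeddings(model_name="Snowflake/snowflake-arctic-embed-l")
+
+# Default strategy: recursive splitting on paragraph/sentence/character boundaries.
+recursive_splitter = RecursiveCharacterTextSplitter(
+    chunk_size=1024,    # target chunk size in characters
+    chunk_overlap=100,  # overlap to preserve context across chunk boundaries
+)
+
+# Test strategy: semantic splitting at embedding-similarity breakpoints.
+semantic_splitter = SemanticChunker(
+    embedding_model,
+    breakpoint_threshold_type="percentile",
+)
+
+# Given `documents` loaded from the two PDFs:
+# recursive_chunks = recursive_splitter.split_documents(documents)
+# semantic_chunks = semantic_splitter.split_documents(documents)
+```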
+
+
+# Problem Statement
+
+People are concerned about the implications of AI, and no one seems to understand the right way to think about building ethical and useful AI applications for enterprises.
+
+# Understanding the Data
+
+## Blueprint for an AI Bill of Rights
+
+The "Blueprint for an AI Bill of Rights," published by the White House Office of Science and Technology Policy in October 2022, outlines a framework to ensure that automated systems, including those powered by AI, respect civil rights, privacy, and democratic values. The document is structured around five core principles:
+
+Safe and Effective Systems: Automated systems should be designed with input from diverse communities and experts, undergo rigorous pre-deployment testing, and be monitored to ensure safety and effectiveness. This includes protecting users from foreseeable harm and ensuring that systems are not based on inappropriate or irrelevant data.
+
+Algorithmic Discrimination Protections: Automated systems must be designed and used in ways that prevent discrimination based on race, gender, religion, and other legally protected categories. This principle includes proactive testing and continuous monitoring to prevent algorithmic bias and discrimination.
+
+Data Privacy: Individuals should have control over how their data is collected and used, with automated systems adhering to privacy safeguards by default. This principle emphasizes informed consent, minimizing unnecessary data collection, and prohibiting the misuse of sensitive data, such as in areas of health or finance.
+
+Notice and Explanation: People should be aware when automated systems are affecting their rights, opportunities, or access to services, and should be provided with understandable explanations of how these systems operate and influence outcomes.
+
+Human Alternatives, Consideration, and Fallback: Users should have the ability to opt out of automated systems in favor of human alternatives where appropriate. There should be mechanisms for people to contest and resolve issues arising from decisions made by automated systems, especially in high-stakes areas like healthcare, education, and criminal justice.
+
+The framework aims to protect the public from harmful outcomes of AI while allowing for innovation, recommending transparency, accountability, and fairness across sectors that deploy automated systems. However, the Blueprint is non-binding, meaning it does not constitute enforceable U.S. government policy but instead serves as a guide for best practices.
+
+## NIST AI Risk Management Framework
+
+The document titled **NIST AI 600-1** outlines the **Artificial Intelligence Risk Management Framework (AI RMF)**, with a specific focus on managing risks related to **Generative Artificial Intelligence (GAI)**. Published by the **National Institute of Standards and Technology (NIST)** in July 2024, this framework provides a profile for organizations to manage the risks associated with GAI, consistent with President Biden's Executive Order (EO) 14110 on "Safe, Secure, and Trustworthy AI."
+
+### Key aspects of the document include:
+
+1. **AI Risk Management Framework (AI RMF)**: This framework offers organizations a voluntary guideline to integrate trustworthiness into AI systems. It addresses the unique risks associated with GAI, such as confabulation (AI hallucinations), bias, privacy, security, and misuse for malicious activities.
+
+2. **Suggested Risk Management Actions**: The document provides detailed actions across various phases of AI development and deployment, such as governance, testing, monitoring, and decommissioning, to mitigate risks from GAI.
+
+3. **Generative AI-Specific Risks**: The document discusses risks unique to GAI, including:
+   - **Data privacy risks** (e.g., personal data leakage, sensitive information memorization)
+   - **Environmental impacts** (e.g., high energy consumption during model training)
+   - **Harmful content generation** (e.g., violent or misleading content)
+   - **Bias amplification and model homogenization**
+   - **Security risks**, such as prompt injection and data poisoning
+
+4. **Recommendations for Organizations**: It emphasizes proactive governance, transparency, human oversight, and tailored policies to manage AI risks throughout the entire lifecycle of AI systems.
+
+This framework aims to ensure that organizations can deploy GAI systems in a responsible and secure manner while balancing innovation with potential societal impacts.
+
+
+## Sample Questions from the Internet
+
+Here is a consolidated set of real user questions regarding AI, ethics, privacy, and risk management, with source URLs:
+
+1. **How can companies ensure AI does not violate data privacy laws?**
+   Users are concerned about how AI handles personal data, especially with incidents like data spillovers where information leaks unintentionally across systems.
+   Source: [Stanford HAI](https://hai.stanford.edu/news/privacy-ai-era-how-do-we-protect-our-personal-information), [Transcend](https://transcend.io/blog/ai-and-your-privacy-understanding-the-concerns).
+
+2. **What steps can organizations take to minimize bias in AI models?**
+   Concerns about fairness in AI applications, particularly in hiring, lending, and law enforcement.
+   Source: [ISC2](https://www.isc2.org/Articles/AI-Ethics-Dilemmas-in-Cybersecurity), [JDSupra](https://www.jdsupra.com/legalnews/five-ethics-questions-to-ask-about-your-5303517/).
+
+3. **How do we balance AI-driven cybersecurity with privacy?**
+   Striking a balance between enhancing security and avoiding over-collection of personal data.
+   Source: [ISC2](https://www.isc2.org/Articles/AI-Ethics-Dilemmas-in-Cybersecurity), [HBS Working Knowledge](https://hbswk.hbs.edu/item/navigating-consumer-data-privacy-in-an-ai-world).
+
+4. **What are the legal consequences if an AI system makes an unethical decision?**
+   Understanding liability and compliance when AI systems cause ethical or legal violations.
+   Source: [JDSupra](https://www.jdsupra.com/legalnews/five-ethics-questions-to-ask-about-your-5303517/), [Transcend](https://transcend.io/blog/ai-and-your-privacy-understanding-the-concerns).
+
+5. **How can organizations ensure transparency in AI decision-making?**
+   Ensuring explainability and transparency, especially in high-stakes applications like healthcare and criminal justice.
+   Source: [ISC2](https://www.isc2.org/Articles/AI-Ethics-Dilemmas-in-Cybersecurity), [HBS Working Knowledge](https://hbswk.hbs.edu/item/navigating-consumer-data-privacy-in-an-ai-world).
+
+6. **How can we design AI systems to be ethics and compliance-oriented from the start?**
+   Building AI systems with ethical oversight and controls from the beginning.
+   Source: [JDSupra](https://www.jdsupra.com/legalnews/five-ethics-questions-to-ask-about-your-5303517/).
+
+7. **What are the security risks posed by AI systems?**
+   Addressing the growing risks of security breaches and data leaks with AI technologies.
+   Source: [Transcend](https://transcend.io/blog/ai-and-your-privacy-understanding-the-concerns), [ISC2](https://www.isc2.org/Articles/AI-Ethics-Dilemmas-in-Cybersecurity).
+
+8. **How can AI's impact on job displacement be managed ethically?**
+   Addressing ethical concerns around job displacement due to AI automation.
+   Source: [ISC2](https://www.isc2.org/Articles/AI-Ethics-Dilemmas-in-Cybersecurity).
+
+9. **What measures should be in place to ensure AI systems are transparent and explainable?**
+   Ensuring that AI decisions are explainable, particularly in critical areas like healthcare and finance.
+   Source: [ISC2](https://www.isc2.org/Articles/AI-Ethics-Dilemmas-in-Cybersecurity), [HBS Working Knowledge](https://hbswk.hbs.edu/item/navigating-consumer-data-privacy-in-an-ai-world).
+
+10. **How do companies comply with different AI regulations across regions like the EU and US?**
+    Navigating the differences between GDPR in Europe and US privacy laws.
+    Source: [Transcend](https://transcend.io/blog/ai-and-your-privacy-understanding-the-concerns), [HBS Working Knowledge](https://hbswk.hbs.edu/item/navigating-consumer-data-privacy-in-an-ai-world).
+
+Do the organization's personnel and partners receive AI risk management training to enable them to perform their duties and responsibilities consistent with related policies, procedures, and agreements?
+
+Will customer data be used to train artificial intelligence, machine learning, automation, or deep learning?
+
+Does the organization have an AI Development and Management Policy?
+
+Does the organization have policies and procedures in place to define and differentiate roles and responsibilities for human-AI configurations and oversight of AI systems?
+
+Who is the third-party AI technology behind your product/service?
+
+Has the third-party AI processor been appropriately vetted for risk? If so, what certifications have they obtained?
+
+Does the organization implement post-deployment AI system monitoring, including mechanisms for capturing and evaluating user input and other relevant AI actors, appeal and override, decommissioning, incident response, recovery, and change management?
+
+Does the organization communicate incidents and errors to relevant AI actors and affected communities and follow documented processes for tracking, responding to, and recovering from incidents and errors?
+
+Does your company engage with generative AI/AGI tools internally or throughout your company's product line?
+
+If generative AI/AGI is incorporated into the product, please describe any governance policies or procedures.
+
+Describe the controls in place to ensure our data is transmitted securely and is logically and/or physically segmented from those of other customers.
+
+These links provide direct access to discussions about AI ethics, privacy, and risk management.
+
+## Document Structure
+
+- Both of these documents follow a structure which will make it easier to chunk them, but the implementation of such a section/topic-based strategy is complex and time-consuming, as it needs to adapt dynamically to the document uploaded.
+- We could chunk the PDF into sections, then sub-sections, then pages, and then sentences/paragraphs (see the sketch below). This would break the document up nicely while preserving its structure.
+- There is a chance the user may upload a document that is not structured, which means going with the assumption that the document will always be in a structured format will not work.
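+
+As a rough sketch of the section-based idea referenced above, one option is to convert the PDF to Markdown first (e.g., with `pymupdf4llm`) so that headings become explicit, and then split on those headings with LangChain's `MarkdownHeaderTextSplitter`. The file name and header mapping below are illustrative assumptions, not the final design:
+
+```python
+# Sketch: section/sub-section chunking via Markdown conversion.
+# Assumes a local copy of the PDF; the header levels are illustrative.
+import pymupdf4llm
+from langchain_text_splitters import MarkdownHeaderTextSplitter
+
+md_text = pymupdf4llm.to_markdown("Blueprint-for-an-AI-Bill-of-Rights.pdf")
+
+# Split on the heading levels that mark sections and sub-sections.
+header_splitter = MarkdownHeaderTextSplitter(
+    headers_to_split_on=[("#", "section"), ("##", "subsection")]
+)
+section_chunks = header_splitter.split_text(md_text)
+# Each chunk keeps its section/subsection titles in metadata, preserving the
+# document structure; unstructured uploads can fall back to the recursive splitter.
+```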
+
+
+# Dealing with the data
+
+Considering all of the above, going with a generic approach that covers all use cases would be sensible. We will go with the usual `PyMuPDFLoader` library to load the PDFs and chunk the documents using `RecursiveCharacterTextSplitter` to begin with.
+
+I would also like to use `PyPDFium2Loader`, but it is very slow compared to `PyMuPDFLoader`; if our use case only requires populating the vector store ahead of time, we could go with this loader. The `PyPDFium2Loader` took 2 minutes 30 seconds to load these two PDFs, and comparing the quality of the output there is not much difference between the two. So I will use `PyMuPDFLoader`.
+
+To improve the quality of the retrieval, we can group the documents by similar context, which provides better context retrieval.
+
+Chunking strategies:
+
+`RecursiveCharacterTextSplitter`
+
+`SemanticChunker`
+
+- Improved Coherence: Chunks are more likely to contain complete thoughts or ideas.
+- Better Retrieval Relevance: By preserving context, retrieval accuracy may be enhanced.
+- Adaptability: The chunking method can be adjusted based on the nature of the documents and retrieval needs.
+- Potential for Better Understanding: LLMs or downstream tasks may perform better with more coherent text segments.
+
+Advanced retrieval techniques tried:
+
+1. Context enrichment - creates some duplicates; need to investigate why later.
+2. Contextual compression - creates better responses but takes time. Will need to check if streaming helps.
+
+More for later.
+
+Experimenting with the above chunking strategies, I found that `RecursiveCharacterTextSplitter` with contextual compression provides better results.
+
+
+# Choice of embedding model
+
+The quality of generation is directly proportional to the quality of the retrieval, and at the same time we wanted to choose a smaller model that is performant. I chose the `snowflake-arctic-embed-l` embedding model as it is small, with 334 million parameters and support for 1024-dimensional embeddings. It is currently ranked 27th on the MTEB leaderboard, which suggests to me that it competes efficiently with other, larger models.
+
+====
+
+Your response effectively breaks down the documents and uses a clear, methodical approach to answering the task. However, to enhance the response further, consider the following improvements:
+
+### 1. **Aligning Chunking Strategy with Context**
+   - **Current Strategy**: You mention using `RecursiveCharacterTextSplitter` and `SemanticChunker`, which is a good start.
+   - **Improvement**: Since both documents have well-defined sections and are structured (NIST RMF includes clear sections, and AI Bill of Rights is principles-based), it would be beneficial to first chunk based on sections and subsections, while combining it with context-based chunking. Instead of focusing on one chunking method, you can adapt based on the structure of each document.
+   - **Dynamic Chunking**: Also, mention how the method would dynamically adapt to less-structured documents if uploaded in the future, ensuring scalability.
+
+### 2. **Specific Chunking Examples**
+   - **Blueprint for AI Bill of Rights**:
+     - Principles can form separate chunks (e.g., *Safe and Effective Systems*, *Algorithmic Discrimination Protections*).
+     - Subsections can also include further breaking down examples or cases cited under each principle.
+   - **NIST AI RMF**:
+     - As each section (such as "Suggested Actions to Manage Risks" or "GAI Risk Overview") has detailed subcategories, chunk them accordingly.
+     - Include how you will preserve context when chunking specific actions.
+ +### 3. **Incorporating Expected Questions** + - You have already listed good examples of user questions. However, for the purpose of improving retrieval: + - **Enhance Contextual Retrieval**: Suggest tailoring your vector store to group similar questions by topic, such as data privacy, bias prevention, and AI safety. This allows better retrieval of relevant chunks across both documents when users ask questions. + - **Example**: A question about "data privacy" should retrieve answers both from the *Data Privacy* section of AI Bill of Rights and the *Data Privacy Risks* section of the NIST RMF, creating a more comprehensive answer. + +### 4. **Document Summarization in the Vector Store** + - If possible, create summarizations for sections and topics within both documents and store them in your vector database. Summaries improve quick lookup without requiring a deep scan through every chunk. + +### 5. **Advanced Techniques** + - **Context Enrichment**: Mention that it needs further investigation but is a promising avenue. Focus on eliminating duplication by refining preprocessing or filtering steps when enriching. + - **Contextual Compression**: Explain how you might use this to generate concise answers that retain meaning, which could be useful for long or dense document sections. + +### 6. **Handling Duplicate Content** + - Add a comment about how duplicate information across different sections can be handled by maintaining a cache or reference of repeated content in different chunks to avoid redundancies. + +### 7. **Performance and Efficiency** + - Since `PyPDFium2Loader` is slower, clarify that you will use it only if high-quality, OCR-accurate extraction is critical, but `PyMuPDFLoader` is your preferred option for efficiency and initial loading. This could be useful for streaming applications. + +### Enhanced Structure for Response: +1. **Problem Statement** + - Continue with the problem definition, but expand on real-world implications of ethical and risk management in AI. + +2. **Understanding the Data** + - Break the two documents down clearly into sections and discuss specific strategies for how chunking can preserve meaning within these sections. + +3. **Advanced Retrieval & Chunking** + - Expand this section to include the chunking methods you've outlined, and specify the improvements you will explore (e.g., dynamic chunking, context-based grouping). + +4. **Performance Considerations** + - Detail how you will balance quality and performance based on user needs and document types. + +This would strengthen your approach, improving both the technical accuracy and user experience. 
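+
+For reference, here is a minimal sketch of the contextual compression setup mentioned above, assuming an OpenAI chat model as the compressor and the existing vector-store `retriever` as the base (the model name is an assumption):
+
+```python
+# Sketch: contextual compression over the existing base retriever.
+from langchain.retrievers import ContextualCompressionRetriever
+from langchain.retrievers.document_compressors import LLMChainExtractor
+from langchain_openai import ChatOpenAI
+
+llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # assumed compressor model
+compressor = LLMChainExtractor.from_llm(llm)
+
+# `retriever` is the base vector-store retriever built from the chunked docs.
+compression_retriever = ContextualCompressionRetriever(
+    base_compressor=compressor,
+    base_retriever=retriever,
+)
+# docs = compression_retriever.invoke("What are the five principles in the AI Bill of Rights?")
+```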
\ No newline at end of file diff --git a/Tasks/Task 1/pre-processing.ipynb b/Tasks/Task 1/pre-processing.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..59b90ce3a29177a23ae3e0bec806efba60b59c6c --- /dev/null +++ b/Tasks/Task 1/pre-processing.ipynb @@ -0,0 +1,1299 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "% pip install numpy==1.26.4 \\\n", + "openai==1.44.1 \\\n", + "qdrant-client==1.11.2 \\\n", + "langchain==0.3.0 \\\n", + "langchain-text-splitters==0.3.0 \\\n", + "langchain-community==0.3.0 \\\n", + "langchain_experimental \\\n", + "langchain_qdrant \\\n", + "langchain_openai \\\n", + "pypdf==4.3.1 \\\n", + "PyMuPDF==1.24.10 \\\n", + "pymupdf4llm \\\n", + "sentence_transformers \\\n", + "langchain_huggingface " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n", + "NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n", + "SMALL_DOC = \"https://arxiv.org/pdf/1908.10084\" \n", + "documents_to_preload = [\n", + " BOR_FILE_PATH,\n", + " NIST_FILE_PATH\n", + " # SMALL_DOC\n", + "]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/lib/python3.11/site-packages/sentence_transformers/cross_encoder/CrossEncoder.py:13: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from tqdm.autonotebook import tqdm, trange\n" + ] + } + ], + "source": [ + "# Embedding model - snowflake-arctic-embed-l\n", + "from langchain_huggingface import HuggingFaceEmbeddings\n", + "\n", + "model_name = \"Snowflake/snowflake-arctic-embed-l\"\n", + "embedding_model = HuggingFaceEmbeddings(model_name=model_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from pdfloader import PDFLoaderWrapper\n", + "from langchain_experimental.text_splitter import SemanticChunker\n", + "\n", + "\n", + "pdf_loader = PDFLoaderWrapper(\n", + " documents_to_preload, PDFLoaderWrapper.LoaderType.PYMUPDF\n", + ")\n", + "documents = await pdf_loader.aload()\n", + "\n", + "text_splitter = SemanticChunker(embedding_model, buffer_size=5, breakpoint_threshold_type=\"percentile\",breakpoint_threshold_amount=90)\n", + "\n", + "chunked_docs = text_splitter.split_documents(documents)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "# Recursive splitter\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "recursive_text_splitter = RecursiveCharacterTextSplitter(\n", + " chunk_size = 1024,\n", + " chunk_overlap = 100,\n", + " length_function = len,\n", + ")\n", + "recursive_chunked_docs = recursive_text_splitter.split_documents(documents)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "os.environ[\"QDRANT_API_URL\"] = getpass.getpass(\"Enter Your Qdrant API URL: \")\n", + "os.environ[\"QDRANT_API_KEY\"] = getpass.getpass(\"Enter Your Qdrant API Key: \")\n", + 
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter Your OpenAI API Key: \")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['8dd5b1e7fd464e2a90c28a8eea8b0cb9',\n", + " '906e0c268d564dbc89c0b8398e235407',\n", + " '4b81191a4cc94fbd835dc9c942e9543a',\n", + " '25c3b7fffa8d4bc29790057fe2f4d025',\n", + " '3ad5906a8a274b56bd05e4ac39ffe459',\n", + " 'e3fa01bef57c489ca014be2e589b7ef1',\n", + " 'af5fc5121c6a438a8fc5dea454b7e92f',\n", + " '80500cf02d5748c39b1c62288459c306',\n", + " '5db6eebee14b4aafa948e4f9aa4f7aa2',\n", + " '99385298e8744643822e01525bdff89e',\n", + " 'eddc9704820d4005b7c62a5085f69454',\n", + " '4324a624f4054ae5baa7270d9f6aaa56',\n", + " '9eb24bea31a749f1b7a86ac2b186ec14',\n", + " '7e9c9763bebf40cea1833ea6ad376eeb',\n", + " 'cc8846008cac472e88eb16497c560a15',\n", + " '5af0886e387449fc89f1d0e82c32c590',\n", + " '824ae7c1c15a43c8b62713f02d91e0b5',\n", + " 'f0ef1b30251b4429ad7d902b85fafcf8',\n", + " '314a75e55d1b4c1fa46f49610d745f95',\n", + " '66828a5f9536480bbd08d94f087bc44b',\n", + " '8230b8add982486f9ac8e120a27d3aec',\n", + " 'dd1c75bb5c1441468ac8e7d4595bf0b9',\n", + " 'a9b1b1b87eeb48b78ed4cf6adddee9d2',\n", + " 'eeacab16c9d94d08a791c516e0a65f6b',\n", + " '187badb4dc064743898f5e5218114250',\n", + " '0ecc4e873fe047ce8afc33e19fe40c3f',\n", + " 'be7b81185ce140229bee6d1306120528',\n", + " 'd8a9a361dc8a4917aefc2e0a17efafad',\n", + " '7ca3b3bf947e449e8f58cd4fca12d884',\n", + " '07a8b46151e74641b97ad823ef91082f',\n", + " '421004f00dbb4a47a81c424ad5f64e39',\n", + " '53750f6e3cfa481d971d7e5a6b9f55c5',\n", + " 'e9f9524be6884599893590ad5acdb12b',\n", + " '446225894d9747e0a888b596875ac83b',\n", + " '7677215528c44061bfb018e42a13e528',\n", + " '183002bfeeaa4b75968dda61451c2f37',\n", + " '818cd888dd6b4735a602949aea2ea900',\n", + " 'c803c1690b5549a5a13578b2cd757b17',\n", + " '864bc201486e42bdb4caef6a266fa1ee',\n", + " '02cae44fcb9e4d2eb60a6da08b01a4e6',\n", + " '384e2a4f36d14359b22b0c0cdf463cd9',\n", + " '6de4fdb915164aa68e076a2d6e1913ae',\n", + " '8da0e4ecc526416ca3be4043879ea17b',\n", + " 'e63359693473494d922f996b57c65d3b',\n", + " '76313322b20b4e368f2aaefbf911df6b',\n", + " '455bced7b2544b69a5c547d998548748',\n", + " 'd1abce93130d4eb49cdc1aca8b7c9c60',\n", + " '966dde23cfa144b899b60a3659f32eb4',\n", + " '4fe01c77f1ae4b70a18c56fc6cb9679d',\n", + " '7aa14bc014fa445cbc061c47a3fe1c31',\n", + " '0aa076f900614fcfb37883057c67e6c1',\n", + " '2bae6b24f8234792b914a75712fb89fc',\n", + " '74fb5f5dfdb8413a9afaaf472a009ac2',\n", + " 'b1c95e4126e842d598eae6fbc455b82c',\n", + " '2e326893157844fe88f762b96aa46b99',\n", + " 'd27b5c85573d44658a4d338c39890629',\n", + " '1f967a5cae05418d94f9f3d07dcce74e',\n", + " '7667f8b8e5914417a68e7d41256cfc98',\n", + " '7fcd90b390494d3686e532a6528bd021',\n", + " '8c64aecd850b48b3bac216e73ebad1e0',\n", + " 'ca845f98d6b44c02b9f9edafcd75aa2e',\n", + " '571b12484be44ea6b406e6cdcd0662a5',\n", + " 'fa099bf314614075bf8bfc58110f52f1',\n", + " 'b98bfe7e8c234ed59b5d893929bac64e',\n", + " '3bc58c54665d4a4ca394ebb13debfabc',\n", + " 'a276d45eccd54056a8855ccdb5907df3',\n", + " '0f9ac79d967942d0b56b9fe200dc7846',\n", + " '07c5e3794b244e28b384dad31a2c63a2',\n", + " 'deb29326ff1e4fec934c35710d4e0dcf',\n", + " '5278dac4122044879b9dd1a7c557b7a8',\n", + " '041338e04daa4482a7d65c311ab0f3f5',\n", + " '1ed9bbc381b1423a95ef935cc16e277c',\n", + " '20bb221a9b654bbc99edc812475adc12',\n", + " '188fba73978143e8a22370774e1d31fd',\n", + " '3a65a41271a947848aba4939473d0f85',\n", + " 
'c5cee035027048338a81b9bf0830cf57',\n", + " '03325dbfeb164512a565172407ae0ec3',\n", + " '4494b4f19cff440281c034ede5e675e1',\n", + " '2b55dd78b0f5461d917eeaec2a75dcb7',\n", + " 'a2aef7ef0741438fb643a4fb225f1ffd',\n", + " '3886d787efdd4d2bb3fc702ffd911db8',\n", + " '71455bc57478429e8a1269ce7332302a',\n", + " '0c2896f473c749f9888b5723ee834a5d',\n", + " '1a8507e2597049dc8287d21172dfe518',\n", + " '943e65c033774aed99116750cfbb5f5a',\n", + " '78bfa949218f48c7b365569f2c3396ff',\n", + " '6812e197478a40bf86cc363d11fc0856',\n", + " 'e9cd2bf8ad454aa7af9446e62d9d845d',\n", + " 'ca970fdebb1e4d79853090a7c73722f5',\n", + " '84400697300c4d468f5c58d09fd63d56',\n", + " 'ad1bf5c566a147e6b66f9f3502a227fd',\n", + " '0c620c86587a4ae8975a0d066eb80e97',\n", + " '1aa1ebd384774410ac011ca3e535808f',\n", + " 'f30f7dba0182402c8abce8d9b07df99b',\n", + " '73dd6c906bd14d3494dc8def54680e0f',\n", + " '632c7eafa1c048979b65c5e8ecacb98e',\n", + " 'b3edcc98879747dbbed2e8b6e19e1baf',\n", + " '4f4bef9639bb454e87cb61051e2d4c82',\n", + " 'd0c059bc4e04474f9355c87964b3b470',\n", + " '412fa6deb62546c4a092988f96ddb425',\n", + " '2283f3ef3e7141738c8966fa4333ba05',\n", + " '7ed4340667d643b193b45f7f21a238ed',\n", + " '9209830e86c54dbd9974cc737bbdbe91',\n", + " '89e28024e5f14377a6b2efa1997f370a',\n", + " '2f4b769a22b24bb49bed8917adae1f9e',\n", + " 'fcfc743d434d43b886afb80c5377e1fd',\n", + " '300f3cfca0874ee2818241856f2175df',\n", + " '55ea80ccf78f4c9cb622c1451951e723',\n", + " '4b755a511dbd46f79eb4b3bda119e79c',\n", + " '956a7f2f70854e1a82dfc542fd761492',\n", + " '190c9cd3f01a4ea894877f4ab35000bf',\n", + " '381a0acf443244c78f303b9f6b72535f',\n", + " 'd7afcec2075343b19d1320605cc41b46',\n", + " '7898f81259be4c42b44cdfd3b41aa25b',\n", + " 'd86e65ae1f8140e299ccf27583735b7a',\n", + " 'ee553712557545a0b0320adc4d563bca',\n", + " '6fb6a0b739a64b909cee096d722a4f6e',\n", + " 'be1514e798f04581af33447b86f002c8',\n", + " 'c92113c2d4344ce8a10e7a6c1d089f4e',\n", + " 'dc47dd1e29bb49768c2b88054f91c69f',\n", + " 'd72643a36bf6415795a3694f93e5c376',\n", + " '0709e83c2d974cb7aa30b17f8a5e5050',\n", + " 'f4bc420a5b4c4cdd91c4441837fbcfae',\n", + " '712e9898ff5f44eda54aebcf54931760',\n", + " '1916383ddb404a32ab833c2add8e2511',\n", + " '4a01c037505943a3be4fa183de7d5c73',\n", + " 'cb29c514ccf8416491654441749f9889',\n", + " '57cc735133754979862b8dd27ccf45d8',\n", + " '6e28a749e0ee49d8b6ba562ab268e474',\n", + " 'd915b593c4194759a4ca48304ce54b56',\n", + " '83bf4923ef4847b1b3faa0a85ce85d9c',\n", + " 'c7165907a5774d7a9cfe034328875f16',\n", + " 'fb83c16fe2ac4a0b8a2cdd3a099b0751',\n", + " 'd8d56a63f4104e1d9b06c5c8d6246d4d',\n", + " 'b92d479d4fea486980ce1133ef0d9049',\n", + " 'b264b04721c14738a6e018c3d089e3e2',\n", + " 'bf50880e770d4e2a80415e87b8f95788',\n", + " '5ca16b29007f4b919ac1f3fdf261aa10',\n", + " '30f3d89d1ae042afb3b745451e0a5fdc',\n", + " '41dd324a662a4e79935980dc8e53ab8f',\n", + " '7590bcc7d6d540b1bd92a7ce69c0e9b2',\n", + " '72973a23774d4bebb9c42dcf885ae06a',\n", + " 'ce3e692e73084116ae834e72349032a7',\n", + " '044469614deb404f8d3b1860907e0f75',\n", + " '3805bb3205c5411daf2a64a7742e59fe',\n", + " '03aa772e62b44423b75ec05c90e8687e',\n", + " '740e824d876d44c7b30599b4dcb8eb44',\n", + " 'ba27340cf2144d15bee2a5f5b7e00622',\n", + " '4c6a7847bc554fddadd0a884c26612bc',\n", + " '8a13a8d664c4453b8f71c01b28ec8dff',\n", + " '113a3db0cd0d4e0f8067c5fa074967df',\n", + " '28af19cb148f49049336aa1b52c14a98',\n", + " '795bc0bc5a9c4ff8b472e2a9c9c59dbd',\n", + " 'fb36fe1dad1c4280a7186ea5c20e64dc',\n", + " 'a50e1d30a67b4144bd8ce5ab32f1cd3a',\n", + " 
'f1ccd9cc27b0414f96243f1c63a07fd5',\n", + " '3d2b64f6ceb74744b6b8374728142334',\n", + " '15f314cbb8a14f9286a814cafef76192',\n", + " 'ac540651b7a34d50b70e4c44cf25b3ca',\n", + " '6b87356c50d1404abe0a676b7f322a72',\n", + " '67902b525d014249900e54257590f7ea',\n", + " 'fc1189d79c824a74bd60dd5dc341aa2b',\n", + " 'b26845eaa60246399cef48b0a13d11b5',\n", + " '7b592acd329743a8a7a3b2569a048416',\n", + " '189e134f601441cc9f1514a778e3c820',\n", + " 'f7f1425e4c2d4e1fa9040ee85d368bf1',\n", + " '0297a5233d6f4275baf0a9957b0dc586',\n", + " '5afeb076d11841c890517fc92d0aa6f9',\n", + " '79fa6ed91f7a47b6bd764e1c8b412fa5',\n", + " 'ff4b75e4daaf4588ae69ba2f83816c15',\n", + " '84b7e45334a3477f8d8a64e3504fb620',\n", + " 'dbf780a26828491da830425df5a7a03c',\n", + " '25a6912b64f442f99f5787bef114ebff',\n", + " '00c07386007a4dc18072a431f7cf83b1',\n", + " 'f84828d74c0c446389732b8eb4d6570f',\n", + " 'a12ba2aaf84640a8816d9ce8e8a417e9',\n", + " '02223a887b2c4ec0891d45e75836b00e',\n", + " 'a62632e5379a4af5be885b4750d18650',\n", + " '5ef2d149eb314d879897648027e7e8aa',\n", + " '1a2c86d6906141b18700239300599566',\n", + " '584530895cc74af58cdb016c0ed63bc5',\n", + " 'e19a1e82a1ec4884a7c72f2996ac927f',\n", + " 'dbcd348813cc4365bf65c549333e669d',\n", + " '8bac146f886b4272bd40f51adb35c32b',\n", + " 'cae2438601594d6fb39d99d617fe6c0e',\n", + " 'e8306f6a959b4a219d096b968784c44e',\n", + " 'ebbdefa7da15403294655048c6fe3624',\n", + " '60579bd40852405b8345114456963981',\n", + " '211f66d20b5c451d91f310594b854ea0',\n", + " '6d726194ac8641a6a5f6d8ce3f192a7c',\n", + " 'c0154e0f56b049048f9bcb7f718173f2',\n", + " '081a6179661e41f69ab10b92027d161e',\n", + " '2c149c226d504053bfa94532a850efd1',\n", + " 'f833250f67bc4329922a5a7f7b7d07f7',\n", + " 'a0c1e7c49351406ca3567622b6b1e38e',\n", + " '72abd1e5dc824d6c8852f7331990b6f0',\n", + " '35c02c07ae484045a325940fcbe098fb',\n", + " '63b83f297d1d486e84ddb42c2af32900',\n", + " '17fb247ed7bc4599a8de06966e744b2a',\n", + " '371ea02f2a7e4e11b82ed0593a26a806',\n", + " '6847a89d7a944bf2bd95430c4d63def7',\n", + " 'a186a88983e64831bf42523b6522d706',\n", + " 'e3538719cffb4cb59efa815b27b4bf81',\n", + " '8ae7bdfed4e249dd98727cbb4f34259d',\n", + " 'd4f18e23e8444ffba7b13661b22ba1a9',\n", + " 'ab0aace578b2457cb10966f9a57dbfce',\n", + " '641437d62c1940d7a7d0711391802aa6',\n", + " '3f88fe5f258244d581a053a53b844bdc',\n", + " '5022a64b46dc4989b2a919193cdec7da',\n", + " 'e620e702fb604457b4724ce0f753138d',\n", + " 'ff9557d7d3c446a39829de03605a5254',\n", + " '5314c1de556d4b96af06fd3ac37cf1f4',\n", + " '56d98670e8f74b1881bac44ccfb9267d',\n", + " '2d06bf2cf37a48f98708e345e86a6114',\n", + " '1d6be38e70b74ce69cf68c32fdef0b9f',\n", + " '936184dab9ab4ff9905cbaccc0844e61',\n", + " 'ec080c630727417fa858340935e0557d',\n", + " 'f6792c2c58774d4e9ba97575ae5a9ddb',\n", + " 'c1df1df5060e41899363e2a0649100e4',\n", + " 'b2eb147898c64b359c951297318e6831',\n", + " '216d56dfdaae4c098b826c2c6dbe8132',\n", + " '7921cab4d11646168b5d186794f5db24',\n", + " 'b89e6d297f064e708c4fb903c6ebf15a',\n", + " '4ee471c58dbe4185b6968113228bb20e',\n", + " 'd64a74ed5b57427c9c6ce98a9f945b70',\n", + " '58cd9cbd849c456e85fb72a4abc5c69c',\n", + " '2e1835610aa749c896c8c165e3d84470',\n", + " '8008134cc1c44751bb95a3270cb89a44',\n", + " 'e4f098d6b9024392adc396aad0efb94f',\n", + " 'e8fcb91812d048efa5ba38a46cf40531',\n", + " '987e19cc2d674e2aa0d555af45ef874f',\n", + " '2816810a2eea4f0081baae4b28614796',\n", + " 'ffd0647c27664a779dabf843fcf83981',\n", + " '7a5e718e26b14f4daf674c901b3dde93',\n", + " 'a7248347d0b743d7b5db65f3b1b87cac',\n", + " 
'640f3399c6c340f19a11919a6402ba85',\n", + " 'fe73657d7e884bd48d93afdc193b73bb',\n", + " 'a9c649a44de94b5f82af06c804e3bb08',\n", + " '8265df5e7847443a8c91478ae1cabf72',\n", + " 'a3bb9ab12f814c4faa382b586fe13680',\n", + " '775f072a48674d6b9fdd0671c4064891',\n", + " '21048806b4c74f5db5b7f873c45adedb',\n", + " '1525157eac174ab684089f50e6c29969',\n", + " '136132cedfec4e3bacf2a8adc1fbd50f',\n", + " '08099904461749ddba96b17b61226622',\n", + " '1da17dd87dd448c4902f8e3a1ad1c51e',\n", + " 'fa9c4880b4a34b9aaca3f5363ff1b7f0',\n", + " '30eeafb17ae74a42b370e173e22abfea',\n", + " '0d3ad50f8c524e90b6c440865aaf63d8',\n", + " '51d52addb9df4c04afaf8f008fe89259',\n", + " '2aa50c713fe241f1b9b44107c0d47945',\n", + " '8615b8c9442c4031aee25316cdfc7cb5',\n", + " '8f8fd8a1edc043ee82c77381bf39a83c',\n", + " 'f15223b9a98445f2b7613e518e7bca83',\n", + " '6a437e8d6655430aaef679f6c6a84831',\n", + " 'c0a3331686754cd9929c4abd5d81dc7f',\n", + " 'ff36401f33d9424cb7112033ead5f58c',\n", + " '841220093a1242f0b04a4ac8d852e280',\n", + " '655ec122f3d24c069eecacc8e8bc8f82',\n", + " '6da4281d97ad46ed9ef6dd169c640afe',\n", + " 'eeef919024d54063ae3cd6c6f8f7a73a',\n", + " '9f55e4818e0c4bef82dec178dc64eaa4',\n", + " '8a652387c6eb424288a0022be058d00a',\n", + " '9a19d1114e674c618d23a1299f14f1ba',\n", + " '9e4b5fdbbda24ed5a2fbbc3923847a44',\n", + " '8b442e6de7bf401b8300c567a642a759',\n", + " '20c8152b25514d018eeb8542b4450ad2',\n", + " '5ae8d5d8230f46cab713cecbd97c847e',\n", + " 'e159ce134b7b46308fc919b22a9e808f',\n", + " '5a0d312175db4d15b85c0255b68bc027',\n", + " '6f82139d091145cba88a7f0fc367063d',\n", + " 'ee60e7e10d924f01b530b0291d939aae',\n", + " '3b73fe9ad953458bbd3d11f44b85fce5',\n", + " 'd15e56f06ba24e3cb6a1c4dd0568201d',\n", + " '16f3d27489ff423dbf7d027844d957db',\n", + " '6091bda6320149a1af5ccfb541e75148',\n", + " 'ec527d7203164f07ae7349cfa33829b3',\n", + " '26b4090286e346b4b686b13360cceea3',\n", + " '856dd023a8cd41108eea38b403eadd09',\n", + " '5434f50e81db44e5b80d3bc8816eb5bf',\n", + " 'e9022b1d39f24ec09981e8c66478705d',\n", + " 'd3f895478ed74239b4bc88e04e215f1a',\n", + " 'c1fb1bae1b5e42f8a65f8260c259e133',\n", + " '2a86b477a07e48afb2658742c30494e8',\n", + " 'da64e968fc3047089de2ffa4b62a8c0a',\n", + " '0a42ebaaedae4f73914398ad1486afb4',\n", + " 'dc66a40fba5f4e348216910fce0d2428',\n", + " 'e125ed2241b24a31ba40ed768a21d4f6',\n", + " '850eda324b734ffdad0fe63c92c91038',\n", + " 'd9e9ca7b0a634afdac1f4da62f2dcddf',\n", + " '7cf441e12164420fa8b58e0aa6d244de',\n", + " '0a6c2f48e5094e3399f7e1d0f38d873f',\n", + " 'c8c29dbfc7f840d7a7195aa74388b30c',\n", + " '07a90e50dcbb4352baa6636e9b687aab',\n", + " '85d60bfe6d684c1a8578c1d6710c867b',\n", + " 'ceb46b27e8994626a6d6d1c1acabff5d',\n", + " '506dd325656145ebb7d976de3b4953c0',\n", + " 'a83e954196874363b13c7cb3d7d8e025',\n", + " '16fc61ea959d4427b3fa723d7e58f2bf',\n", + " '3d4599e2ad2f47deae8c1c25d30dec68',\n", + " '260482de224a4ec998459a5d2f9384bd',\n", + " 'd480305e9fc34a55b8f146343fe1dd8e',\n", + " '9851b805fcf54766bd482d5a0d4a8d0f',\n", + " '5147fb1a9a904ff09b7c6885567fa94e',\n", + " '509f1c4ef2b348af821461d751850e93',\n", + " 'a045c48dd9444211a2f0087229df189d',\n", + " '3b90d03add21451aa40990b1f2dad9e1',\n", + " 'fa68102a0555422db1cc0f3822496a48',\n", + " 'ade010ff9a2644a38c7c3de875a3ac78',\n", + " '390f54300e1f41ac9224da683f00d31a',\n", + " 'aacaee53fce14e9395259a0609cc1646',\n", + " '00ca0c3998b64339874ad036983a0922',\n", + " '2b3b851a8bb6422abab843dc2148255f',\n", + " '363ed4276aa144b2baeec9dbc1fba38c',\n", + " 'e9bf60fe2f184793b37f268ffa486abc',\n", + " 
'98b185639a5d441ca60d7a5fe7620f8c',\n", + " '9ae478f07ca3465a9a447b3c7eab4b26',\n", + " '01fc2f0676754dc7baae898343e2bebf',\n", + " 'cf3dd2f39b1a48919b90571555e4befb',\n", + " 'b0ef26aca0404662b5706ccfa737a52b',\n", + " '9afcf7171eb74e628f99fa44a753c131',\n", + " '26f3917cc2274e998e115212273fe2ba',\n", + " 'ec369428b5fe43138d049f293dcd21a4',\n", + " '947d581cb3cd4555933504b8c64c54d3',\n", + " '16b495459a4e4bfd96a12655defa9551',\n", + " 'af15b6c943834e96a24363fbdca209f3',\n", + " 'a01a9d5cb1c041889bfd1cf29cd4c08a',\n", + " 'd3d0ee8f3c394ed680324fdcdb442241',\n", + " 'd56148720f974dffab53a4e8917c3833',\n", + " '32593dde550f45af9a36349bcd63192c',\n", + " '1d43c4caf83b470897e96410f4dae5ee',\n", + " 'ee1cd8b353cf45e3a88ce76faeebc9a1',\n", + " '8e7135cd24764e94b8d04e15ec86b9c0',\n", + " 'a7a918cf594e4a2992398acc924e6015',\n", + " '13837b8571154abe83bb0b8d8e08d406',\n", + " '63a4ce49d82d4da6bcb4da66db26bf35',\n", + " '2214b1db8432499286a9ad49d8a2391c',\n", + " 'af23e4413b7c42cc982b011d6432ec5a',\n", + " 'e4b5e669227c4112aec7a7c53f568b75',\n", + " '2d5f634bb7414afcac7b78ce7c0a864b',\n", + " 'beda5449b3124e379f35601a33ab4651',\n", + " '271af180d99846e4a0d8c57f444df81d',\n", + " '4af16168d5a1432e8ca9719c9000f58c',\n", + " '405113fc9e334cada56589b758cd9fd7',\n", + " 'c95f295e46ba4b82b9f92fc0dcc8c1df',\n", + " '475897fbe33347cf907f3cc381f40c0e',\n", + " 'b6779e2220c444d38741c06cc2bb380c',\n", + " '7d56c936c7d84514a67cd75e369449f7',\n", + " '272a9892cdb742dcbe5f90e29eefae72',\n", + " 'c28ace207c9d437da68cf599ac028bbb',\n", + " '6d3c684dd6894bd9bf24486175ed834d',\n", + " 'd8766ca5bb7d468399e6b864756a04e6',\n", + " 'f5501aae471447fba9a4ac7ccf88c1fe',\n", + " 'bb4d8f7876a141e0ba82eaebe7899c5a',\n", + " '1c87be78b3fd48a093c23a54904bf8bc',\n", + " '7cfe5d24a86645e1928a4700e2175e82',\n", + " '79a9e904f5bb48a3830647c6afccbb85',\n", + " 'deda349c16f54a9f85cd302269c22456',\n", + " '5339992d8dcd464294260f5c0c857fff',\n", + " 'd86ca1d2c07f4784956acb34d4d8c48c',\n", + " 'dadf0561bb1c4ad9a87cc33a21424d32',\n", + " '63c86b1adaed4514a75e0409a66b15c3',\n", + " '61425b5443b840f2b7d28347d4002192',\n", + " 'e7166ad200694bb7ae645e63495dbfa5',\n", + " '9a4e61507dbd4fcd96b9c4b8eb24e74d',\n", + " '5cb628ca8b8245e0ae326ccb8ae5635d',\n", + " 'cbf3322896f8445ead83a6907a9aae08',\n", + " '9156196800e64996891c0703499ffbb5',\n", + " '47c88e62be7e4cb88b7d4935ba38cff0',\n", + " '0fee655d64c34f84a07e6b889866a486',\n", + " 'e4472727736f4fa59d49536d8e331f95',\n", + " 'f933c36480f64d8b9600c5075a085e61',\n", + " '808c6b3ce87345b391843aaa6b253bfb',\n", + " 'f5d6ee781bd742fc88d5ddc2e5f0a7f4',\n", + " '281ac17550864cf5827193ddd577aad1',\n", + " '3d3aa0ebb1574fa7b498a13abb1b7c40',\n", + " 'f43bf31cfe994208b24e363f9459a7a0',\n", + " '9931894cfd004a20991a7fef40c23c86',\n", + " '1b34b9f61f164993b7387a73e961bf2b',\n", + " 'e2ca8df6b02d4240b7f1e4474b4765c7',\n", + " '42741aaeaec7422f8ab9c59d18430455',\n", + " '6a0a67f326704e11baae384eb567fa09',\n", + " 'f7894024f0764978a9eca821c29d3449',\n", + " '7f88bf5493764642a14a5bd8bbf04a71',\n", + " '93a5412c61204d53b94fda693fb561b8',\n", + " '3d265760e45a45d990240628c46fde6f',\n", + " '62112a36dcab48379590ca210ca09918',\n", + " '1abadc8684e64e6c8cbe1f7427d39678',\n", + " 'f1f8c1a6c7534c5bb386425728cfa2b3',\n", + " '81b640e8ae0747daadeda29da9f677f6',\n", + " '6428b2a89f384a6985d69b0183fc71c3',\n", + " '107fb02d9c7e4bffa9669509015e8af1',\n", + " '73b8fd9b8aea45a6adfc02d5795bec62',\n", + " '931cdf852b634abfb01b656221a8d0ae',\n", + " '81e9f8177fe9430a9fb17fd20522c955',\n", + " 
'7dbc18b381454afeb2a6041f60c2b23b',\n", + " 'f621fcf8f34f4629909ca455ecaa4f55',\n", + " 'c358be2860cf436d8fabb3200888c307',\n", + " '81516460f65740e9aef0f4babc29b2f1',\n", + " 'ecd2409c27cf47aeadeff569bd25ea85',\n", + " 'a4c997a1ad7f4990b2d71cb028463610',\n", + " '1ed8f8421791456db0543cd3e1ede40f',\n", + " '56f5077192e74e09a58017d0c3368bc4',\n", + " 'a639e85d3bee4530a53d132bfa7c58de',\n", + " '4c75146c59dd4541a8500f89dd060a2c',\n", + " '95c74438067f4bada1fee37942e06ed7',\n", + " '0b3f010515574c48b02bebf7a451052e',\n", + " '4db576ae022d42beadd921a81e977096',\n", + " '3998f2de8bf44929afa7ad0e2e86eccd',\n", + " '73cf1599b76d4061874e660228ca5f06',\n", + " '962fb1291b984d60adb133201b7eae48',\n", + " '365a19df65514c698d826e86fcdc6091',\n", + " '5e86e99df25a4cc287d8ea0605f8cb08',\n", + " 'f2cccab55efc43d5b098c38c31f687fb',\n", + " '3388b8f7db314bf5a60cd10dbbc45f9c',\n", + " '2a83e0ed7b4e4d2f906cfbc8dca7c512',\n", + " '5b4f99c2acab40248de70a0e92506bc0',\n", + " '278560e5a9e244e1a0a2ffa0ef7c261a',\n", + " '864b65e24dea4473ad0e4a5bc32f4c69',\n", + " '6b089bf4dd004ed78f1b92c50d414e47',\n", + " '5bebbff6685649b99fa304d40b9b6362',\n", + " 'aecb11bcf1444ad589508ea8bec77bdb']" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from langchain_qdrant import QdrantVectorStore\n", + "from langchain_core.documents import Document\n", + "from qdrant_client import QdrantClient\n", + "from qdrant_client.http.models import Distance, VectorParams\n", + "\n", + "dimension = 1024\n", + "collection_name = \"ai-safety-sr-arctic-embed-l-semantic\"\n", + "qdrant_server = os.environ[\"QDRANT_API_URL\"]\n", + "qdrant_client = QdrantClient(url=qdrant_server,api_key=os.environ[\"QDRANT_API_KEY\"])\n", + "qdrant_client.create_collection(\n", + " collection_name=collection_name,\n", + " vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),\n", + ")\n", + "\n", + "vector_store = QdrantVectorStore(\n", + " client=qdrant_client,\n", + " collection_name=collection_name,\n", + " embedding=embedding_model,\n", + ")\n", + "\n", + "vector_store.add_documents(chunked_docs)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "retriever = vector_store.as_retriever(search_type=\"similarity_score_threshold\",\n", + " search_kwargs={'k':10,'score_threshold': 0.8})" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': 'b6779e22-20c4-44d3-8741-c06cc2bb380c', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Human-AI Conļ¬guration \\n'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management 
Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '26f3917c-c227-4e99-8e11-5212273fe2ba', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Human-AI Conļ¬guration \\n'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 11, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '07a8b461-51e7-4641-b97a-d823ef91082f', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content=' \\n \\n \\nFROM \\nPRINCIPLES \\nTO PRACTICE \\nA TECHINCAL COMPANION TO\\nTHE Blueprint for an \\nAI BILL OF RIGHTS\\n12\\n'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '1d43c4ca-f83b-4708-97e9-6410f4dae5ee', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Human-AI Conļ¬guration \\nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \\n \\n'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 61, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '73cf1599-b76d-4061-874e-660228ca5f06', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='et al. (2023) Whose Opinions Do Language Models Reļ¬‚ect? 
arXiv.'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '00ca0c39-98b6-4339-874a-d036983a0922', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Make sure these tests cover various scenarios, such as crisis \\nsituations or ethically sensitive contexts. Human-AI Conļ¬guration; \\nInformation Integrity; Harmful Bias \\nand Homogenization; Dangerous, \\nViolent, or Hateful Content \\nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \\n \\n'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 59, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '81516460-f657-40e9-aef0-f4babc29b2f1', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='https://www.rand.org/pubs/research_reports/RRA2977-2.html. Nicoletti, L. et al. (2023) Humans Are Biased. Generative Ai Is Even Worse. Bloomberg. https://www.bloomberg.com/graphics/2023-generative-ai-bias/. National Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \\nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework. https://www.nist.gov/itl/ai-risk-management-framework \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \\nRisks and Trustworthiness. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \\nRMF Proļ¬les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \\nDescriptions of AI Actor Tasks. 
https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product \\n'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 57, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '1b34b9f6-1f16-4993-b738-7a73e961bf2b', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='(2020) Overcoming Failures of Imagination in AI Infused System Development and \\nDeployment. arXiv.'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 0, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '8dd5b1e7-fd46-4e2a-90c2-8a8eea8b0cb9', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content=' \\n \\n \\n \\n \\n \\n \\n \\n \\n \\nBLUEPRINT FOR AN \\nAI BILL OF \\nRIGHTS \\nMAKING AUTOMATED \\nSYSTEMS WORK FOR \\nTHE AMERICAN PEOPLE \\nOCTOBER 2022 \\n'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '8c64aecd-850b-48b3-bac2-16e73ebad1e0', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Some companies have instituted bias testing as part of their product \\nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \\nlaunched, preventing harm to the public. 
Federal government agencies have been developing standards and guidance \\nfor the use of automated systems in order to help prevent bias.')]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "retriever.invoke(\"What steps can organizations take to minimize bias in AI models?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['dd370438231c41dbb7b1b4f1e7673cf7',\n", + " '02ebba25e01941849b9e2c9d5097b55d',\n", + " '099f0083356a4914b53fcb30df633b50',\n", + " 'f8aefa25a4544c869ca4caaf686b3d47',\n", + " '9ec0798fb4554f95ab65bd05315af118',\n", + " '33bdad4db0ab4145b85726f77f1789ad',\n", + " '98a75a601b114b07953b5aef4e032b4a',\n", + " '1e49952c0d6743ba8ad52a049c18daa3',\n", + " 'c3babb9205e54ca99ba6e5a03679bdba',\n", + " '74cecdae132c4a5e953bd7e72ac6850e',\n", + " '29529ea9530541a0bb446a8e82fab913',\n", + " '4193dcf34f6249b1a29c49a52239deef',\n", + " '84cb5d0f2cee47beabd72baa54161155',\n", + " '622f279ac5bd40b082725d90972e9ae3',\n", + " '48e366f92aa449e89cf7158584d2cf6a',\n", + " 'e2ffb7cb2ac3482fb9290940fabe9582',\n", + " 'f52a4c3353544fff93f241cba063028a',\n", + " '0c81aa08ddd4496a9aaea4b001f3596c',\n", + " '3e9d8d7785b04d5fad063219c94ef0dd',\n", + " '76796785c7b64d428e48b7cf699e155a',\n", + " '593ab20fc2494634959b0bfd8821ea91',\n", + " '654421ae91df4739bfb1ebdfb7c9dda2',\n", + " '27ffe059aafd4d5fa795b2f893b1d57e',\n", + " 'f1468d8276444858acb33bd6e2d36e73',\n", + " '5a6a15255cdd438abd9b2c3358dca939',\n", + " 'fbb13ef430ca47d28013dda9feaf4625',\n", + " 'fc16826ddd504038bb5f32fd97cdd98e',\n", + " '72c878c56d8746dea51fdcf506e48894',\n", + " '257ac1e04a4b478ab3b84c81e5dfc3f4',\n", + " '68b157c05ced46828ac39894e69b8d08',\n", + " '535e59df03184e86b30a09cd2d169dcd',\n", + " '1a6d76252d364a758564a41b922d44a4',\n", + " '61e497e66868447988198ba831096707',\n", + " 'dd9f18bedfed443c8bed0fc4c34c5e23',\n", + " '0ca0575097c24b50a613d5a19de61cfc',\n", + " '7dfba6cbbfe34756ba3f40b1be282324',\n", + " 'e9b68e9579194b04ad65bbf85332d351',\n", + " '7545cee6d2e345ba90e95082a15271b8',\n", + " 'df1e9db6843a4ddbb788b1e9117db9a1',\n", + " 'bb0687d2f3d047138d0414d0b2a22917',\n", + " 'b79ed7024a064c1f9360692c93615657',\n", + " '70fb8aa096a74d0a975705ac44f08577',\n", + " '41bf93d83ebe414e91253e7a96f50ec7',\n", + " 'bdab13de5b514bf68921751b3051ce60',\n", + " 'ccd47e89a09c4519981dc5d9be7b1ad9',\n", + " '334ac2db387848f1829e174c6584288b',\n", + " '5484df8c41cb41babd01c3f8d62121a2',\n", + " 'dd1f97aef70e439ea02c8f0d0ea397e0',\n", + " '99ccff600f8b4470af445f1f060e5518',\n", + " 'd4de01e6623741d3b06c8ee973ad6670',\n", + " '6217b664cbba4a64bf6e4f2ffde27831',\n", + " '3974c50b7e3a4503925f1c397254d259',\n", + " '4959c05e7d8049a4b75cff3bdc6fc30d',\n", + " '9d3aeacd6513463fbe9d13c1fb2441fc',\n", + " '8777904b546e4ef5b2759f0a60fd1fca',\n", + " '7a73c81712804111b6145b57888455ae',\n", + " '87036e89882546b69613378b17610332',\n", + " '5c508cd4449449c486b811d65b9b6db1',\n", + " 'fd5a25bb9038481aabaed2b34a7f2cc9',\n", + " '80b47526b0224fc0ba54cf4a61da11cc',\n", + " 'a2c5d0697278407fb0d89c9c138bfba0',\n", + " '49cb7eb52ea043f2baf21b611709d83a',\n", + " '96d002f0aa0a4cd7a86b188ef7811e9e',\n", + " 'd5a4fd354f904b99a8700363f7bcec7d',\n", + " 'ec384dfa0a5d4caeab593a4d013e40de',\n", + " 'b613bc8f681141249d11f2eea7691f32',\n", + " '7b9e491ad88b48f19ce2698c4d8ef5ec',\n", + " '011946a9fed74b14b7d4be2ff4eaadf5',\n", + " '2c07a4769e85425a9a32a053f1293ff2',\n", + " 'c859a58fd5a54447a22217564e610e77',\n", + " 
...\n", + " 
'a0c089e55b15476e8a89292b31b310fc',\n", + " '41fcfb134a8a44c9931edccd36627ca2',\n", + " '7e7687c8087a4174850cd19935c845bd',\n", + " 'a812d2adb03546538480ad44b33fd2bb',\n", + " '5c5779f29b93468ca603bf37687d068d',\n", + " '50783f0c8c944ff19aa86f2e5ac781ac',\n", + " '4a21f30d19f24fd5a331537371b46dce',\n", + " 'adda51e0076048ca98142173498f3af7',\n", + " '07fa53932ec041d5bcd71d77b273d8d2',\n", + " '18768f62094043548f4d280627a9d3a9',\n", + " 'd5fcc7eeeb154b179028b03beaf8f3f8',\n", + " 'a6859c0e9eb74acfbfde7fefbc76d9b6',\n", + " 'c2e90d49e7f14233acebd2ac10622efc',\n", + " 'e0997f7ee41742ed8a0179f9805bf12e',\n", + " '9b8dc4963513406d90a71935b05a7601',\n", + " '1cfc3fe1c73f43f69d776c880641baa9',\n", + " '34fc9ec282fe475d8314ea0a3a44b881',\n", + " '0943841a14ef474b8844520d91bfee9c',\n", + " 'ef27e818cad1462aa3bf7d9aaa19700f',\n", + " '6e2fdf8601904078986ebb1c71bc8168',\n", + " 'c18bf0d5ee5e45c8806cc1ea7d486bba',\n", + " 'a87b5e1ac2dd42d488929580625996de',\n", + " '8d384236c7e448439e9230a000d6aaf7',\n", + " '6dabbe7ff4814ab4bdcb84715ab20af4',\n", + " '943a13d4ef0d4bc2a81b16078b580e78',\n", + " 'baa6fd13ff6f445a8adc6482e59eb411',\n", + " '57b39b4eee4d4a0aac587d53ef68ff8d',\n", + " 'fd07301c3d554f02a0793d5f7bb63f35',\n", + " 'e99969118642471f887206e1c6a507e7',\n", + " '584dc05d60844536bdaac7bb3c1b7cd4',\n", + " 'f1ba0bf7f2b54612a3ffa4a66dd0989c',\n", + " 'e254c679f27c4143bcdda32f15e846e1',\n", + " '2d51c45ed9ca4a3ab95590cff7047d37',\n", + " '3bc9228d85364bd1b1f5bb7d13af5a8c',\n", + " 'b2de27d51d964d8c9e190f42d7ad9768',\n", + " 'e08419ed7e154a7da62745b3bd5ebd78',\n", + " '70a099122b2d43dbbe23d375432beaa1',\n", + " '01c551bc80134225ad0391bb295b365c',\n", + " 'c2388131208c4d8c868af7d7e6405cca',\n", + " 'bb0e3a6a10cc4ac29a168fcee5042c17',\n", + " '4b20b4b550da419d88ee62758c495138',\n", + " '5ea09fb9fb074218b814dde11c1aed3f',\n", + " 'ee62a59cd0784f4cbabb349725d7fe78',\n", + " 'a58457312c084595ab30bd5c59d0b3cb',\n", + " 'af5f60ae6a3a48129b15c89bebe493cf',\n", + " 'e5fddd76079f492f83ab1582f8d46893',\n", + " 'fd4e785b8d9c4d7bb8af493c54ee6870',\n", + " 'd91c68bc847c4c5993e6fa53b657b504',\n", + " '1bfd2c9fa301401a91ff49843b1c842c',\n", + " '9684447b6a9044339ff355877d86f7d3',\n", + " '6fcdfdbf98c64c8b89a3730f72f8268f',\n", + " '6ccdf85fa1ce4bd19775b7fcf5a12ee7',\n", + " 'e67bb1231dce4f35b3914e3a40bb9c12',\n", + " '7a55571c1f654084844ec308bba0ba42',\n", + " '38d650edd70742898f976233a7dfb85a',\n", + " '496b340000dc4eb09780ce18b3ba5392',\n", + " 'a79b3b9c6ea74599885fdb8d28d12cfd',\n", + " 'f4de9dfe9b9c47f7aee86802c145d2d4',\n", + " 'c5b4d1fed5874094acaadc869998174f',\n", + " 'd4ca3a34bfeb41bc8065e682213eaaab',\n", + " '40786361d73e4a58a173a099821b3020',\n", + " 'c267c513aa0049168f2ab2e2444029c9',\n", + " 'de47fda5d45340b58a4febf243c18c90',\n", + " 'b8f6f44e9ca64d28956c159f9aa284bb',\n", + " 'cb6cd0c5ffb743ce8d07d0c02ed2cbe3',\n", + " 'f42720e5bcd94c4ab0f3880cf75dbb50',\n", + " '3bd78527eff54c5db6ead2f0471d1b55',\n", + " 'c5257e382fbc4f69a16aa0bc047dfee2',\n", + " '22df791dc8fa45c9867e2cd4de171bd9',\n", + " 'b3a0c0feba764bd0abfb446204a8239f',\n", + " 'ba39d1b49cf840289c2d2d04e88948cd',\n", + " '1a956280d4db49aea6007c9c1d0f698a',\n", + " 'e7062f6c5dba476facf895b6faee99cd',\n", + " '95f2febaef5a433f89c11d3e9741347f',\n", + " 'fded3452cdbf42fa90f7fadfacd5dd63',\n", + " '0a89bd45fc9d4148828cddb02a0921e7']" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Vector Store with recursive chunked documents\n", + "\n", + "recursive_collection_name = 
\"ai-safety-sr-arctic-embed-l-recursive\"\n", + "\n", + "recursive_qdrant_client = QdrantClient(url=qdrant_server,api_key=os.environ[\"QDRANT_API_KEY\"])\n", + "# recursive_qdrant_client.create_collection(\n", + "# collection_name=recursive_collection_name,\n", + "# vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),\n", + "# )\n", + "\n", + "recursive_vector_store = QdrantVectorStore(\n", + " client=recursive_qdrant_client,\n", + " collection_name=recursive_collection_name,\n", + " embedding=embedding_model,\n", + ")\n", + "\n", + "recursive_vector_store.add_documents(recursive_chunked_docs)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "recursive_retriever = recursive_vector_store.as_retriever(search_type=\"similarity_score_threshold\",\n", + " search_kwargs={'k':10,'score_threshold': 0.8})" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 11, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '70fb8aa0-96a7-4d0a-9757-05ac44f08577', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='FROM \\nPRINCIPLES \\nTO PRACTICE \\nA TECHINCAL COMPANION TO\\nTHE Blueprint for an \\nAI BILL OF RIGHTS\\n12'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 50, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': 'e254c679-f27c-4143-bcdd-a32f15e846e1', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='ā€¢ Accessibility and reasonable \\naccommodations \\nā€¢ AI actor credentials and qualiļ¬cations \\nā€¢ Alignment to organizational values \\nā€¢ Auditing and assessment \\nā€¢ Change-management controls \\nā€¢ Commercial use \\nā€¢ Data provenance'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '2c07a476-9e85-425a-9a32-a053f1293ff2', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='organizationā€™s business processes or other activities, system goals, any human-run 
procedures that form a \\npart of the system, and specific performance expectations; a description of any data used to train machine \\nlearning models or for other purposes, including how data sources were processed and interpreted, a \\nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \\nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \\nidentification and management assessments and any steps taken to mitigate potential harms; the results of \\nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \\nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \\nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 51, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': 'e08419ed-7e15-4a7d-a627-45b3bd5ebd78', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': 'd9ebc600-4412-4f0a-890f-7836cb58f4a2', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='for any resulting algorithmic discrimination. 
\\n26\\nAlgorithmic \\nDiscrimination \\nProtections'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 0, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': 'dd370438-231c-41db-b7b1-b4f1e7673cf7', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='BLUEPRINT FOR AN \\nAI BILL OF \\nRIGHTS \\nMAKING AUTOMATED \\nSYSTEMS WORK FOR \\nTHE AMERICAN PEOPLE \\nOCTOBER 2022'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 38, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '7b595edf-6178-4a54-9e64-edfc1e18a497', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='guide the design of provenance data-tracking techniques. \\nHuman-AI Conļ¬guration; \\nInformation Integrity \\nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \\ndata. \\nHarmful Bias and Homogenization \\nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 59, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': 'b8f6f44e-9ca6-4d28-956c-159f9aa284bb', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \\nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \\nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \\nhttps://www.nist.gov/itl/ai-risk-management-framework \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \\nRisks and Trustworthiness. \\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \\nRMF Proļ¬les. 
https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \\nDescriptions of AI Actor Tasks.'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 57, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '38d650ed-d707-4289-8f97-6233a7dfb85a', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='54 \\nAppendix B. References \\nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \\nAI Incident Database. https://incidentdatabase.ai/ \\nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \\nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \\nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \\nBing Chat: Data Exļ¬ltration Exploit Explained. Embrace The Red. \\nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exļ¬ltration-poc-and-ļ¬x/ \\nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \\nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \\nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \\nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \\nBrowne, D. et al. (2023) Securing the AI Pipeline. 
Mandiant.'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 12, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': 'd5048b39-7eb0-4c6f-b97a-083b66aa6ac2', '_collection_name': 'ai-safety-sr-arctic-embed-l-recursive'}, page_content='Priorities Related to Information Integrity Research and Development.')]" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "recursive_retriever.invoke(\"What steps can organizations take to minimize bias in AI models?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "# Trying a contextual compression retriever\n", + "from langchain.retrievers import ContextualCompressionRetriever\n", + "from langchain.retrievers.document_compressors import LLMChainExtractor\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "base_retriever = recursive_retriever\n", + "\n", + "# Create a contextual compressor\n", + "compressor_llm = ChatOpenAI(temperature=0, model_name=\"gpt-4o\", max_tokens=4000)\n", + "compressor = LLMChainExtractor.from_llm(compressor_llm)\n", + "\n", + "# Combine the retriever with the compressor\n", + "compression_retriever = ContextualCompressionRetriever(\n", + " base_compressor=compressor,\n", + " base_retriever=base_retriever\n", + ")\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter Your OpenAI API Key: \")" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "\n", + "RAG_PROMPT = \"\"\"\\\n", + "Given a provided context and question, you must answer the question based only on context.\n", + "\n", + "If you cannot answer the question based on the context - you must say \"I don't know\".\n", + "\n", + "Context: {context}\n", + "Question: {question}\n", + "\"\"\"\n", + "\n", + "rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_openai import ChatOpenAI\n", + "\n", + "# Using the same model used in the app.\n", + "chat_model_name = \"gpt-4o\"\n", + "llm = ChatOpenAI(model=chat_model_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [], + "source": [ + "from operator import itemgetter\n", + "from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n", + "from langchain.schema import StrOutputParser\n", + "\n", + "ai_safety_rag_chain = (\n", + " {\"context\": itemgetter(\"question\") | compression_retriever, \"question\": itemgetter(\"question\")}\n", + " | rag_prompt | llm | StrOutputParser()\n", + ")" + ] + },
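+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Quick sanity check before running the full chain (illustrative cell, not executed in this run; the sample question is an assumption): peek at what the compression retriever alone returns, to confirm the LLM extractor trims the retrieved chunks rather than dropping them." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Illustrative sanity check -- assumes the cells above have been run.\n", + "# Compressed hits should be shorter extracts of the base retriever's hits.\n", + "sample_question = \"What steps can organizations take to minimize bias in AI models?\"\n", + "compressed_docs = compression_retriever.invoke(sample_question)\n", + "print(f\"{len(compressed_docs)} compressed documents\")\n", + "for doc in compressed_docs[:3]:\n", + "    print(doc.page_content[:200], \"\\n---\")" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Companies can ensure AI does not violate data privacy laws by 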
incorporating built-in protections and ensuring that data collection conforms to reasonable expectations. They should collect only the data strictly necessary for the specific context and seek user permission, respecting their decisions regarding the collection, use, access, transfer, and deletion of data. If obtaining user permission is not possible, alternative privacy by design safeguards should be used. Additionally, systems should avoid user experience and design decisions that obfuscate user choice or burden users with privacy-invasive defaults.'" + ] + }, + "execution_count": 39, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ai_safety_rag_chain.invoke({\"question\" : \"How can companies ensure AI does not violate data privacy laws?\"})" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"I don't know.\"" + ] + }, + "execution_count": 40, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ai_safety_rag_chain.invoke({\"question\" : \"What are the implications of using GAI systems for organizations in terms of risk management and compliance?\"})" + ] + },
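+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: the \"I don't know.\" answer above suggests the 0.8 similarity-score threshold may be filtering out every candidate chunk for that query. An illustrative follow-up (the 0.5 below is an assumed starting point, not a tuned value) is to loosen the threshold and rebuild the chain with this retriever." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Illustrative tweak -- assumed threshold for experimentation, not a tuned value.\n", + "looser_retriever = recursive_vector_store.as_retriever(search_type=\"similarity_score_threshold\",\n", + "                                   search_kwargs={'k':10,'score_threshold': 0.5})" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/Task 1/settingup-vectorstore-chunking-strategy.ipynb b/Tasks/Task 1/settingup-vectorstore-chunking-strategy.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..5e33f6983eba8b263f2f8fb78190af0b791671f4 --- /dev/null +++ b/Tasks/Task 1/settingup-vectorstore-chunking-strategy.ipynb @@ -0,0 +1,1474 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: langchain in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (0.3.0)\n", + "Requirement already satisfied: langchain-core in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (0.3.2)\n", + "Requirement already satisfied: langchain-community in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (0.3.0)\n", + "Requirement already satisfied: langchain-experimental in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (0.3.0)\n", + "Requirement already satisfied: langchain-qdrant in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (0.1.4)\n", + "Requirement already satisfied: qdrant-client in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (1.11.2)\n", + "Requirement already satisfied: tiktoken in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (0.7.0)\n", + "Requirement already satisfied: pymupdf in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (1.24.10)\n", + "Requirement already satisfied: PyYAML>=5.3 in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (from langchain) (6.0.1)\n", + "Requirement already satisfied: SQLAlchemy<3,>=1.4 in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (from langchain) (2.0.32)\n", + "Requirement already satisfied: aiohttp<4.0.0,>=3.8.3 in 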
/opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (from langchain) (3.9.5)\n", + "...\n", + "Requirement already satisfied: hpack<5,>=4.0 in 
/opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (from h2<5,>=3->httpx[http2]>=0.20.0->qdrant-client) (4.0.0)\n", + "Requirement already satisfied: mypy-extensions>=0.3.0 in /opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain-community) (1.0.0)\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install \\\n", + " langchain \\\n", + " langchain-core\\\n", + " langchain-community \\\n", + " langchain-experimental \\\n", + " langchain-qdrant \\\n", + " qdrant-client \\\n", + " tiktoken \\\n", + " pymupdf \n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from uuid import uuid4\n", + "\n", + "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", + "os.environ[\"LANGCHAIN_PROJECT\"] = f\"AIE4 - LangGraph - {uuid4().hex[0:8]}\"\n", + "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"LangSmith API Key: \")" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages/pypdfium2/_helpers/textpage.py:80: UserWarning: get_text_range() call with default params will be implicitly redirected to get_text_bounded()\n", + " warnings.warn(\"get_text_range() call with default params will be implicitly redirected to get_text_bounded()\")\n" + ] + } + ], + "source": [ + "from langchain_community.document_loaders import PyMuPDFLoader\n", + "from langchain.document_loaders import PyPDFium2Loader\n", + "# Additional content \n", + "# https://arxiv.org/pdf/2306.12001\n", + "BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n", + "bor_documents = PyMuPDFLoader(file_path=BOR_FILE_PATH).load()\n", + "\n", + "NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n", + "nist_documents = PyMuPDFLoader(file_path=NIST_FILE_PATH).load()\n", + "\n", + "# PyPDFium2Loader_bor_documents = PyPDFium2Loader(file_path=BOR_FILE_PATH).load()\n", + "# PyPDFium2Loader_nist_documents = PyPDFium2Loader(file_path=NIST_FILE_PATH).load()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Blueprint for an AI Bill of Rights'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "bor_title = str(bor_documents[0].metadata.get(\"title\"))\n", + "bor_title" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile'" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nist_title = str(nist_documents[0].metadata.get(\"title\"))\n", + "nist_title" + ] + }, + { + "cell_type": "code", + "execution_count": 61, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Document(metadata={'source': 
'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 1, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': ''}, page_content=' \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\nAbout this Document \\nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People was \\npublished by the White House Office of Science and Technology Policy in October 2022. This framework was \\nreleased one year after OSTP announced the launch of a process to develop ā€œa bill of rights for an AI-powered \\nworld.ā€ Its release follows a year of public engagement to inform this initiative. The framework is available \\nonline at: https://www.whitehouse.gov/ostp/ai-bill-of-rights \\nAbout the Office of Science and Technology Policy \\nThe Office of Science and Technology Policy (OSTP) was established by the National Science and Technology \\nPolicy, Organization, and Priorities Act of 1976 to provide the President and others within the Executive Office \\nof the President with advice on the scientific, engineering, and technological aspects of the economy, national \\nsecurity, health, foreign relations, the environment, and the technological recovery and use of resources, among \\nother topics. OSTP leads interagency science and technology policy coordination efforts, assists the Office of \\nManagement and Budget (OMB) with an annual review and analysis of Federal research and development in \\nbudgets, and serves as a source of scientific and technological analysis and judgment for the President with \\nrespect to major policies, plans, and programs of the Federal Government. \\nLegal Disclaimer \\nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People is a white paper \\npublished by the White House Office of Science and Technology Policy. It is intended to support the \\ndevelopment of policies and practices that protect civil rights and promote democratic values in the building, \\ndeployment, and governance of automated systems. \\nThe Blueprint for an AI Bill of Rights is non-binding and does not constitute U.S. government policy. It \\ndoes not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \\ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \\ntherefore does not require compliance with the principles described herein. It also is not determinative of what \\nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \\nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \\nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \\nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \\nintelligence activities. \\nThe appropriate application of the principles set forth in this white paper depends significantly on the \\ncontext in which automated systems are being utilized. 
In some circumstances, application of these principles \\nin whole or in part may not be appropriate given the intended use of automated systems to achieve government \\nagency missions. Future sector-specific guidance will likely be necessary and important for guiding the use of \\nautomated systems in certain settings such as AI systems used as part of school building security or automated \\nhealth diagnostic systems. \\nThe Blueprint for an AI Bill of Rights recognizes that law enforcement activities require a balancing of \\nequities, for example, between the protection of sensitive law enforcement information and the principle of \\nnotice; as such, notice may not be appropriate, or may need to be adjusted to protect sources, methods, and \\nother law enforcement equities. Even in contexts where these principles may not apply in whole or in part, \\nfederal departments and agencies remain subject to judicial, privacy, and civil liberties oversight as well as \\nexisting policies and safeguards that govern automated systems, including, for example, Executive Order 13960, \\nPromoting the Use of Trustworthy Artificial Intelligence in the Federal Government (December 2020). \\nThis white paper recognizes that national security (which includes certain law enforcement and \\nhomeland security activities) and defense activities are of increased sensitivity and interest to our nationā€™s \\nadversaries and are often subject to special requirements, such as those governing classified information and \\nother protected data. Such activities require alternative, compatible safeguards through existing policies that \\ngovern automated systems and AI, such as the Department of Defense (DOD) AI Ethical Principles and \\nResponsible AI Implementation Pathway and the Intelligence Community (IC) AI Ethics Principles and \\nFramework. The implementation of these policies to national security and defense activities can be informed by \\nthe Blueprint for an AI Bill of Rights where feasible. \\nThe Blueprint for an AI Bill of Rights is not intended to, and does not, create any legal right, benefit, or \\ndefense, substantive or procedural, enforceable at law or in equity by any party against the United States, its \\ndepartments, agencies, or entities, its officers, employees, or agents, or any other person, nor does it constitute a \\nwaiver of sovereign immunity. \\nCopyright Information \\nThis document is a work of the United States Government and is in the public domain (see 17 U.S.C. Ā§105). \\n2\\n')" + ] + }, + "execution_count": 61, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "bor_documents[1]" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 1}, page_content='About this Document\\r\\nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People was \\r\\npublished by the White House Office of Science and Technology Policy in October 2022. This framework was \\r\\nreleased one year after OSTP announced the launch of a process to develop ā€œa bill of rights for an AI-powered \\r\\nworld.ā€ Its release follows a year of public engagement to inform this initiative. 
The framework is available \\r\\nonline at: https://www.whitehouse.gov/ostp/ai-bill-of-rights\\r\\nAbout the Office of Science and Technology Policy\\r\\nThe Office of Science and Technology Policy (OSTP) was established by the National Science and Technology\\r\\nPolicy, Organization, and Priorities Act of 1976 to provide the President and others within the Executive Office \\r\\nof the President with advice on the scientific, engineering, and technological aspects of the economy, national \\r\\nsecurity, health, foreign relations, the environment, and the technological recovery and use of resources, among \\r\\nother topics. OSTP leads interagency science and technology policy coordination efforts, assists the Office of \\r\\nManagement and Budget (OMB) with an annual review and analysis of Federal research and development in \\r\\nbudgets, and serves as a source of scientific and technological analysis and judgment for the President with \\r\\nrespect to major policies, plans, and programs of the Federal Government.\\r\\nLegal Disclaimer\\r\\nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People is a white paper\\r\\npublished by the White House Office of Science and Technology Policy. It is intended to support the \\r\\ndevelopment of policies and practices that protect civil rights and promote democratic values in the building, \\r\\ndeployment, and governance of automated systems.\\r\\nThe Blueprint for an AI Bill of Rights is non-binding and does not constitute U.S. government policy. It \\r\\ndoes not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \\r\\ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \\r\\ntherefore does not require compliance with the principles described herein. It also is not determinative of what \\r\\nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \\r\\nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \\r\\nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \\r\\nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \\r\\nintelligence activities.\\r\\nThe appropriate application of the principles set forth in this white paper depends significantly on the \\r\\ncontext in which automated systems are being utilized. In some circumstances, application of these principles \\r\\nin whole or in part may not be appropriate given the intended use of automated systems to achieve government \\r\\nagency missions. Future sector-specific guidance will likely be necessary and important for guiding the use of \\r\\nautomated systems in certain settings such as AI systems used as part of school building security or automated \\r\\nhealth diagnostic systems.\\r\\nThe Blueprint for an AI Bill of Rights recognizes that law enforcement activities require a balancing of \\r\\nequities, for example, between the protection of sensitive law enforcement information and the principle of \\r\\nnotice; as such, notice may not be appropriate, or may need to be adjusted to protect sources, methods, and \\r\\nother law enforcement equities. 
Even in contexts where these principles may not apply in whole or in part, \\r\\nfederal departments and agencies remain subject to judicial, privacy, and civil liberties oversight as well as \\r\\nexisting policies and safeguards that govern automated systems, including, for example, Executive Order 13960, \\r\\nPromoting the Use of Trustworthy Artificial Intelligence in the Federal Government (December 2020).\\r\\nThis white paper recognizes that national security (which includes certain law enforcement and \\r\\nhomeland security activities) and defense activities are of increased sensitivity and interest to our nationā€™s \\r\\nadversaries and are often subject to special requirements, such as those governing classified information and \\r\\nother protected data. Such activities require alternative, compatible safeguards through existing policies that \\r\\ngovern automated systems and AI, such as the Department of Defense (DOD) AI Ethical Principles and \\r\\nResponsible AI Implementation Pathway and the Intelligence Community (IC) AI Ethics Principles and \\r\\nFramework. The implementation of these policies to national security and defense activities can be informed by \\r\\nthe Blueprint for an AI Bill of Rights where feasible.\\r\\nThe Blueprint for an AI Bill of Rights is not intended to, and does not, create any legal right, benefit, or \\r\\ndefense, substantive or procedural, enforceable at law or in equity by any party against the United States, its \\r\\ndepartments, agencies, or entities, its officers, employees, or agents, or any other person, nor does it constitute a \\r\\nwaiver of sovereign immunity.\\r\\nCopyright Information\\r\\nThis document is a work of the United States Government and is in the public domain (see 17 U.S.C. Ā§105). \\r\\n2\\n')" + ] + }, + "execution_count": 60, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "PyPDFium2Loader_bor_documents[1]" + ] + }, + { + "cell_type": "code", + "execution_count": 123, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages/pydantic/_internal/_fields.py:161: UserWarning: Field \"model_name\" has conflict with protected namespace \"model_\".\n", + "\n", + "You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n", + " warnings.warn(\n", + "/opt/miniconda3/envs/llmops-course/lib/python3.11/site-packages/sentence_transformers/cross_encoder/CrossEncoder.py:11: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. 
in jupyter console)\n", + "  from tqdm.autonotebook import tqdm, trange\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "676e2dea406c411e970629d869df7495", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "modules.json:   0%|          | 0.00/349 [00:00<?, ?B/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from typing import List\n", + "\n", + "from langchain.schema import BaseRetriever, Document\n", + "\n", + "\n", + "class ContextEnrichedRetriever(BaseRetriever):\n", + "    \"\"\"Wraps a base retriever and enriches each hit with its surrounding lines.\"\"\"\n", + "\n", + "    retriever: BaseRetriever\n", + "    window_size: int = 1\n", + "\n", + "    def enrich_context(self, document: Document, window_size: int = 1) -> str:\n", + "        \"\"\"\n", + "        Enrich the document context by adding the surrounding text (context window).\n", + "        Args:\n", + "        - document: The retrieved document.\n", + "        - window_size: Number of adjacent passages to include as context (before and after).\n", + "        \"\"\"\n", + "        doc_text = document.page_content.split(\"\\n\")\n", + "        enriched_text = []\n", + "        for i, passage in enumerate(doc_text):\n", + "            context = doc_text[max(0, i - window_size):min(len(doc_text), i + window_size + 1)]\n", + "            # Overlapping windows repeat their neighbors, so only append lines that\n", + "            # have not been collected yet (this was the source of the duplicates).\n", + "            for line in context:\n", + "                if line not in enriched_text:\n", + "                    enriched_text.append(line)\n", + "        return \"\\n\".join(enriched_text)\n", + "\n", + "    def _get_relevant_documents(self, query: str) -> List[Document]:\n", + "        \"\"\"\n", + "        Retrieve documents and apply context enrichment.\n", + "        Args:\n", + "        - query: The query string.\n", + "        Returns:\n", + "        - List of enriched documents.\n", + "        \"\"\"\n", + "        documents = self.retriever.get_relevant_documents(query)\n", + "        enriched_documents = []\n", + "        for doc in documents:\n", + "            enriched_content = self.enrich_context(doc, self.window_size)\n", + "            enriched_doc = Document(page_content=enriched_content, metadata=doc.metadata)\n", + "            enriched_documents.append(enriched_doc)\n", + "        return enriched_documents\n", + "\n", + "    async def _aget_relevant_documents(self, query: str) -> List[Document]:\n", + "        \"\"\"\n", + "        Async version of the document retrieval and enrichment.\n", + "        Args:\n", + "        - query: The query string.\n", + "        Returns:\n", + "        - List of enriched documents.\n", + "        \"\"\"\n", + "        documents = await self.retriever.aget_relevant_documents(query)\n", + "        enriched_documents = []\n", + "        for doc in documents:\n", + "            enriched_content = self.enrich_context(doc, self.window_size)\n", + "            enriched_doc = Document(page_content=enriched_content, metadata=doc.metadata)\n", + "            enriched_documents.append(enriched_doc)\n", + "        return enriched_documents\n" + ] + }, + { + "cell_type": "code", + "execution_count": 111, + "metadata": {}, + "outputs": [], + "source": [ + "# enrich_context de-duplicates overlapping windows to avoid repeated passages.\n", + "context_enriched_retriever = ContextEnrichedRetriever(retriever=retriver, window_size=2)" + ] + },
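 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Quick sanity check, as a minimal sketch: `sample_query` below is an illustrative query, and the cell assumes the `retriver` vector-store retriever defined earlier is available. With the de-duplication in `enrich_context`, each enriched hit should no longer balloon with repeated lines.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Compare raw hits with enriched hits (assumes `retriver` from the earlier cells).\n", + "sample_query = \"What protections does the Blueprint describe for data privacy?\"  # illustrative\n", + "raw_docs = retriver.invoke(sample_query)\n", + "enriched_docs = context_enriched_retriever.invoke(sample_query)\n", + "\n", + "for raw, enriched in zip(raw_docs, enriched_docs):\n", + "    enriched_lines = enriched.page_content.split(\"\\n\")\n", + "    # Line counts should now track the raw hit (or be slightly lower if the raw text repeats lines).\n", + "    print(len(raw.page_content.split(\"\\n\")), len(enriched_lines), len(set(enriched_lines)))\n" + ] + },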
 + { + "cell_type": "code", + "execution_count": 128, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0: Blueprint for an AI Bill of Rights\n", + "In some cases, exceptions to \n", + "the principles described in the Blueprint for an AI Bill of Rights may be necessary to comply with existing law, \n", + "conform to the practicalities of a specific use case, or balance competing public interests. In particular, law \n", + "enforcement, and other regulatory contexts may require government actors to protect civil rights, civil liberties, \n", + "and privacy in a manner consistent with, but using alternate mechanisms to, the specific principles discussed in \n", + "this framework. The Blueprint for an AI Bill of Rights is meant to assist governments and the private sector in \n", + "moving principles into practice. The expectations given in the Technical Companion are meant to serve as a blueprint for the development of \n", + "additional technical standards and practices that should be tailored for particular sectors and contexts. While \n", + "existing laws informed the development of the Blueprint for an AI Bill of Rights, this framework does not detail \n", + "those laws beyond providing them as examples, where appropriate, of existing protective measures. This \n", + "framework instead shares a broad, forward-leaning vision of recommended principles for automated system \n", + "development and use to inform private and public involvement with these systems where they have the poten-\n", + "tial to meaningfully impact rights, opportunities, or access. Additionally, this framework does not analyze or \n", + "take a position on legislative and regulatory proposals in municipal, state, and federal government, or those in \n", + "other countries. We have seen modest progress in recent years, with some state and local governments responding to these prob-\n", + "lems with legislation, and some courts extending longstanding statutory protections to new and emerging tech-\n", + "nologies. There are companies working to incorporate additional protections in their design and use of auto-\n", + "mated systems, and researchers developing innovative guardrails. Advocates, researchers, and government \n", + "organizations have proposed principles for the ethical use of AI and other automated systems. These include \n", + "the Organization for Economic Co-operation and Development’s (OECD’s) 2019 Recommendation on Artificial \n", + "Intelligence, which includes principles for responsible stewardship of trustworthy AI and which the United \n", + "States adopted, and Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \n", + "Federal Government, which sets out principles that govern the federal government’s use of AI. The Blueprint \n", + "for an AI Bill of Rights is fully consistent with these principles and with the direction in Executive Order 13985 \n", + "on Advancing Racial Equity and Support for Underserved Communities Through the Federal Government. These principles find kinship in the Fair Information Practice Principles (FIPPs), derived from the 1973 report \n", + "of an advisory committee to the U.S. Department of Health, Education, and Welfare, Records, Computers, \n", + "and the Rights of Citizens.4 While there is no single, universal articulation of the FIPPs, these core \n", + "principles for managing information about individuals have been incorporated into data privacy laws and \n", + "policies across the globe.5 The Blueprint for an AI Bill of Rights embraces elements of the FIPPs that are \n", + "particularly relevant to automated systems, without articulating a specific set of FIPPs or scoping \n", + "applicability or the interests served to a single particular domain, like privacy, civil rights and civil liberties, \n", + "ethics, or risk management. 
The Technical Companion builds on this prior work to provide practical next \n", + "steps to move these principles into practice and promote common approaches that allow technological \n", + "innovation to flourish while protecting people from harm.\n", + "1: Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile\n", + " \n", + "26 \n", + "MAP 4.1: Approaches for mapping AI technology and legal risks of its components ā€“ including the use of third-party data or \n", + "software ā€“ are in place, followed, and documented, as are risks of infringement of a third-partyā€™s intellectual property or other \n", + "rights. Action ID \n", + "Suggested Action \n", + "GAI Risks \n", + "MP-4.1-001 Conduct periodic monitoring of AI-generated content for privacy risks; address any \n", + "possible instances of PII or sensitive data exposure. Data Privacy \n", + "MP-4.1-002 Implement processes for responding to potential intellectual property infringement \n", + "claims or other rights. Intellectual Property \n", + "MP-4.1-003 \n", + "Connect new GAI policies, procedures, and processes to existing model, data, \n", + "software development, and IT governance and to legal, compliance, and risk \n", + "management activities. Information Security; Data Privacy \n", + "MP-4.1-004 Document training data curation policies, to the extent possible and according to \n", + "applicable laws and policies. Intellectual Property; Data Privacy; \n", + "Obscene, Degrading, and/or \n", + "Abusive Content \n", + "MP-4.1-005 \n", + "Establish policies for collection, retention, and minimum quality of data, in \n", + "consideration of the following risks: Disclosure of inappropriate CBRN information; \n", + "Use of Illegal or dangerous content; Oļ¬€ensive cyber capabilities; Training data \n", + "imbalances that could give rise to harmful biases; Leak of personally identiļ¬able \n", + "information, including facial likenesses of individuals. CBRN Information or Capabilities; \n", + "Intellectual Property; Information \n", + "Security; Harmful Bias and \n", + "Homogenization; Dangerous, \n", + "Violent, or Hateful Content; Data \n", + "Privacy \n", + "MP-4.1-006 Implement policies and practices deļ¬ning how third-party intellectual property and \n", + "training data will be used, stored, and protected. Intellectual Property; Value Chain \n", + "and Component Integration \n", + "MP-4.1-007 Re-evaluate models that were ļ¬ne-tuned or enhanced on top of third-party \n", + "models.\n", + "2: Blueprint for an AI Bill of Rights\n", + "Data should \n", + "only be collected or used for the purposes of training or testing machine learning models if such collection and \n", + "use is legal and consistent with the expectations of the people whose data is collected. User experience \n", + "research should be conducted to confirm that people understand what data is being collected about them and \n", + "how it will be used, and that this collection matches their expectations and desires. Data collection and use-case scope limits. Data collection should be limited in scope, with specific, \n", + "narrow identified goals, to avoid \"mission creep.\" Anticipated data collection should be determined to be \n", + "strictly necessary to the identified goals and should be minimized as much as possible. 
Data collected based on \n", + "these identified goals and for a specific context should not be used in a different context without assessing for \n", + "new privacy risks and implementing appropriate mitigation measures, which may include express consent. Clear timelines for data retention should be established, with data deleted as soon as possible in accordance \n", + "with legal or policy-based limitations. Determined data retention timelines should be documented and justiĀ­\n", + "fied. Risk identification and mitigation. Entities that collect, use, share, or store sensitive data should \n", + "attempt to proactively identify harms and seek to manage them so as to avoid, mitigate, and respond appropriĀ­\n", + "ately to identified risks. Appropriate responses include determining not to process data when the privacy risks \n", + "outweigh the benefits or implementing measures to mitigate acceptable risks. Appropriate responses do not \n", + "include sharing or transferring the privacy risks to users via notice or consent requests where users could not \n", + "reasonably be expected to understand the risks without further support. Privacy-preserving security. Entities creating, using, or governing automated systems should follow \n", + "privacy and security best practices designed to ensure data and metadata do not leak beyond the specific \n", + "consented use case. Best practices could include using privacy-enhancing cryptography or other types of \n", + "privacy-enhancing technologies or fine-grained permissions and access control mechanisms, along with \n", + "conventional system security protocols.\n", + "3: Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile\n", + " \n", + "27 \n", + "MP-4.1-010 \n", + "Conduct appropriate diligence on training data use to assess intellectual property, \n", + "and privacy, risks, including to examine whether use of proprietary or sensitive \n", + "training data is consistent with applicable laws. Intellectual Property; Data Privacy \n", + "AI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities \n", + " \n", + "MAP 5.1: Likelihood and magnitude of each identiļ¬ed impact (both potentially beneļ¬cial and harmful) based on expected use, past \n", + "uses of AI systems in similar contexts, public incident reports, feedback from those external to the team that developed or deployed \n", + "the AI system, or other data are identiļ¬ed and documented. Action ID \n", + "Suggested Action \n", + "GAI Risks \n", + "MP-5.1-001 Apply TEVV practices for content provenance (e.g., probing a system's synthetic \n", + "data generation capabilities for potential misuse or vulnerabilities. Information Integrity; Information \n", + "Security \n", + "MP-5.1-002 \n", + "Identify potential content provenance harms of GAI, such as misinformation or \n", + "disinformation, deepfakes, including NCII, or tampered content. Enumerate and \n", + "rank risks based on their likelihood and potential impact, and determine how well \n", + "provenance solutions address speciļ¬c risks and/or harms. 
Information Integrity; Dangerous, \n", + "Violent, or Hateful Content; \n", + "Obscene, Degrading, and/or \n", + "Abusive Content \n", + "MP-5.1-003 \n", + "Consider disclosing use of GAI to end users in relevant contexts, while considering \n", + "the objective of disclosure, the context of use, the likelihood and magnitude of the \n", + "risk posed, the audience of the disclosure, as well as the frequency of the \n", + "disclosures. Human-AI Conļ¬guration \n", + "MP-5.1-004 Prioritize GAI structured public feedback processes based on risk assessment \n", + "estimates. Information Integrity; CBRN \n", + "Information or Capabilities; \n", + "Dangerous, Violent, or Hateful \n", + "Content; Harmful Bias and \n", + "Homogenization \n", + "MP-5.1-005 Conduct adversarial role-playing exercises, GAI red-teaming, or chaos testing to \n", + "identify anomalous or unforeseen failure modes. Information Security \n", + "MP-5.1-006 \n", + "Proļ¬le threats and negative impacts arising from GAI systems interacting with, \n", + "manipulating, or generating content, and outlining known and potential \n", + "vulnerabilities and the likelihood of their occurrence.\n", + "4: Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile\n", + " \n", + "35 \n", + "MEASURE 2.9: The AI model is explained, validated, and documented, and AI system output is interpreted within its context ā€“ as \n", + "identiļ¬ed in the MAP function ā€“ to inform responsible use and governance. Action ID \n", + "Suggested Action \n", + "GAI Risks \n", + "MS-2.9-001 \n", + "Apply and document ML explanation results such as: Analysis of embeddings, \n", + "Counterfactual prompts, Gradient-based attributions, Model \n", + "compression/surrogate models, Occlusion/term reduction. Confabulation \n", + "MS-2.9-002 \n", + "Document GAI model details including: Proposed use and organizational value; \n", + "Assumptions and limitations, Data collection methodologies; Data provenance; \n", + "Data quality; Model architecture (e.g., convolutional neural network, \n", + "transformers, etc.); Optimization objectives; Training algorithms; RLHF \n", + "approaches; Fine-tuning or retrieval-augmented generation approaches; \n", + "Evaluation data; Ethical considerations; Legal and regulatory requirements. Information Integrity; Harmful Bias \n", + "and Homogenization \n", + "AI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n", + " \n", + "MEASURE 2.10: Privacy risk of the AI system ā€“ as identiļ¬ed in the MAP function ā€“ is examined and documented. Action ID \n", + "Suggested Action \n", + "GAI Risks \n", + "MS-2.10-001 \n", + "Conduct AI red-teaming to assess issues such as: Outputting of training data \n", + "samples, and subsequent reverse engineering, model extraction, and \n", + "membership inference risks; Revealing biometric, conļ¬dential, copyrighted, \n", + "licensed, patented, personal, proprietary, sensitive, or trade-marked information; \n", + "Tracking or revealing location information of users or members of training \n", + "datasets. 
Human-AI Conļ¬guration; \n", + "Information Integrity; Intellectual \n", + "Property \n", + "MS-2.10-002 \n", + "Engage directly with end-users and other stakeholders to understand their \n", + "expectations and concerns regarding content provenance.\n", + "5: Blueprint for an AI Bill of Rights\n", + " \n", + " \n", + " \n", + " \n", + " \n", + "SECTION TITLE\n", + "DATA PRIVACY\n", + "You should be protected from abusive data practices via built-in protections and you \n", + "should have agency over how data about you is used. You should be protected from violations of \n", + "privacy through design choices that ensure such protections are included by default, including ensuring that \n", + "data collection conforms to reasonable expectations and that only data strictly necessary for the specific \n", + "context is collected. Designers, developers, and deployers of automated systems should seek your permission \n", + "and respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \n", + "ways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \n", + "used. Systems should not employ user experience and design decisions that obfuscate user choice or burden \n", + "users with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases \n", + "where it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable \n", + "in plain language, and give you agency over data collection and the specific context of use; current hard-toĀ­\n", + "understand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \n", + "restrictions for data and inferences related to sensitive domains, including health, work, education, criminal \n", + "justice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \n", + "related inferences should only be used for necessary functions, and you should be protected by ethical review \n", + "and use prohibitions. You and your communities should be free from unchecked surveillance; surveillance \n", + "technologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \n", + "potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \n", + "should not be used in education, work, housing, or in other contexts where the use of such surveillance \n", + "technologies is likely to limit rights, opportunities, or access. Whenever possible, you should have access to \n", + "reporting that confirms your data decisions have been respected and provides an assessment of the \n", + "potential impact of surveillance technologies on your rights, opportunities, or access. 
NOTICE AND EXPLANATION\n", + "You should know that an automated system is being used and understand how and why it \n", + "contributes to outcomes that impact you.\n", + "6: Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile\n", + " \n", + "48 \n", + "ā€¢ Data protection \n", + "ā€¢ Data retention \n", + "ā€¢ Consistency in use of deļ¬ning key terms \n", + "ā€¢ Decommissioning \n", + "ā€¢ Discouraging anonymous use \n", + "ā€¢ Education \n", + "ā€¢ Impact assessments \n", + "ā€¢ Incident response \n", + "ā€¢ Monitoring \n", + "ā€¢ Opt-outs \n", + "ā€¢ Risk-based controls \n", + "ā€¢ Risk mapping and measurement \n", + "ā€¢ Science-backed TEVV practices \n", + "ā€¢ Secure software development practices \n", + "ā€¢ Stakeholder engagement \n", + "ā€¢ Synthetic content detection and \n", + "labeling tools and techniques \n", + "ā€¢ Whistleblower protections \n", + "ā€¢ Workforce diversity and \n", + "interdisciplinary teams\n", + "Establishing acceptable use policies and guidance for the use of GAI in formal human-AI teaming settings \n", + "as well as diļ¬€erent levels of human-AI conļ¬gurations can help to decrease risks arising from misuse, \n", + "abuse, inappropriate repurpose, and misalignment between systems and users. These practices are just \n", + "one example of adapting existing governance protocols for GAI contexts.\n", + "7: Blueprint for an AI Bill of Rights\n", + "Federal government surveillance and other collection and \n", + "use of data is governed by legal protections that help to protect civil liberties and provide for limits on data retention \n", + "in some cases. Many states have also enacted consumer data privacy protection regimes to address some of these \n", + "harms. However, these are not yet standard practices, and the United States lacks a comprehensive statutory or regulatory \n", + "framework governing the rights of the public when it comes to personal data. While a patchwork of laws exists to \n", + "guide the collection and use of personal data in specific contexts, including health, employment, education, and credit, \n", + "it can be unclear how these laws apply in other contexts and in an increasingly automated society. Additional protecĀ­\n", + "tions would assure the American public that the automated systems they use are not monitoring their activities, \n", + "collecting information on their lives, or otherwise surveilling them without context-specific consent or legal authoriĀ­\n", + "ty. 31\n", + "\n", + "8: Blueprint for an AI Bill of Rights\n", + " \n", + " \n", + " \n", + "SECTION TITLE\n", + "Applying The Blueprint for an AI Bill of Rights \n", + "While many of the concerns addressed in this framework derive from the use of AI, the technical \n", + "capabilities and specific definitions of such systems change with the speed of innovation, and the potential \n", + "harms of their use occur even with less technologically sophisticated tools. Thus, this framework uses a two-\n", + "part test to determine what systems are in scope. This framework applies to (1) automated systems that (2) \n", + "have the potential to meaningfully impact the American publicā€™s rights, opportunities, or access to \n", + "critical resources or services. These rights, opportunities, and access to critical resources of services should \n", + "be enjoyed equally and be fully protected, regardless of the changing role that automated systems may play in \n", + "our lives. 
This framework describes protections that should be applied with respect to all automated systems that \n", + "have the potential to meaningfully impact individuals' or communities' exercise of: \n", + "RIGHTS, OPPORTUNITIES, OR ACCESS\n", + "Civil rights, civil liberties, and privacy, including freedom of speech, voting, and protections from discrimiĀ­\n", + "nation, excessive punishment, unlawful surveillance, and violations of privacy and other freedoms in both \n", + "public and private sector contexts; \n", + "Equal opportunities, including equitable access to education, housing, credit, employment, and other \n", + "programs; or, \n", + "Access to critical resources or services, such as healthcare, financial services, safety, social services, \n", + "non-deceptive information about goods and services, and government benefits. A list of examples of automated systems for which these principles should be considered is provided in the \n", + "Appendix. The Technical Companion, which follows, offers supportive guidance for any person or entity that \n", + "creates, deploys, or oversees automated systems. Considered together, the five principles and associated practices of the Blueprint for an AI Bill of \n", + "Rights form an overlapping set of backstops against potential harms. This purposefully overlapping \n", + "framework, when taken as a whole, forms a blueprint to help protect the public from harm. The measures taken to realize the vision set forward in this framework should be proportionate \n", + "with the extent and nature of the harm, or risk of harm, to people's rights, opportunities, and \n", + "access. RELATIONSHIP TO EXISTING LAW AND POLICY\n", + "The Blueprint for an AI Bill of Rights is an exercise in envisioning a future where the American public is \n", + "protected from the potential harms, and can fully enjoy the benefits, of automated systems. It describes princiĀ­\n", + "ples that can help ensure these protections.\n", + "9: Blueprint for an AI Bill of Rights\n", + "In other cases, technologies do not work as intended or as promised, causing substantial and unjustified harm. Automated systems sometimes rely on data from other systems, including historical data, allowing irrelevant informaĀ­\n", + "tion from past decisions to infect decision-making in unrelated situations. In some cases, technologies are purposefulĀ­\n", + "ly designed to violate the safety of others, such as technologies designed to facilitate stalking; in other cases, intended \n", + "or unintended uses lead to unintended harms. Many of the harms resulting from these technologies are preventable, and actions are already being taken to protect \n", + "the public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \n", + "key development decisions are vetted by an ethics review; others have identified and mitigated harms found through \n", + "pre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consultaĀ­\n", + "tion processes that may be applied when considering the use of new automated systems, and existing product developĀ­\n", + "ment and testing practices already protect the American public from many potential harms. Still, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \n", + "these existing practices, increase confidence in the use of automated systems, and protect the American public. 
Inno-\nvators deserve clear rules of the road that allow new ideas to flourish, and the American public deserves protections \n", + "from unsafe outcomes. All can benefit from assurances that automated systems will be designed, tested, and consis-\ntently confirmed to work as intended, and that they will be proactively protected from foreseeable unintended harm-\nful outcomes.\n" + ] + } + ], + "source": [ + "docs = retriver.invoke(\"How can companies ensure AI does not violate data privacy laws?\")\n", + "for i, doc in enumerate(docs):\n", + "    print(f\"{i}: {doc.metadata.get('title')}\")\n", + "    print(doc.page_content)" + ] + }, + { + "cell_type": "code", + "execution_count": 129, + "metadata": {}, + "outputs": [], + "source": [ + "# Trying a contextual compression retriever\n", + "from langchain.retrievers import ContextualCompressionRetriever\n", + "from langchain.retrievers.document_compressors import LLMChainExtractor\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "base_retriever = retriver\n", + "\n", + "# Create a contextual compressor\n", + "compressor_llm = ChatOpenAI(temperature=0, model_name=\"gpt-4o\", max_tokens=4000)\n", + "compressor = LLMChainExtractor.from_llm(compressor_llm)\n", + "\n", + "# Combine the retriever with the compressor\n", + "compression_retriever = ContextualCompressionRetriever(\n", + "    base_compressor=compressor,\n", + "    base_retriever=base_retriever\n", + ")\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 130, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 29, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '1ba549ad3b9b488ab15014c90b909ad1', '_collection_name': 'ai-safety'}, page_content='Conduct periodic monitoring of AI-generated content for privacy risks; address any possible instances of PII or sensitive data exposure. \\n\\nConnect new GAI policies, procedures, and processes to existing model, data, software development, and IT governance and to legal, compliance, and risk management activities. \\n\\nDocument training data curation policies, to the extent possible and according to applicable laws and policies. 
\\n\\nEstablish policies for collection, retention, and minimum quality of data, in consideration of the following risks: Disclosure of inappropriate CBRN information; Use of Illegal or dangerous content; Offensive cyber capabilities; Training data imbalances that could give rise to harmful biases; Leak of personally identifiable information, including facial likenesses of individuals.'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 32, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '04acfe508f5c428caeeb90944c1d7386', '_collection_name': 'ai-safety'}, page_content='Data should \\nonly be collected or used for the purposes of training or testing machine learning models if such collection and \\nuse is legal and consistent with the expectations of the people whose data is collected. User experience \\nresearch should be conducted to confirm that people understand what data is being collected about them and \\nhow it will be used, and that this collection matches their expectations and desires. Data collection and use-case scope limits. Data collection should be limited in scope, with specific, \\nnarrow identified goals, to avoid \"mission creep.\" Anticipated data collection should be determined to be \\nstrictly necessary to the identified goals and should be minimized as much as possible. Data collected based on \\nthese identified goals and for a specific context should not be used in a different context without assessing for \\nnew privacy risks and implementing appropriate mitigation measures, which may include express consent. Clear timelines for data retention should be established, with data deleted as soon as possible in accordance \\nwith legal or policy-based limitations. Determined data retention timelines should be documented and justi\\xad\\nfied. Risk identification and mitigation. Entities that collect, use, share, or store sensitive data should \\nattempt to proactively identify harms and seek to manage them so as to avoid, mitigate, and respond appropri\\xad\\nately to identified risks. Appropriate responses include determining not to process data when the privacy risks \\noutweigh the benefits or implementing measures to mitigate acceptable risks. Appropriate responses do not \\ninclude sharing or transferring the privacy risks to users via notice or consent requests where users could not \\nreasonably be expected to understand the risks without further support. Privacy-preserving security. Entities creating, using, or governing automated systems should follow \\nprivacy and security best practices designed to ensure data and metadata do not leak beyond the specific \\nconsented use case. 
Best practices could include using privacy-enhancing cryptography or other types of \\nprivacy-enhancing technologies or fine-grained permissions and access control mechanisms, along with \\nconventional system security protocols.'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 30, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '7e66f3997e2243ad97a378ad1f2d9cd0', '_collection_name': 'ai-safety'}, page_content='Conduct appropriate diligence on training data use to assess intellectual property, \\nand privacy, risks, including to examine whether use of proprietary or sensitive \\ntraining data is consistent with applicable laws. Intellectual Property; Data Privacy \\nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 38, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '907f59c504d44945ae865021b6e9c713', '_collection_name': 'ai-safety'}, page_content='MEASURE 2.10: Privacy risk of the AI system ā€“ as identiļ¬ed in the MAP function ā€“ is examined and documented. Action ID \\nSuggested Action \\nGAI Risks \\nMS-2.10-001 \\nConduct AI red-teaming to assess issues such as: Outputting of training data \\nsamples, and subsequent reverse engineering, model extraction, and \\nmembership inference risks; Revealing biometric, conļ¬dential, copyrighted, \\nlicensed, patented, personal, proprietary, sensitive, or trade-marked information; \\nTracking or revealing location information of users or members of training \\ndatasets. 
Human-AI Conļ¬guration; \\nInformation Integrity; Intellectual \\nProperty \\nMS-2.10-002 \\nEngage directly with end-users and other stakeholders to understand their \\nexpectations and concerns regarding content provenance.'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 5, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '498d23d9347f49ac9c61216ea042aefd', '_collection_name': 'ai-safety'}, page_content='SECTION TITLE\\nDATA PRIVACY\\nYou should be protected from abusive data practices via built-in protections and you \\nshould have agency over how data about you is used. You should be protected from violations of \\nprivacy through design choices that ensure such protections are included by default, including ensuring that \\ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \\ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \\nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \\nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \\nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \\nusers with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases \\nwhere it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable \\nin plain language, and give you agency over data collection and the specific context of use; current hard-to\\xad\\nunderstand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \\nrestrictions for data and inferences related to sensitive domains, including health, work, education, criminal \\njustice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \\nrelated inferences should only be used for necessary functions, and you should be protected by ethical review \\nand use prohibitions. You and your communities should be free from unchecked surveillance; surveillance \\ntechnologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \\npotential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \\nshould not be used in education, work, housing, or in other contexts where the use of such surveillance \\ntechnologies is likely to limit rights, opportunities, or access. 
Whenever possible, you should have access to \\nreporting that confirms your data decisions have been respected and provides an assessment of the \\npotential impact of surveillance technologies on your rights, opportunities, or access.')]" + ] + }, + "execution_count": 130, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "compression_retriever.invoke(\"How can companies ensure AI does not violate data privacy laws?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 131, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "\n", + "base_rag_prompt_template = \"\"\"\\\n", + "You are a helpful assistant that answers questions using only the provided context. If the answer is not in the context, respond with: I don't have that information.\n", + "\n", + "Context:\n", + "{context}\n", + "\n", + "Question:\n", + "{question}\n", + "\"\"\"\n", + "\n", + "base_rag_prompt = ChatPromptTemplate.from_template(base_rag_prompt_template)" + ] + }, + { + "cell_type": "code", + "execution_count": 132, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_openai.chat_models import ChatOpenAI\n", + "\n", + "base_llm = ChatOpenAI(model=\"gpt-4o\", tags=[\"base_llm\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 133, + "metadata": {}, + "outputs": [], + "source": [ + "from operator import itemgetter\n", + "from langchain.schema.runnable import RunnablePassthrough\n", + "\n", + "retrieval_augmented_qa_chain = (\n", + "    {\"context\": itemgetter(\"question\") | compression_retriever, \"question\": itemgetter(\"question\")}\n", + "    | RunnablePassthrough.assign(context=itemgetter(\"context\"))\n", + "    | {\"response\": base_rag_prompt | base_llm, \"context\": itemgetter(\"context\")}\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 134, + "metadata": {}, + "outputs": [], + "source": [ + "result = retrieval_augmented_qa_chain.invoke({\"question\" : \"How can companies ensure AI does not violate data privacy laws?\"})" + ] + },
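 + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A minimal sketch for surfacing citations: the chain returns the compressed context alongside the response, so the grounding documents can be listed directly. It assumes `result` from the cell above; the `title` and `page` metadata keys come from the PyMuPDF loader, as seen in the outputs earlier.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# List the sources the answer was grounded in (assumes `result` from the cell above).\n", + "for doc in result[\"context\"]:\n", + "    print(f\"- {doc.metadata.get('title')} (page {doc.metadata.get('page')})\")\n" + ] + },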
 + { + "cell_type": "code", + "execution_count": 135, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Companies can ensure that AI does not violate data privacy laws by implementing several strategies and practices as mentioned in the provided context:\\n\\n1. **Periodic Monitoring**: Conduct regular monitoring of AI-generated content to identify and address any potential instances of personally identifiable information (PII) or sensitive data exposure. \\n\\n2. **Integration with Existing Policies**: Connect new AI policies, procedures, and processes with existing model, data, software development, and IT governance, as well as legal, compliance, and risk management activities.\\n\\n3. **Training Data Curation Policies**: Document training data curation policies in accordance with applicable laws and policies. This includes policies for the collection, retention, and minimum quality of data to mitigate risks such as the disclosure of inappropriate information, use of illegal or dangerous content, offensive cyber capabilities, data imbalances leading to harmful biases, and leaks of PII.\\n\\n4. **Diligence on Training Data**: Conduct appropriate diligence on the use of training data to assess intellectual property and privacy risks, ensuring that the use of proprietary or sensitive data is consistent with applicable laws.\\n\\n5. **User Experience Research**: Conduct user experience research to confirm that individuals understand what data is being collected about them and how it will be used, ensuring that this collection matches their expectations and desires.\\n\\n6. **Scope Limits on Data Collection**: Limit data collection to specific, narrow goals to avoid \"mission creep.\" Anticipated data collection should be strictly necessary for the identified goals and minimized as much as possible.\\n\\n7. **Risk Identification and Mitigation**: Proactively identify and manage privacy risks to avoid, mitigate, and respond appropriately to identified risks. This includes determining not to process data when privacy risks outweigh the benefits or implementing measures to mitigate acceptable risks.\\n\\n8. **Privacy-Preserving Security**: Follow privacy and security best practices to ensure that data and metadata do not leak beyond the specific consented use case. This can include using privacy-enhancing cryptography, privacy-enhancing technologies, fine-grained permissions, and access control mechanisms.\\n\\n9. **Consent and Privacy by Design**: Seek user permission and respect user decisions regarding data collection, use, access, transfer, and deletion to the greatest extent possible. Implement privacy by design safeguards where consent is not feasible, ensuring that systems do not employ user experience and design decisions that obfuscate user choice or burden users with privacy-invasive defaults.\\n\\n10. **Enhanced Protections for Sensitive Data**: Implement enhanced protections and restrictions for data and inferences related to sensitive domains such as health, work, education, criminal justice, and finance. Ensure that data pertaining to youth is protected, and any use in sensitive domains is subject to ethical review and use prohibitions.\\n\\n11. **Surveillance and Monitoring**: Ensure that surveillance technologies are subject to heightened oversight, including pre-deployment assessment of potential harms and scope limits to protect privacy and civil liberties. Avoid continuous surveillance and monitoring in contexts where it could limit rights, opportunities, or access.\\n\\nBy adopting these measures, companies can better ensure that their AI systems comply with data privacy laws and protect the privacy of individuals.'" + ] + }, + "execution_count": 135, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result.get('response').content" + ] + }, + { + "cell_type": "code", + "execution_count": 136, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'response': AIMessage(content='Companies can ensure that AI does not violate data privacy laws by implementing several strategies and practices as mentioned in the provided context:\\n\\n1. **Periodic Monitoring**: Conduct regular monitoring of AI-generated content to identify and address any potential instances of personally identifiable information (PII) or sensitive data exposure. \\n\\n2. **Integration with Existing Policies**: Connect new AI policies, procedures, and processes with existing model, data, software development, and IT governance, as well as legal, compliance, and risk management activities.\\n\\n3. **Training Data Curation Policies**: Document training data curation policies in accordance with applicable laws and policies. 
This includes policies for the collection, retention, and minimum quality of data to mitigate risks such as the disclosure of inappropriate information, use of illegal or dangerous content, offensive cyber capabilities, data imbalances leading to harmful biases, and leaks of PII.\\n\\n4. **Diligence on Training Data**: Conduct appropriate diligence on the use of training data to assess intellectual property and privacy risks, ensuring that the use of proprietary or sensitive data is consistent with applicable laws.\\n\\n5. **User Experience Research**: Conduct user experience research to confirm that individuals understand what data is being collected about them and how it will be used, ensuring that this collection matches their expectations and desires.\\n\\n6. **Scope Limits on Data Collection**: Limit data collection to specific, narrow goals to avoid \"mission creep.\" Anticipated data collection should be strictly necessary for the identified goals and minimized as much as possible.\\n\\n7. **Risk Identification and Mitigation**: Proactively identify and manage privacy risks to avoid, mitigate, and respond appropriately to identified risks. This includes determining not to process data when privacy risks outweigh the benefits or implementing measures to mitigate acceptable risks.\\n\\n8. **Privacy-Preserving Security**: Follow privacy and security best practices to ensure that data and metadata do not leak beyond the specific consented use case. This can include using privacy-enhancing cryptography, privacy-enhancing technologies, fine-grained permissions, and access control mechanisms.\\n\\n9. **Consent and Privacy by Design**: Seek user permission and respect user decisions regarding data collection, use, access, transfer, and deletion to the greatest extent possible. Implement privacy by design safeguards where consent is not feasible, ensuring that systems do not employ user experience and design decisions that obfuscate user choice or burden users with privacy-invasive defaults.\\n\\n10. **Enhanced Protections for Sensitive Data**: Implement enhanced protections and restrictions for data and inferences related to sensitive domains such as health, work, education, criminal justice, and finance. Ensure that data pertaining to youth is protected, and any use in sensitive domains is subject to ethical review and use prohibitions.\\n\\n11. **Surveillance and Monitoring**: Ensure that surveillance technologies are subject to heightened oversight, including pre-deployment assessment of potential harms and scope limits to protect privacy and civil liberties. 
Avoid continuous surveillance and monitoring in contexts where it could limit rights, opportunities, or access.\\n\\nBy adopting these measures, companies can better ensure that their AI systems comply with data privacy laws and protect the privacy of individuals.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 627, 'prompt_tokens': 2398, 'total_tokens': 3025, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_3537616b13', 'finish_reason': 'stop', 'logprobs': None}, id='run-8739df9f-0dc0-4aea-a089-5fa12ac6189e-0', usage_metadata={'input_tokens': 2398, 'output_tokens': 627, 'total_tokens': 3025}),\n", + " 'context': [Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 29, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '1ba549ad3b9b488ab15014c90b909ad1', '_collection_name': 'ai-safety'}, page_content='Conduct periodic monitoring of AI-generated content for privacy risks; address any possible instances of PII or sensitive data exposure. \\n\\nConnect new GAI policies, procedures, and processes to existing model, data, software development, and IT governance and to legal, compliance, and risk management activities. \\n\\nDocument training data curation policies, to the extent possible and according to applicable laws and policies. \\n\\nEstablish policies for collection, retention, and minimum quality of data, in consideration of the following risks: Disclosure of inappropriate CBRN information; Use of Illegal or dangerous content; Offensive cyber capabilities; Training data imbalances that could give rise to harmful biases; Leak of personally identifiable information, including facial likenesses of individuals.'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 32, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '04acfe508f5c428caeeb90944c1d7386', '_collection_name': 'ai-safety'}, page_content='Data should \\nonly be collected or used for the purposes of training or testing machine learning models if such collection and \\nuse is legal and consistent with the expectations of the people whose data is collected. User experience \\nresearch should be conducted to confirm that people understand what data is being collected about them and \\nhow it will be used, and that this collection matches their expectations and desires. Data collection and use-case scope limits. 
Data collection should be limited in scope, with specific, \\nnarrow identified goals, to avoid \"mission creep.\" Anticipated data collection should be determined to be \\nstrictly necessary to the identified goals and should be minimized as much as possible. Data collected based on \\nthese identified goals and for a specific context should not be used in a different context without assessing for \\nnew privacy risks and implementing appropriate mitigation measures, which may include express consent. Clear timelines for data retention should be established, with data deleted as soon as possible in accordance \\nwith legal or policy-based limitations. Determined data retention timelines should be documented and justi\\xad\\nfied. Risk identification and mitigation. Entities that collect, use, share, or store sensitive data should \\nattempt to proactively identify harms and seek to manage them so as to avoid, mitigate, and respond appropri\\xad\\nately to identified risks. Appropriate responses include determining not to process data when the privacy risks \\noutweigh the benefits or implementing measures to mitigate acceptable risks. Appropriate responses do not \\ninclude sharing or transferring the privacy risks to users via notice or consent requests where users could not \\nreasonably be expected to understand the risks without further support. Privacy-preserving security. Entities creating, using, or governing automated systems should follow \\nprivacy and security best practices designed to ensure data and metadata do not leak beyond the specific \\nconsented use case. Best practices could include using privacy-enhancing cryptography or other types of \\nprivacy-enhancing technologies or fine-grained permissions and access control mechanisms, along with \\nconventional system security protocols.'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 30, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '7e66f3997e2243ad97a378ad1f2d9cd0', '_collection_name': 'ai-safety'}, page_content='Conduct appropriate diligence on training data use to assess intellectual property, \\nand privacy, risks, including to examine whether use of proprietary or sensitive \\ntraining data is consistent with applicable laws. 
Intellectual Property; Data Privacy \\nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities'),\n", + " Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 38, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '907f59c504d44945ae865021b6e9c713', '_collection_name': 'ai-safety'}, page_content='>>>\\nMEASURE 2.10: Privacy risk of the AI system ā€“ as identiļ¬ed in the MAP function ā€“ is examined and documented. Action ID \\nSuggested Action \\nGAI Risks \\nMS-2.10-001 \\nConduct AI red-teaming to assess issues such as: Outputting of training data \\nsamples, and subsequent reverse engineering, model extraction, and \\nmembership inference risks; Revealing biometric, conļ¬dential, copyrighted, \\nlicensed, patented, personal, proprietary, sensitive, or trade-marked information; \\nTracking or revealing location information of users or members of training \\ndatasets. Human-AI Conļ¬guration; \\nInformation Integrity; Intellectual \\nProperty \\nMS-2.10-002 \\nEngage directly with end-users and other stakeholders to understand their \\nexpectations and concerns regarding content provenance.\\n>>>'),\n", + " Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 5, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '498d23d9347f49ac9c61216ea042aefd', '_collection_name': 'ai-safety'}, page_content='SECTION TITLE\\nDATA PRIVACY\\nYou should be protected from abusive data practices via built-in protections and you \\nshould have agency over how data about you is used. You should be protected from violations of \\nprivacy through design choices that ensure such protections are included by default, including ensuring that \\ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \\ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \\nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \\nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \\nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \\nusers with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases \\nwhere it can be appropriately and meaningfully given. 
Any consent requests should be brief, be understandable \\nin plain language, and give you agency over data collection and the specific context of use; current hard-to\\xad\\nunderstand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \\nrestrictions for data and inferences related to sensitive domains, including health, work, education, criminal \\njustice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \\nrelated inferences should only be used for necessary functions, and you should be protected by ethical review \\nand use prohibitions. You and your communities should be free from unchecked surveillance; surveillance \\ntechnologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \\npotential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \\nshould not be used in education, work, housing, or in other contexts where the use of such surveillance \\ntechnologies is likely to limit rights, opportunities, or access. Whenever possible, you should have access to \\nreporting that confirms your data decisions have been respected and provides an assessment of the \\npotential impact of surveillance technologies on your rights, opportunities, or access.')]}"
+      ]
+     },
+     "execution_count": 136,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "result"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.9"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/Tasks/Task 2/Qdrant_cloud.png b/Tasks/Task 2/Qdrant_cloud.png
new file mode 100644
index 0000000000000000000000000000000000000000..31158967ecc8c2afa44ea7fbf1ab8838374fb9a8
Binary files /dev/null and b/Tasks/Task 2/Qdrant_cloud.png differ
diff --git a/Tasks/Task 2/Task2.md b/Tasks/Task 2/Task2.md
new file mode 100644
index 0000000000000000000000000000000000000000..de291975b8e64eb7f12516aaa9a7ab899fd913bd
--- /dev/null
+++ b/Tasks/Task 2/Task2.md
@@ -0,0 +1,58 @@
+# Deliverable 1
+Build a prototype and deploy to a Hugging Face Space, and create a short (< 2 min) loom video demonstrating some initial testing inputs and outputs.
+
+
+# Deliverable 2
+How did you choose your stack, and why did you select each tool the way you did?
+
+In choosing my stack, I aimed to balance performance, scalability, and the ability to handle both structured and unstructured data. The tools I selected provide flexibility to handle diverse document types, while ensuring effective chunking and retrieval. Here's the breakdown of each tool and why it was chosen:
+
+## 1. **PyMuPDFLoader**:
+   - **Reason for Selection**:
+     - PyMuPDFLoader is fast, lightweight, and efficient for parsing PDFs. It offers good performance with respect to speed and memory usage, which is crucial when dealing with large documents.
+     - In my use case, the AI Bill of Rights and NIST RMF documents are both structured and relatively dense. PyMuPDFLoader allows for quick loading and extraction of content without compromising on accuracy.
+   - **Why Not Another Option**:
+     - While I considered `PyPDFium2Loader`, it is slower (2 minutes 30 seconds for loading the same document) and the performance difference didnā€™t justify the switch since the output quality between the two loaders was almost identical. A minimal loading sketch is shown below.
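+
+   As a minimal sketch of this loading step (assuming, as the Task 3 notebook does, that the two PDFs are fetched straight from their public URLs), `PyMuPDFLoader` downloads a web path to a temporary file and returns one `Document` per page:
+
+```python
+from langchain_community.document_loaders import PyMuPDFLoader
+
+BOR_PDF = "https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf"
+NIST_PDF = "https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf"
+
+documents = []
+for url in (BOR_PDF, NIST_PDF):
+    # One Document per page, with source/page metadata attached
+    documents.extend(PyMuPDFLoader(url).load())
+```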
+
+## 2. **RecursiveCharacterTextSplitter**:
+   - **Reason for Selection**:
+     - This splitter allows for flexible chunking of documents into manageable pieces while preserving context and meaning. It is particularly effective because it prevents breaking up the text in the middle of a thought, ensuring that the chunks remain semantically coherent.
+     - It also enables me to adjust the chunk sizes dynamically based on the document's structure. For instance, with the Blueprint for AI Bill of Rights, I can chunk based on sections, principles, and subsections, while still applying the RecursiveCharacter strategy within each chunk.
+   - **Why Not a Static Chunking Strategy**:
+     - A simple page or sentence-based chunking would not retain the full context in many cases. The recursive strategy ensures that chunks are more comprehensive, making retrieval more effective.
+
+## 3. **SemanticChunker**:
+   - **Reason for Selection**:
+     - This chunker allows for semantically rich divisions of the text, meaning that chunks are more likely to contain entire ideas or thoughts. This approach enhances coherence and leads to better retrieval outcomes.
+     - It is also adaptable and can be used to refine chunking strategies for documents that may not be as well-structured as the AI Bill of Rights or NIST RMF documents.
+   - **Why This Over Simple Chunking**:
+     - Semantic chunking provides better retrieval precision, especially in answering complex questions, since the context is more meaningful. This is particularly important when documents do not follow a clear structure. Both strategies are sketched below.
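+
+   A hedged sketch of both chunking strategies, reusing the `documents` list from the loader sketch above; the `chunk_size` and `chunk_overlap` values are illustrative, not the tuned settings:
+
+```python
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from langchain_experimental.text_splitter import SemanticChunker
+from langchain_huggingface import HuggingFaceEmbeddings
+
+# Baseline strategy: recursive splitting on paragraph/sentence/word boundaries
+recursive_chunks = RecursiveCharacterTextSplitter(
+    chunk_size=1000,   # illustrative value
+    chunk_overlap=100,
+).split_documents(documents)
+
+# Alternative strategy: split where embedding similarity between adjacent
+# sentences drops, so each chunk tends to hold one complete idea
+embedding_model = HuggingFaceEmbeddings(model_name="Snowflake/snowflake-arctic-embed-l")
+semantic_chunks = SemanticChunker(embedding_model).split_documents(documents)
+```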
+
+## 4. **Snowflake-Arctic-Embed-L Embedding Model**:
+   - **Reason for Selection**:
+     - This model offers a good balance between performance and accuracy. With 334 million parameters and 1024-dimension embeddings, it is a smaller model but ranks competitively on the MTEB leaderboard (27th), suggesting its efficiency.
+     - For a retrieval-augmented generation (RAG) setup, the embedding model plays a critical role in vectorizing chunks accurately, and this model is performant for both speed and relevance in retrieval tasks.
+   - **Why Not a Larger Model**:
+     - Larger models with more parameters may improve accuracy slightly but come at a much higher computational cost. For enterprise applications, the smaller yet efficient `snowflake-arctic-embed-l` model provides a good trade-off between speed and accuracy, allowing for scalability without major infrastructure demands.
+
+## 5. **Context Enrichment and Contextual Compression**:
+   - **Reason for Selection**:
+     - These advanced retrieval techniques aim to enhance the quality of responses by improving the retrieval process. Context enrichment allows for richer, more informed responses, while contextual compression ensures that responses remain concise and relevant.
+   - **Why Not Pure Contextual Retrieval**:
+     - Pure retrieval may lead to irrelevant or verbose results. By applying these techniques, I ensure that the retrieval process generates more targeted and meaningful answers, which is essential when dealing with complex or nuanced questions (e.g., AI ethics, privacy, and risk management). A compression-retriever sketch follows below.
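+
+   The exact retrieval pipeline lives in the notebooks; as one hedged sketch, contextual compression can be layered on top of the base retriever. `LLMChainExtractor` is an assumed compressor choice here (not necessarily the one used in the app), and `vector_store` is the Qdrant store set up in the next section:
+
+```python
+from langchain.retrievers import ContextualCompressionRetriever
+from langchain.retrievers.document_compressors import LLMChainExtractor
+from langchain_openai import ChatOpenAI
+
+# Strip each retrieved chunk down to the passages relevant to the query
+compressor = LLMChainExtractor.from_llm(ChatOpenAI(model="gpt-4o-mini"))  # assumed model choice
+
+compression_retriever = ContextualCompressionRetriever(
+    base_compressor=compressor,
+    base_retriever=vector_store.as_retriever(search_kwargs={"k": 10}),
+)
+```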
+
+## 6. **Grouping by Similar Context**:
+   - **Reason for Selection**:
+     - Grouping documents by similar context improves retrieval accuracy. When a user asks about a specific topic like data privacy, the system can retrieve relevant chunks from different documents (e.g., both the AI Bill of Rights and NIST RMF), ensuring that responses are comprehensive.
+   - **Why This Strategy**:
+     - Grouping chunks by similar context ensures that even when documents are diverse or cover multiple topics, the right content is prioritized during retrieval. This helps improve answer quality, especially when dealing with detailed or nuanced questions.
+
+## 7. **Qdrant Vector Store**:
+   - **Reason for Selection**:
+     - Using a vector store enables efficient storage and retrieval of embeddings, ensuring fast lookups and scalable operations. It also allows for advanced similarity search, making sure the most relevant chunks are retrieved based on the query embeddings. Qdrant, hosted on Qdrant Cloud (see `Qdrant_cloud.png`), fills this role here.
+   - **Why Not Traditional Indexing**:
+     - Traditional indexing methods are less effective in handling semantic content and would not allow for the nuanced retrieval that RAG applications require. Vector stores enable better handling of embeddings and can scale with large datasets. A setup sketch is shown below.
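+
+   A minimal setup sketch mirroring the Qdrant usage in the notebooks; the collection name is illustrative, and the 1024-dimension vectors match `snowflake-arctic-embed-l`'s output size:
+
+```python
+import os
+
+from qdrant_client import QdrantClient
+from qdrant_client.http.models import Distance, VectorParams
+from langchain_qdrant import QdrantVectorStore
+
+client = QdrantClient(url=os.environ["QDRANT_URL"], api_key=os.environ["QDRANT_API_KEY"])
+
+# One-time collection creation; cosine distance over 1024-dim embeddings
+client.create_collection(
+    collection_name="ai-safety",  # illustrative name
+    vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
+)
+
+vector_store = QdrantVectorStore(
+    client=client,
+    collection_name="ai-safety",
+    embedding=embedding_model,  # same model used at query time
+)
+vector_store.add_documents(recursive_chunks)
+```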
+
+## Conclusion:
+Each tool in this stack was chosen to ensure **speed**, **scalability**, and **accuracy** while dealing with structured and unstructured documents. By balancing performance with precision (e.g., fast document loading via PyMuPDFLoader, efficient chunking strategies, and a small but powerful embedding model), this stack provides a robust framework for building ethical and useful AI applications.
\ No newline at end of file
diff --git a/Tasks/Task 3/Colab-task3-generate-dataset-ragas-eval.ipynb b/Tasks/Task 3/Colab-task3-generate-dataset-ragas-eval.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..32d38bb0a51525fc88dccf2f45cee99a6780b4d6
--- /dev/null
+++ b/Tasks/Task 3/Colab-task3-generate-dataset-ragas-eval.ipynb
@@ -0,0 +1 @@
+{"cells":[{"cell_type":"markdown","metadata":{"id":"HY6bq51C8KWj"},"source":["# Synthetic data generation using Ragas framework"]},{"cell_type":"markdown","metadata":{"id":"AEGq6fJY8KWl"},"source":["> Python packages are installed from the `requirements.txt` file into a virtual environment"]},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":254,"status":"ok","timestamp":1727036080153,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"ExrmtPzF8rmE","outputId":"aaf19e2b-b0a3-49b0-e5fe-159c6e8b8080"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.10.12\n"]}],"source":["!python --version"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":37857,"status":"ok","timestamp":1727036120093,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"1f9vAwoV8KWl","outputId":"f8e770ea-c4e9-442e-ee08-d485b12ef0d6"},"outputs":[{"name":"stdout","output_type":"stream","text":["(pip download progress output trimmed)\n"]}],"source":["!pip install -qU langsmith==0.1.125 \\\n","    langchain-core==0.2.41 \\\n","    langchain-community \\\n","    langchain-qdrant==0.1.4 \\\n","    langchain-experimental \\\n","    langchain-openai \\\n","    langchain_huggingface \\\n","    PyMuPDF==1.24.10 \\\n","    ragas==0.1.18 \\\n","    protobuf==3.20.3 \\\n","    pyarrow==14.0.1 \\\n","    fsspec==2024.6.1\n"]},{"cell_type":"code","execution_count":17,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":40546,"status":"ok","timestamp":1727037137047,"user":{"displayName":"Jeevanandham 
Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"SN3ZYQhb8KWm","outputId":"29071820-566a-4870-9a0e-edee357a9151"},"outputs":[{"name":"stdout","output_type":"stream","text":["LangChain API Key:Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n","OpenAI API Key:Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n","Enter Your Qdrant API Key: Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n","Enter Your Qdrant URL: Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n"]}],"source":["import os\n","import getpass\n","from uuid import uuid4\n","\n","os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n","os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"LangChain API Key:\")\n","\n","os.environ[\"LANGCHAIN_PROJECT\"] = \"AIM-SDG-MidTerm - AI Safety\"\n","os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n","\n","os.environ[\"QDRANT_API_KEY\"] = getpass.getpass(\"Enter Your Qdrant API Key: \")\n","os.environ[\"QDRANT_URL\"] = getpass.getpass(\"Enter Your Qdrant URL: \")\n"]},{"cell_type":"code","execution_count":4,"metadata":{"executionInfo":{"elapsed":2813,"status":"ok","timestamp":1727036151748,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"TsdSQCDS8KWm"},"outputs":[],"source":["from langchain_experimental.text_splitter import SemanticChunker\n","from enum import Enum\n","from typing import List\n","from langchain_community.document_loaders import PyMuPDFLoader\n","from langchain_core.documents import Document\n","import asyncio\n","\n","class PDFLoaderWrapper():\n"," class LoaderType(str, Enum):\n"," PYMUPDF = \"pymupdf\"\n","\n"," def __init__(self, file_path: str | List[str] , loader_type: LoaderType = LoaderType.PYMUPDF):\n"," self.file_path = file_path if isinstance(file_path, list) else [file_path]\n"," self.loader_type = loader_type\n","\n"," async def aload(self) -> List[Document]:\n"," all_docs = []\n"," for file_path in self.file_path:\n"," if self.loader_type == self.LoaderType.PYMUPDF:\n"," try:\n"," loader = PyMuPDFLoader(file_path)\n"," docs = await loader.aload()\n"," all_docs.extend(docs)\n"," except Exception as e:\n"," print(f\"Error loading file {file_path}: {e}\")\n"," continue\n"," return all_docs\n","\n","\n","\n","\n","BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n","NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n","SMALL_DOC = \"https://arxiv.org/pdf/1908.10084\"\n","documents_to_preload = [\n"," BOR_FILE_PATH,\n"," NIST_FILE_PATH\n"," # SMALL_DOC\n","]\n","\n","pdf_loader = PDFLoaderWrapper(\n"," documents_to_preload, PDFLoaderWrapper.LoaderType.PYMUPDF\n",")\n","documents = await 
pdf_loader.aload()\n","\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":491,"referenced_widgets":["49fa14ed71b24d85b0dc2cf9d12e36d8","dc4ba5a620274f73afa7027726232693","fa9ba492445740f2a7388d05a8a0a995","e112800f289e4e3a8be21ba1f7db68b5","4747b53781fb4d37adea4c21b68cee33","2165a51b7d0a4440900a3fdb4152b1ff","8142cd3e211940a49391a3bb8414aae1","3da23de9f9b54994b79b6155565185e6","72df565437094492a84512d4a4be07f8","699b9a47c86a4c839f29f35325f5976a","aea9933aa1d0479cb64b2b87f25b971e","6301ae6e47e14ee3a965e672c7ae0e61","eddb3decc41c45dcb94b229d4f2b43a9","73f2f74333fa4334aa69bebf4a14c4de","f73c1d3c8c784131ae97d78185091ea2","a6c08a77b7cf4b57bccf7ae6840b1824","3e9c125122bf4a4697c4c66d0081db02","4b1fa36e00af4a63b4eaca5f84b61472","56d1f705145e4278b5c5666136464c2f","1415f7cb259f461a8554c9f68680633f","6b4568d54d3d44d6b969f9d7edb4a3e6","f46e5e9931884fe790fc846ba512cd56","ba302bbc5a1e43488831ee875550a224","6bcb3569f5144b4fb0fcaa55671b167e","58353a440c6a4adc8c0fc4af31973076","e7c0a7dfc59642bca13873ff5544e08a","178c185d1fb74f2aaac9f9d09612ba18","78cd857069834e34b84c73e0147b5e9f","290fd0c27c6945f18e2b1a8dbbd8338b","80f3b196e67747918589c7c0942e77c4","38b4e993c113467d8adae56f03009831","502dbd5a88af4028b17cc7ee59995879","fcba7005ef174314ac6a607c97291fcb","c59709620808466b9d65b390758b4898","0ef794ecfa144a84b34592353ad7a6ea","453cbd863055429cbe6bacba0af0ed0f","6dcb21c8ad064ef2a0ba4f8c308bb955","57e5dc406f7e4b289bab86df40762019","79fdfe0e7a9e4e38855567b3b204686a","c8dab5e9f6a94fddb82d81135b433cef","ee5b9d6e096b46ae88e0537c3a6e8b24","d9d573c7dd5b491dac2fc3643b452200","b477f5488ddc4bf991a9476c094454d9","f69fc7fda80d4bec984617f3c804a723","03671faa6b0a44bd8b3e637138c896eb","ac18871e773c416795edfad4e34bd739","1774f38ee20c401c9fee5f5c719c55fa","ff75bde6e9e34ddda3ab14358e32c9d5","376b21af9a924f9fac4d4a76adb2c279","9ad3152de7294fd8aed49ccc1d33b60c","4bd26e6ae79f4f3d8e9cf2f42ad3ac26","e69e65df44a644b1a2c61a62547f218e","d227481530984a52bb58265ffbf3095a","26e4773ba74a4fb1a079616d1dfe3475","49bebe2cfede420e8f18ff548cd99a53","54a35e95d97c441d88cf79665735ecd7","05f5187bbe264efd994ca80561f3f4e1","cf975ac612d94ae0824774b5e667512d","c4970fd5d1f24a32a78e9135774d8838","0bc3ed75fd084091b307f878b1d1d959","e73a9f990c1747aaaeef8b429d09eee1","ac85f34239514dac9a1f0d4930df51cf","fc2fd4df43344c8f83519be8338ea79c","cae690e4a646486eba8ec346b0f8cf78","4d66c93a9b3a49659b24dc3236ac61fb","280ffc9a3dc84cfeabcf938df8ba418d","ae9a7864070c409ea1e88e8fefc0ae27","9398b54a42924fbfa50dc9a2935831d8","b20bbe5fa22d43e8bd18f862fada7385","954a7f2aea624788ad86b83448033998","96b1e666843b4e45a310ec18bf750e65","a664c729149648f79971ad62b4bfab3a","0c8d2cc782dc405fa280f281f7c74df6","f6b50191cb834e0d8428c15aa7f74067","c05587b017a3468a955ffeaf88c4a5e1","f71f7bfb17cf48708222cd9b196c252e","549e74620f154e2d81e5242b4b09d5d4","0d6056d406f9496b92228ebc3e5a6fcf","ceb1fe3552024c309dad1392ced3b721","8d5bfdc58a2a4b14baae6e73e3a703b6","e256341c3df04d0a87b3865fdce29d22","ab176aa8c4884f069ae0e19e65760070","9ef5264e4c8344b19f8c63d13710b16a","16caf75cbaf84be08ab28e4af7c80460","ef1e019ced5a42aeaed7dc903158b2fa","1e85b700cd66441897a35f9018efd73c","8c3bafba10694ca28921512ae856ae0e","7f0fda7f909043b68762830fc945d649","812ef2fab08d41e9bb0b0a6607f06491","946552ad2b50452789b313730bc86022","47baaee191a24711a678c45960ba3d7b","ba11b7c4f3dd4c7c96300718c3ab2d9a","7dc5e6226139451c987d61a102743bbe","6bac147a849b4c86bab8e2d0598529d1","7a24922aee63498a804d0979c8043499","d3f2eca4202040f1b02458f135db7feb","b62d4c81354c478487b09905b9f4e73f
","69529419f3e74cbe957ebcabdabd6245","46db05876ac44068a90d9ee879686fc0","e8967112977f4511baf4841c45245f14","4b817c1894314519a6d33a543e3ece72","0b5b8501d1c64430997aade25c414422","786341e37e1340f4ac793a8b81803129","3f046ecb4256477da5a045e0ea65fd71","7f7b3318b9a8470bb06db047193dbae1","8279fd35681c425e8377a135933df2b7","fe62df73225f4b39a40780daf353f669","e79cc22d087b4f07b727f4b8b7dd81a6","03fbf3aba7874ce5bc30bf447ba6f015","19a009a472e34399a4857a80bb34da71","651a63696f3546d19fa7fc457e90acd0","a9e5c2b666cd4b1bb1f8e2d3f724b909","b3e98c6b6bd54b018709f71e00abe2cf","1f259f3cd5b544efbca85fc58b1a46b1","7860820e7de9416b8549de4710af99fc","33aef8c6f108473fa69075c96fb05afa","f7a51fbbe5ab4687b391e70c4412ed5d","bc0209a663504ae7b139286f2acac053","e407bdc397df41f283d3c9ef933b13aa","08f9e2ef423343d0ba69769ba75ecccd","ec34c675cd944c92bd81071fbc1abdd6"]},"executionInfo":{"elapsed":108008,"status":"ok","timestamp":1727036267071,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"fM0getah8KWm","outputId":"e4163428-6d96-4b62-d60b-401a13c3cc75"},"outputs":[{"name":"stdout","output_type":"stream","text":["Importing packages\n","Packages import complete\n","Getting the Embedding model from Huggingface\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"49fa14ed71b24d85b0dc2cf9d12e36d8","version_major":2,"version_minor":0},"text/plain":["modules.json: 0%| | 0.00/349 [00:00\n","
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
questioncontextsground_truthevolution_typemetadataepisode_done
0What actions did the OSTP take to engage with ...[APPENDIX\\nLisa Feldman Barrett \\nMadeline Owe...OSTP engaged with stakeholders regarding the u...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
1What are the potential issues associated with ...[ \\n \\n \\n \\nHUMAN ALTERNATIVES, \\nCONSIDERAT...The potential issues associated with automated...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
2What role does synthetic content detection pla...[ \\n51 \\ngeneral public participants. For exam...Synthetic content detection plays a crucial ro...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
3What role does risk management play in the imp...[ \\n50 \\nParticipatory Engagement Methods \\nOn...The answer to given question is not present in...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
4What concerns arise from companies using surve...[ \\n \\n \\n \\nDATA PRIVACY \\nWHY THIS PRINCIPL...Concerns arise from companies using surveillan...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
.....................
269What drives extra data protections in health a...[ \\n \\n \\n \\nDATA PRIVACY \\nEXTRA PROTECTIONS ...Extra data protections in health and finance a...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
270What insights did OSTP seek from experts in AI...[ \\n \\n \\n \\n \\nSECTION TITLE\\nAPPENDIX\\nListe...OSTP sought insights and analysis on the risks...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
271What key elements ensure clarity in docs about...[ \\nYou should know that an automated system i...Key elements that ensure clarity in documentat...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
272What biases to note for pre-deployment measure...[ \\n38 \\nMEASURE 2.13: Eļ¬€ectiveness of the emp...The context mentions documenting biases or sta...reasoning[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
273Which automated systems affect equal opportuni...[ \\n \\n \\n \\n \\n \\n \\n \\n \\nAPPENDIX\\nExamples...Automated systems that affect equal opportunit...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
\n","

274 rows Ɨ 6 columns

\n","
\n","
\n","\n","
\n"," \n","\n"," \n","\n"," \n","
\n","\n","\n","
\n"," \n","\n","\n","\n"," \n","
\n","\n","
\n"," \n"],"text/plain":[" question \\\n","0 What actions did the OSTP take to engage with ... \n","1 What are the potential issues associated with ... \n","2 What role does synthetic content detection pla... \n","3 What role does risk management play in the imp... \n","4 What concerns arise from companies using surve... \n",".. ... \n","269 What drives extra data protections in health a... \n","270 What insights did OSTP seek from experts in AI... \n","271 What key elements ensure clarity in docs about... \n","272 What biases to note for pre-deployment measure... \n","273 Which automated systems affect equal opportuni... \n","\n"," contexts \\\n","0 [APPENDIX\\nLisa Feldman Barrett \\nMadeline Owe... \n","1 [ \\n \\n \\n \\nHUMAN ALTERNATIVES, \\nCONSIDERAT... \n","2 [ \\n51 \\ngeneral public participants. For exam... \n","3 [ \\n50 \\nParticipatory Engagement Methods \\nOn... \n","4 [ \\n \\n \\n \\nDATA PRIVACY \\nWHY THIS PRINCIPL... \n",".. ... \n","269 [ \\n \\n \\n \\nDATA PRIVACY \\nEXTRA PROTECTIONS ... \n","270 [ \\n \\n \\n \\n \\nSECTION TITLE\\nAPPENDIX\\nListe... \n","271 [ \\nYou should know that an automated system i... \n","272 [ \\n38 \\nMEASURE 2.13: Eļ¬€ectiveness of the emp... \n","273 [ \\n \\n \\n \\n \\n \\n \\n \\n \\nAPPENDIX\\nExamples... \n","\n"," ground_truth evolution_type \\\n","0 OSTP engaged with stakeholders regarding the u... simple \n","1 The potential issues associated with automated... simple \n","2 Synthetic content detection plays a crucial ro... simple \n","3 The answer to given question is not present in... simple \n","4 Concerns arise from companies using surveillan... simple \n",".. ... ... \n","269 Extra data protections in health and finance a... reasoning \n","270 OSTP sought insights and analysis on the risks... reasoning \n","271 Key elements that ensure clarity in documentat... reasoning \n","272 The context mentions documenting biases or sta... reasoning \n","273 Automated systems that affect equal opportunit... reasoning \n","\n"," metadata episode_done \n","0 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","1 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","2 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","3 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","4 [{'source': 'https://www.whitehouse.gov/wp-con... True \n",".. ... ... \n","269 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","270 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","271 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","272 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","273 [{'source': 'https://www.whitehouse.gov/wp-con... 
True \n","\n","[274 rows x 6 columns]"]},"execution_count":8,"metadata":{},"output_type":"execute_result"}],"source":["testset.to_pandas()"]},{"cell_type":"code","execution_count":42,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":423},"executionInfo":{"elapsed":283,"status":"ok","timestamp":1727044937826,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"InvKT0YR5e6C","outputId":"1d451ade-0440-43b0-e4fc-294fe9ecbff1"},"outputs":[{"data":{"application/vnd.google.colaboratory.intrinsic+json":{"summary":"{\n \"name\": \"testset_df\",\n \"rows\": 274,\n \"fields\": [\n {\n \"column\": \"question\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 252,\n \"samples\": [\n \"How do real-time auditing tools help with AI content authenticity and system monitoring?\",\n \"What measures are proposed in the Blueprint for an AI Bill of Rights to protect the rights of the American public?\",\n \"What role does user consent play in the collection and use of personal data?\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"contexts\",\n \"properties\": {\n \"dtype\": \"object\",\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"ground_truth\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 235,\n \"samples\": [\n \"Risks can arise during the design, development, deployment, operation, and/or decommissioning stages of the AI lifecycle.\",\n \"The risks of collecting sensitive student data include concerns about the lack of express parental consent, the lack of transparency in how the data is being used, and the potential for resulting discriminatory impacts. Additionally, the data collected can include sensitive information such as demographic details, drug use, and interest in LGBTQI+ groups, which may lead to inappropriate forecasting of student success and flagging of students with disabilities as cheating.\",\n \"Panelists discussed several challenges at the tech-health equity intersection, including access to and expense of broadband service, privacy concerns associated with telehealth systems, and the expense associated with health monitoring devices, which can exacerbate equity issues. They also highlighted the need for accountability in the technologies used in medical care, particularly regarding racial biases and the use of race in medicine, which perpetuate harms and embed prior discrimination.\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"evolution_type\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 3,\n \"samples\": [\n \"simple\",\n \"multi_context\",\n \"reasoning\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"metadata\",\n \"properties\": {\n \"dtype\": \"object\",\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"episode_done\",\n \"properties\": {\n \"dtype\": \"boolean\",\n \"num_unique_values\": 1,\n \"samples\": [\n true\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}","type":"dataframe","variable_name":"testset_df"},"text/html":["\n","
\n","
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
questioncontextsground_truthevolution_typemetadataepisode_done
0What actions did the OSTP take to engage with ...[APPENDIX\\nLisa Feldman Barrett \\nMadeline Owe...OSTP engaged with stakeholders regarding the u...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
1What are the potential issues associated with ...[ \\n \\n \\n \\nHUMAN ALTERNATIVES, \\nCONSIDERAT...The potential issues associated with automated...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
2What role does synthetic content detection pla...[ \\n51 \\ngeneral public participants. For exam...Synthetic content detection plays a crucial ro...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
3What role does risk management play in the imp...[ \\n50 \\nParticipatory Engagement Methods \\nOn...The answer to given question is not present in...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
4What concerns arise from companies using surve...[ \\n \\n \\n \\nDATA PRIVACY \\nWHY THIS PRINCIPL...Concerns arise from companies using surveillan...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
.....................
269What drives extra data protections in health a...[ \\n \\n \\n \\nDATA PRIVACY \\nEXTRA PROTECTIONS ...Extra data protections in health and finance a...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
270What insights did OSTP seek from experts in AI...[ \\n \\n \\n \\n \\nSECTION TITLE\\nAPPENDIX\\nListe...OSTP sought insights and analysis on the risks...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
271What key elements ensure clarity in docs about...[ \\nYou should know that an automated system i...Key elements that ensure clarity in documentat...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
272What biases to note for pre-deployment measure...[ \\n38 \\nMEASURE 2.13: Eļ¬€ectiveness of the emp...The context mentions documenting biases or sta...reasoning[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
273Which automated systems affect equal opportuni...[ \\n \\n \\n \\n \\n \\n \\n \\n \\nAPPENDIX\\nExamples...Automated systems that affect equal opportunit...reasoning[{'source': 'https://www.whitehouse.gov/wp-con...True
\n","

274 rows Ɨ 6 columns

\n","
\n","
\n","\n","
\n"," \n","\n"," \n","\n"," \n","
\n","\n","\n","
\n"," \n","\n","\n","\n"," \n","
\n","\n","
\n"," \n"," \n"," \n","
\n","\n","
\n","
\n"],"text/plain":[" question \\\n","0 What actions did the OSTP take to engage with ... \n","1 What are the potential issues associated with ... \n","2 What role does synthetic content detection pla... \n","3 What role does risk management play in the imp... \n","4 What concerns arise from companies using surve... \n",".. ... \n","269 What drives extra data protections in health a... \n","270 What insights did OSTP seek from experts in AI... \n","271 What key elements ensure clarity in docs about... \n","272 What biases to note for pre-deployment measure... \n","273 Which automated systems affect equal opportuni... \n","\n"," contexts \\\n","0 [APPENDIX\\nLisa Feldman Barrett \\nMadeline Owe... \n","1 [ \\n \\n \\n \\nHUMAN ALTERNATIVES, \\nCONSIDERAT... \n","2 [ \\n51 \\ngeneral public participants. For exam... \n","3 [ \\n50 \\nParticipatory Engagement Methods \\nOn... \n","4 [ \\n \\n \\n \\nDATA PRIVACY \\nWHY THIS PRINCIPL... \n",".. ... \n","269 [ \\n \\n \\n \\nDATA PRIVACY \\nEXTRA PROTECTIONS ... \n","270 [ \\n \\n \\n \\n \\nSECTION TITLE\\nAPPENDIX\\nListe... \n","271 [ \\nYou should know that an automated system i... \n","272 [ \\n38 \\nMEASURE 2.13: Eļ¬€ectiveness of the emp... \n","273 [ \\n \\n \\n \\n \\n \\n \\n \\n \\nAPPENDIX\\nExamples... \n","\n"," ground_truth evolution_type \\\n","0 OSTP engaged with stakeholders regarding the u... simple \n","1 The potential issues associated with automated... simple \n","2 Synthetic content detection plays a crucial ro... simple \n","3 The answer to given question is not present in... simple \n","4 Concerns arise from companies using surveillan... simple \n",".. ... ... \n","269 Extra data protections in health and finance a... reasoning \n","270 OSTP sought insights and analysis on the risks... reasoning \n","271 Key elements that ensure clarity in documentat... reasoning \n","272 The context mentions documenting biases or sta... reasoning \n","273 Automated systems that affect equal opportunit... reasoning \n","\n"," metadata episode_done \n","0 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","1 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","2 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","3 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","4 [{'source': 'https://www.whitehouse.gov/wp-con... True \n",".. ... ... \n","269 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","270 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","271 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","272 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","273 [{'source': 'https://www.whitehouse.gov/wp-con... 
True  \n","\n","[274 rows x 6 columns]"]},"execution_count":42,"metadata":{},"output_type":"execute_result"}],"source":["testset_df = testset.to_pandas()\n","testset_df"]},{"cell_type":"code","execution_count":69,"metadata":{"executionInfo":{"elapsed":10,"status":"ok","timestamp":1727049574641,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"St_J9v80Gi6v"},"outputs":[],"source":["testset_df.to_csv('ai-safety-sdg.csv', index=False)"]},{"cell_type":"code","execution_count":43,"metadata":{"executionInfo":{"elapsed":227,"status":"ok","timestamp":1727044962465,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"XsWm4J9f5nyR"},"outputs":[],"source":["test_questions = testset_df[\"question\"].values.tolist()\n","test_groundtruths = testset_df[\"ground_truth\"].values.tolist()"]},{"cell_type":"markdown","metadata":{"id":"XwVBYJgF8KWn"},"source":["# Create RAG chain to generate answers for the above questions in the dataset"]},{"cell_type":"markdown","metadata":{"id":"9g99v4Uq8KWn"},"source":["> Note that we are using Qdrant cloud, where the PDF documents are processed and saved for us to consume. For the RAG pipeline we use the same embedding model originally used to populate the Qdrant vectorstore."]},{"cell_type":"code","execution_count":32,"metadata":{"executionInfo":{"elapsed":917,"status":"ok","timestamp":1727042496507,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"cFEpa5eo8KWn"},"outputs":[],"source":["from langchain_qdrant import QdrantVectorStore\n","from langchain_core.documents import Document\n","from qdrant_client import QdrantClient\n","from qdrant_client.http.models import Distance, VectorParams\n","\n","dimension = 1024\n","collection_name = \"ai-safety-sr-arctic-embed-l-recursive\"\n","qdrant_server = os.environ[\"QDRANT_URL\"]\n","qdrant_client = QdrantClient(url=qdrant_server,api_key=os.environ[\"QDRANT_API_KEY\"])\n","\n","# qdrant_client.create_collection(\n","#     collection_name=collection_name,\n","#     vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),\n","# )\n","\n","vector_store = QdrantVectorStore(\n","    client=qdrant_client,\n","    collection_name=collection_name,\n","    embedding=embedding_model,\n",")\n","\n","retriever = vector_store.as_retriever(search_type=\"similarity_score_threshold\",\n","                                      search_kwargs={'k':10,'score_threshold': 0.8})"]},{"cell_type":"code","execution_count":29,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":575,"status":"ok","timestamp":1727040138184,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"jbI38AHQa8ar","outputId":"11dcdc05-a6f7-4d25-bd31-66f48ca188db"},"outputs":[{"data":{"text/plain":["[Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': 'b6779e22-20c4-44d3-8741-c06cc2bb380c', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, 
page_content='Human-AI Conļ¬guration \\n'),\n"," Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '26f3917c-c227-4e99-8e11-5212273fe2ba', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Human-AI Conļ¬guration \\n'),\n"," Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 11, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '07a8b461-51e7-4641-b97a-d823ef91082f', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content=' \\n \\n \\nFROM \\nPRINCIPLES \\nTO PRACTICE \\nA TECHINCAL COMPANION TO\\nTHE Blueprint for an \\nAI BILL OF RIGHTS\\n12\\n'),\n"," Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '1d43c4ca-f83b-4708-97e9-6410f4dae5ee', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Human-AI Conļ¬guration \\nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \\n \\n'),\n"," Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 61, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '73cf1599-b76d-4061-874e-660228ca5f06', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='et al. (2023) Whose Opinions Do Language Models Reļ¬‚ect? 
arXiv.'),\n"," Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '00ca0c39-98b6-4339-874a-d036983a0922', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Make sure these tests cover various scenarios, such as crisis \\nsituations or ethically sensitive contexts. Human-AI Conļ¬guration; \\nInformation Integrity; Harmful Bias \\nand Homogenization; Dangerous, \\nViolent, or Hateful Content \\nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \\n \\n'),\n"," Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 59, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '81516460-f657-40e9-aef0-f4babc29b2f1', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='https://www.rand.org/pubs/research_reports/RRA2977-2.html. Nicoletti, L. et al. (2023) Humans Are Biased. Generative Ai Is Even Worse. Bloomberg. https://www.bloomberg.com/graphics/2023-generative-ai-bias/. National Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \\nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework. https://www.nist.gov/itl/ai-risk-management-framework \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \\nRisks and Trustworthiness. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \\nRMF Proļ¬les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \\nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \\nDescriptions of AI Actor Tasks. 
https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product \\n'),\n"," Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 57, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': \"D:20240805141702-04'00'\", 'modDate': \"D:20240805143048-04'00'\", 'trapped': '', '_id': '1b34b9f6-1f16-4993-b738-7a73e961bf2b', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='(2020) Overcoming Failures of Imagination in AI Infused System Development and \\nDeployment. arXiv.'),\n"," Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 0, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '8dd5b1e7-fd46-4e2a-90c2-8a8eea8b0cb9', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content=' \\n \\n \\n \\n \\n \\n \\n \\n \\n \\nBLUEPRINT FOR AN \\nAI BILL OF \\nRIGHTS \\nMAKING AUTOMATED \\nSYSTEMS WORK FOR \\nTHE AMERICAN PEOPLE \\nOCTOBER 2022 \\n'),\n"," Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': \"D:20220920133035-04'00'\", 'modDate': \"D:20221003104118-04'00'\", 'trapped': '', '_id': '8c64aecd-850b-48b3-bac2-16e73ebad1e0', '_collection_name': 'ai-safety-sr-arctic-embed-l-semantic'}, page_content='Some companies have instituted bias testing as part of their product \\nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \\nlaunched, preventing harm to the public. 
Federal government agencies have been developing standards and guidance \\nfor the use of automated systems in order to help prevent bias.')]"]},"execution_count":29,"metadata":{},"output_type":"execute_result"}],"source":["retriever.invoke(\"What steps can organizations take to minimize bias in AI models?\")"]},{"cell_type":"code","execution_count":53,"metadata":{"executionInfo":{"elapsed":246,"status":"ok","timestamp":1727045858752,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"R4_jjdEjwT-O"},"outputs":[],"source":["from langchain.retrievers.contextual_compression import ContextualCompressionRetriever\n","from langchain.retrievers.document_compressors import LLMChainExtractor\n","from langchain_openai import ChatOpenAI\n","\n","async def get_contextual_compressed_retriever(retriever):\n","\n"," base_retriever = retriever\n"," compressor_llm = ChatOpenAI(temperature=0, model_name=\"gpt-4o\", max_tokens=1500)\n"," compressor = LLMChainExtractor.from_llm(compressor_llm)\n","\n"," # Combine the base retriever with the LLM-based compressor\n"," compression_retriever = ContextualCompressionRetriever(\n"," base_compressor=compressor,\n"," base_retriever=base_retriever\n"," )\n"," return compression_retriever"]},{"cell_type":"code","execution_count":54,"metadata":{"executionInfo":{"elapsed":251,"status":"ok","timestamp":1727045861538,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"rtCArydrwhFq"},"outputs":[],"source":["contextual_compressed_retriever = await get_contextual_compressed_retriever(retriever)"]},{"cell_type":"code","execution_count":20,"metadata":{"executionInfo":{"elapsed":275,"status":"ok","timestamp":1727037197004,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"ugjsVcab8KWo"},"outputs":[],"source":["from langchain.prompts import ChatPromptTemplate\n","\n","RAG_PROMPT = \"\"\"\\\n","Given a provided context and a question, you must answer the question based only on that context.\n","\n","If you cannot answer the question based on the context, you must say \"I don't know\".\n","\n","Context: {context}\n","Question: {question}\n","\"\"\"\n","\n","rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)"]},{"cell_type":"code","execution_count":21,"metadata":{"executionInfo":{"elapsed":235,"status":"ok","timestamp":1727037200213,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"a3MuU4ZX8KWo"},"outputs":[],"source":["from langchain_openai import ChatOpenAI\n","\n","# Using the same chat model used in the app.\n","chat_model_name = \"gpt-4o\"\n","llm = ChatOpenAI(model=chat_model_name,temperature=0)"]},
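{"cell_type":"markdown","metadata":{},"source":["> Optional sanity check (our addition, not part of the original run): invoke the compression retriever once to see what the LLM extractor keeps from the raw hits; the query is the same one used against the base retriever above."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Our addition: peek at the compressed context for a sample query.\n","sample_docs = contextual_compressed_retriever.invoke(\"What steps can organizations take to minimize bias in AI models?\")\n","for doc in sample_docs:\n","    print(doc.page_content[:200], '---')"]},{"cell_type":"code","execution_count":55,"metadata":{"executionInfo":{"elapsed":239,"status":"ok","timestamp":1727045866301,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"-yGYL2fQ8KWo"},"outputs":[],"source":["from operator import itemgetter\n","from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n","from langchain.schema import StrOutputParser\n","\n","ai_safety_rag_chain = (\n"," {\"context\": itemgetter(\"question\") | contextual_compressed_retriever, \"question\": itemgetter(\"question\")}\n"," | rag_prompt | llm | 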
StrOutputParser()\n",")"]},{"cell_type":"code","execution_count":56,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":105},"executionInfo":{"elapsed":8298,"status":"ok","timestamp":1727045877567,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"12Sa5g598KWo","outputId":"916240c3-da0c-4bc6-f20b-1384a162cf52"},"outputs":[{"data":{"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"},"text/plain":["'Companies can ensure AI does not violate data privacy laws by implementing the following measures:\\n\\n1. **Built-in Protections**: Incorporate built-in protections to guard against abusive data practices.\\n2. **User Agency**: Provide users with control over how their data is used.\\n3. **Privacy by Design**: Make design choices that protect user privacy by default.\\n4. **Reasonable Data Collection**: Ensure that data collection conforms to reasonable expectations and only collect data strictly necessary for the specific context.\\n5. **User Permission and Respect**: Seek user permission and respect their decisions regarding the collection, use, access, transfer, and deletion of their data.\\n6. **Alternative Safeguards**: Use alternative privacy by design safeguards where user permission is not possible.\\n7. **Avoid Obfuscation**: Avoid user experience and design decisions that obfuscate user choice or burden users with privacy-invasive defaults.\\n\\nThese measures ensure that the systems are aligned with data privacy laws and protect user privacy effectively.'"]},"execution_count":56,"metadata":{},"output_type":"execute_result"}],"source":["ai_safety_rag_chain.invoke({\"question\" : \"How can companies ensure AI does not violate data privacy laws?\"})"]},{"cell_type":"code","execution_count":37,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":35},"executionInfo":{"elapsed":6680,"status":"ok","timestamp":1727042613356,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"p85CK56HcQaW","outputId":"25e27ff0-abae-43a0-f44d-acdec7c05a0d"},"outputs":[{"data":{"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"},"text/plain":["\"I don't know.\""]},"execution_count":37,"metadata":{},"output_type":"execute_result"}],"source":["ai_safety_rag_chain.invoke({\"question\" : \"What are the implications of using GAI systems for organizations in terms of risk management and compliance?\"})"]},{"cell_type":"code","execution_count":66,"metadata":{"executionInfo":{"elapsed":354,"status":"ok","timestamp":1727047034136,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"Zd-lNRE9-Sex"},"outputs":[],"source":["# RAG chain used in the app\n","from langchain.chains.combine_documents import create_stuff_documents_chain\n","from langchain.prompts import MessagesPlaceholder\n","from langchain.prompts import ChatPromptTemplate\n","from langchain.chains.history_aware_retriever import create_history_aware_retriever\n","from langchain.chains.retrieval import create_retrieval_chain\n","from langchain_core.runnables.history import RunnableWithMessageHistory\n","from langchain_core.chat_history import BaseChatMessageHistory\n","from langchain_community.chat_message_histories import ChatMessageHistory\n","\n","def create_history_aware_retriever_self(chat_model, retriever):\n"," contextualize_q_system_prompt = (\n"," \"Given a chat history and the latest user question which might reference context in the chat 
history, \"\n"," \"formulate a standalone question which can be understood without the chat history. Do NOT answer the question, \"\n"," \"just reformulate it if needed and otherwise return it as is.\"\n"," )\n"," contextualize_q_prompt = ChatPromptTemplate.from_messages(\n"," [\n"," (\"system\", contextualize_q_system_prompt),\n"," MessagesPlaceholder(\"chat_history\"),\n"," (\"human\", \"{input}\"),\n"," ]\n"," )\n"," return create_history_aware_retriever(chat_model, retriever, contextualize_q_prompt)\n","\n","def create_qa_chain(chat_model):\n"," qa_system_prompt = (\n"," \"You are an helpful assistant named 'Shield' and your task is to answer any questions related to AI Safety for the given context.\"\n"," \"Use the following pieces of retrieved context to answer the question.\"\n"," # \"If any questions asked outside AI Safety context, just say that you are a specialist in AI Safety and can't answer that.\"\n"," # f\"When introducing you, just say that you are an AI assistant powered by embedding model {embedding_model_name} and chat model {chat_model_name} and your knowledge is limited to 'Blueprint for an AI Bill of Rights' and 'NIST AI Standards' documents.\"\n"," \"If you don't know the answer, just say that you don't know.\\n\\n\"\n"," \"{context}\"\n"," )\n"," qa_prompt = ChatPromptTemplate.from_messages(\n"," [\n"," (\"system\", qa_system_prompt),\n"," MessagesPlaceholder(\"chat_history\"),\n"," (\"human\", \"{input}\"),\n"," ]\n"," )\n"," return create_stuff_documents_chain(chat_model, qa_prompt)\n","\n","def create_rag_chain(chat_model, retriever):\n"," history_aware_retriever = create_history_aware_retriever_self(chat_model, retriever)\n"," question_answer_chain = create_qa_chain(chat_model)\n"," return create_retrieval_chain(history_aware_retriever, question_answer_chain)\n","\n","def get_session_history(session_id: str) -> BaseChatMessageHistory:\n"," if session_id not in store:\n"," store[session_id] = ChatMessageHistory()\n"," return store[session_id]\n","\n","history_ai_safety_rag_chain = create_rag_chain(llm, contextual_compressed_retriever)\n","\n","conversational_rag_chain = RunnableWithMessageHistory(\n"," history_ai_safety_rag_chain,\n"," get_session_history,\n"," input_messages_key=\"input\",\n"," history_messages_key=\"chat_history\",\n"," output_messages_key=\"answer\",\n"," )"]},{"cell_type":"markdown","metadata":{"id":"xFc9_Npg8KWo"},"source":["# Ragas Evaluation"]},{"cell_type":"code","execution_count":68,"metadata":{"executionInfo":{"elapsed":2486324,"status":"ok","timestamp":1727049574640,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"CtCmZ3K950sr"},"outputs":[],"source":["import time\n","import uuid\n","\n","answers = []\n","contexts = []\n","\n","for question in test_questions:\n"," store = {}\n"," session_id = str(uuid.uuid4())\n","\n"," response = conversational_rag_chain.invoke({\"input\" : question}, config={\"configurable\": {\"session_id\": session_id}})\n"," # time.sleep(1)\n"," answers.append(response[\"answer\"])\n"," contexts.append([context.page_content for context in response[\"context\"]])"]},{"cell_type":"code","execution_count":70,"metadata":{"executionInfo":{"elapsed":243,"status":"ok","timestamp":1727049621003,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"aB6BhqHG57mj"},"outputs":[],"source":["from datasets import Dataset\n","\n","response_dataset = Dataset.from_dict({\n"," \"question\" : test_questions,\n"," \"answer\" : 
answers,\n"," \"contexts\" : contexts,\n"," \"ground_truth\" : test_groundtruths\n","})"]},{"cell_type":"code","execution_count":71,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":249,"status":"ok","timestamp":1727049626623,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"mfCULw8U5-ej","outputId":"21f14944-325e-44b8-901b-0a0bf6644c16"},"outputs":[{"data":{"text/plain":["{'question': 'What actions did the OSTP take to engage with stakeholders regarding the use of artificial intelligence and biometric technologies?',\n"," 'answer': 'The Office of Science and Technology Policy (OSTP) took the following actions to engage with stakeholders regarding the use of artificial intelligence and biometric technologies:\\n\\n1. **Soliciting Public Comments**: OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and other data-driven technologies in their lives.\\n\\n2. **Request for Information (RFI)**: OSTP issued a Request for Information (RFI) on the use and governance of biometric technologies. The purpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation.\\n\\nThe 130 responses to this RFI are available online and were submitted by a diverse range of organizations and individuals, including Accenture, Access Now, ACT | The App Association, AHIP, AIethicist.org, Airlines for America, Alliance for Automotive Innovation, Amelia Winger-Bearskin, and the American Civil Liberties Union, among others.',\n"," 'contexts': ['ā€¢ OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and other data-driven technologies in their lives.\\nā€¢ OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The purpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below listed organizations and individuals:\\nAccenture \\nAccess Now \\nACT | The App Association \\nAHIP \\nAIethicist.org \\nAirlines for America \\nAlliance for Automotive Innovation \\nAmelia Winger-Bearskin \\nAmerican Civil Liberties Union'],\n"," 'ground_truth': 'OSTP engaged with stakeholders regarding the use of artificial intelligence and biometric technologies by conducting two listening sessions for members of the public, which drew upwards of 300 participants. 
Additionally, OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and issued a Request For Information (RFI) on the use and governance of biometric technologies to understand their extent, variety, and the stakeholders impacted by their use or regulation.'}"]},"execution_count":71,"metadata":{},"output_type":"execute_result"}],"source":["response_dataset[0]"]},{"cell_type":"code","execution_count":72,"metadata":{"executionInfo":{"elapsed":2,"status":"ok","timestamp":1727049660277,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"hkJIiVw08KWo"},"outputs":[],"source":["from ragas import evaluate\n","from ragas.metrics import (\n"," faithfulness,\n"," answer_relevancy,\n"," answer_correctness,\n"," context_recall,\n"," context_precision,\n",")\n","\n","metrics = [\n"," faithfulness,\n"," answer_relevancy,\n"," context_recall,\n"," context_precision,\n"," answer_correctness,\n","]"]},{"cell_type":"code","execution_count":73,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":49,"referenced_widgets":["de1d0a7c196b43f2896f7328b51d0722","f3408e40282f45a7bae1d28c9089155c","2b9653890ab449ddb8463d2c896ef93c","2404dc4e606f4829af0a2196e2028092","89a1bb37e141423395bacf6e997e9bad","4cb6117209d540faa71410c223299f41","0ecafc8c711d4532b6eff44e9aa9c9aa","975aa2789cee4d2f857c1a22306f3e35","b196574881d242fb9cd9cc1dbb91787e","de6399b9118541bb97029ca88be886f4","e0dfd110049c4c7ab6e56fb6b485ed5c"]},"executionInfo":{"elapsed":583665,"status":"ok","timestamp":1727050255211,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"SsWmvE1h8KWo","outputId":"16301a9c-d73f-4ed3-c6c1-5a9760a22492"},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"de1d0a7c196b43f2896f7328b51d0722","version_major":2,"version_minor":0},"text/plain":["Evaluating: 0%| | 0/1370 [00:00\n","
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
questioncontextsanswerground_truthfaithfulnessanswer_relevancycontext_recallcontext_precisionanswer_correctness
0What actions did the OSTP take to engage with ...[ā€¢ OSTP created an email address (ai-equity@os...The Office of Science and Technology Policy (O...OSTP engaged with stakeholders regarding the u...1.0000000.9714210.6666671.00.832265
1What are the potential issues associated with ...[]Automated performance evaluation systems in th...The potential issues associated with automated...1.0000000.9884790.0000000.00.289107
2What role does synthetic content detection pla...[]Synthetic content detection plays a crucial ro...Synthetic content detection plays a crucial ro...0.9444441.0000000.0000000.00.308811
3What role does risk management play in the imp...[risk identification and management assessment...Risk management plays a crucial role in the im...The answer to given question is not present in...0.2333331.0000000.0000000.00.920685
4What concerns arise from companies using surve...[Companies use surveillance software to track ...The use of surveillance software by companies ...Concerns arise from companies using surveillan...0.5652170.9898121.0000001.00.612930
..............................
269What drives extra data protections in health a...[]Extra data protections in health and finance a...Extra data protections in health and finance a...0.9354840.9824600.0000000.00.759702
270What insights did OSTP seek from experts in AI...[]The Office of Science and Technology Policy (O...OSTP sought insights and analysis on the risks...0.0000000.9106770.0000000.00.688606
271What key elements ensure clarity in docs about...[Designers, developers, and deployers of autom...To ensure clarity in documentation about an au...Key elements that ensure clarity in documentat...0.9629630.9499621.0000001.00.510159
272What biases to note for pre-deployment measure...[]In the context of pre-deployment measurement e...The context mentions documenting biases or sta...0.0000000.9514810.0000000.00.884850
273Which automated systems affect equal opportuni...[Education-related systems such as algorithms ...Automated systems that can affect equal opport...Automated systems that affect equal opportunit...0.4761900.9832240.2500001.00.885252
\n","

274 rows Ɨ 9 columns

\n","
\n","
\n","\n","
\n"," \n","\n"," \n","\n"," \n","
\n","\n","\n","
\n"," \n","\n","\n","\n"," \n","
\n","\n","
\n"," \n"," \n"," \n","
\n","\n","
\n"," \n"],"text/plain":[" question \\\n","0 What actions did the OSTP take to engage with ... \n","1 What are the potential issues associated with ... \n","2 What role does synthetic content detection pla... \n","3 What role does risk management play in the imp... \n","4 What concerns arise from companies using surve... \n",".. ... \n","269 What drives extra data protections in health a... \n","270 What insights did OSTP seek from experts in AI... \n","271 What key elements ensure clarity in docs about... \n","272 What biases to note for pre-deployment measure... \n","273 Which automated systems affect equal opportuni... \n","\n"," contexts \\\n","0 [ā€¢ OSTP created an email address (ai-equity@os... \n","1 [] \n","2 [] \n","3 [risk identification and management assessment... \n","4 [Companies use surveillance software to track ... \n",".. ... \n","269 [] \n","270 [] \n","271 [Designers, developers, and deployers of autom... \n","272 [] \n","273 [Education-related systems such as algorithms ... \n","\n"," answer \\\n","0 The Office of Science and Technology Policy (O... \n","1 Automated performance evaluation systems in th... \n","2 Synthetic content detection plays a crucial ro... \n","3 Risk management plays a crucial role in the im... \n","4 The use of surveillance software by companies ... \n",".. ... \n","269 Extra data protections in health and finance a... \n","270 The Office of Science and Technology Policy (O... \n","271 To ensure clarity in documentation about an au... \n","272 In the context of pre-deployment measurement e... \n","273 Automated systems that can affect equal opport... \n","\n"," ground_truth faithfulness \\\n","0 OSTP engaged with stakeholders regarding the u... 1.000000 \n","1 The potential issues associated with automated... 1.000000 \n","2 Synthetic content detection plays a crucial ro... 0.944444 \n","3 The answer to given question is not present in... 0.233333 \n","4 Concerns arise from companies using surveillan... 0.565217 \n",".. ... ... \n","269 Extra data protections in health and finance a... 0.935484 \n","270 OSTP sought insights and analysis on the risks... 0.000000 \n","271 Key elements that ensure clarity in documentat... 0.962963 \n","272 The context mentions documenting biases or sta... 0.000000 \n","273 Automated systems that affect equal opportunit... 0.476190 \n","\n"," answer_relevancy context_recall context_precision answer_correctness \n","0 0.971421 0.666667 1.0 0.832265 \n","1 0.988479 0.000000 0.0 0.289107 \n","2 1.000000 0.000000 0.0 0.308811 \n","3 1.000000 0.000000 0.0 0.920685 \n","4 0.989812 1.000000 1.0 0.612930 \n",".. ... ... ... ... \n","269 0.982460 0.000000 0.0 0.759702 \n","270 0.910677 0.000000 0.0 0.688606 \n","271 0.949962 1.000000 1.0 0.510159 \n","272 0.951481 0.000000 0.0 0.884850 \n","273 0.983224 0.250000 1.0 0.885252 \n","\n","[274 rows x 9 columns]"]},"execution_count":74,"metadata":{},"output_type":"execute_result"},{"name":"stderr","output_type":"stream","text":["WARNING:root:Quickchart encountered unexpected dtypes in columns: \"(['contexts'],)\"\n"]},{"data":{"text/html":["

{"cell_type":"code","execution_count":75,"metadata":{"executionInfo":{"elapsed":333,"status":"ok","timestamp":1727050524966,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"-eFf7n13Osf7"},"outputs":[],"source":["results_df.to_csv('ai-safety-ragas-evaluation-result.csv', index=False)"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"},"widgets":{"application/vnd.jupyter.widget-state+json":{"02b8a307cf454eb2a20b274b7116225f":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"03671faa6b0a44bd8b3e637138c896eb":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ac18871e773c416795edfad4e34bd739","IPY_MODEL_1774f38ee20c401c9fee5f5c719c55fa","IPY_MODEL_ff75bde6e9e34ddda3ab14358e32c9d5"],"layout":"IPY_MODEL_376b21af9a924f9fac4d4a76adb2c279"}},"03fbf3aba7874ce5bc30bf447ba6f015":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":n
ull,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"05f5187bbe264efd994ca80561f3f4e1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e73a9f990c1747aaaeef8b429d09eee1","placeholder":"ā€‹","style":"IPY_MODEL_ac85f34239514dac9a1f0d4930df51cf","value":"model.safetensors:ā€‡100%"}},"068a2d12c56247a9b3eb7c7dc28f77ff":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"08f9e2ef423343d0ba69769ba75ecccd":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b5b8501d1c64430997aade25c414422":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_fe62df73225f4b39a40780daf353f669","max":695,"min":0,"orientation":"horizontal","style":"IPY_MODEL_e79cc22d087b4f07b727f4b8b7dd81a6","value":695}},"0bc3ed75fd084091b307f878b1d1d959":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_temp
late_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0c8d2cc782dc405fa280f281f7c74df6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0d6056d406f9496b92228ebc3e5a6fcf":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ceb1fe3552024c309dad1392ced3b721","IPY_MODEL_8d5bfdc58a2a4b14baae6e73e3a703b6","IPY_MODEL_e256341c3df04d0a87b3865fdce29d22"],"layout":"IPY_MODEL_ab176aa8c4884f069ae0e19e65760070"}},"0ecafc8c711d4532b6eff44e9aa9c9aa":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0ef794ecfa144a84b34592353ad7a6ea":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_79fdfe0e7a9e4e38855567b3b204686a","placeholder":"ā€‹","style":"IPY_MODEL_c8dab5e9f6a94fddb82d81135b433cef","value":"sentence_bert_config.json:ā€‡100%"}},"1415f7cb259f461a8554c9f68680633f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"1698e05b2ca34fc3ab29e8958487e41a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":
null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":"hidden","width":null}},"16caf75cbaf84be08ab28e4af7c80460":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"1774f38ee20c401c9fee5f5c719c55fa":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_e69e65df44a644b1a2c61a62547f218e","max":704,"min":0,"orientation":"horizontal","style":"IPY_MODEL_d227481530984a52bb58265ffbf3095a","value":704}},"178c185d1fb74f2aaac9f9d09612ba18":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"19a009a472e34399a4857a80bb34da71":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"1ac8e96e17174598806649d207a7c83a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"","description":"","description_tooltip":null,"layout":"IPY_MODEL_b33f5a46b833403f8ff7c46f52885c5a","max":284,"min":0,"orientation":"horizontal","style":"IPY_MODEL_068a2d12c56247a9b3eb7c7dc28f77ff","value":284}},"1e85b700cd66441897a35f9018efd73c":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_modu
le":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"1f259f3cd5b544efbca85fc58b1a46b1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_08f9e2ef423343d0ba69769ba75ecccd","placeholder":"ā€‹","style":"IPY_MODEL_ec34c675cd944c92bd81071fbc1abdd6","value":"ā€‡297/297ā€‡[00:00<00:00,ā€‡12.1kB/s]"}},"2165a51b7d0a4440900a3fdb4152b1ff":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"221f65af387442f0bf76fe785345ec38":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2404dc4e606f4829af0a2196e2028092":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_de6399b9118541bb97029ca88be886f4","placeholder":"ā€‹","style":"IPY_MODEL_e0dfd110049c4c7ab6e56fb6b485ed5c","value":"ā€‡1370/1370ā€‡[09:42<00:00,ā€‡ā€‡2.43s/it]"}},"26e4773ba74a4fb1a079616d1dfe3475":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_are
as":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"280ffc9a3dc84cfeabcf938df8ba418d":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"28c229dc3f944e6c906fdb419745de82":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"290fd0c27c6945f18e2b1a8dbbd8338b":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2b9653890ab449ddb8463d2c896ef93c":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_975aa2789cee4d2f857c1a22306f3e35","max":1370,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b196574881d242fb9cd9cc1dbb91787e","value":1370}},"33aef8c6f108473fa69075c96fb05afa":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"376b21af9a924f9fac4d4a76adb2c279":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_vi
ew_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"38b4e993c113467d8adae56f03009831":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"3da23de9f9b54994b79b6155565185e6":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3e9c125122bf4a4697c4c66d0081db02":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3f046ecb4256477da5a045e0ea65fd71":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view
_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3fe327e21036451abe9b8b28cecf0841":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_caa69772cb2b4d3cbd1ec60e7899e823","placeholder":"ā€‹","style":"IPY_MODEL_d6b8024119f046c080d3956104b9107b","value":"ā€‡274/274ā€‡[03:10<00:00,ā€‡ā€‡1.11s/it]"}},"43df5336d3db4cf8879d4dcdb40b658c":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_bef06a1d1d454345a1a5a9a1f5aeafab","IPY_MODEL_829848e303ed4608ac7a229a4fa8f1de","IPY_MODEL_3fe327e21036451abe9b8b28cecf0841"],"layout":"IPY_MODEL_851e7747d1844e97a1b45117224ca722"}},"453cbd863055429cbe6bacba0af0ed0f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ee5b9d6e096b46ae88e0537c3a6e8b24","max":107,"min":0,"orientation":"horizontal","style":"IPY_MODEL_d9d573c7dd5b491dac2fc3643b452200","value":107}},"46db05876ac44068a90d9ee879686fc0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"47079c20689f41beb821576cb7382df6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4747b53781fb4d37adea4c21b68cee33":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"Layou
tModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"47baaee191a24711a678c45960ba3d7b":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_d3f2eca4202040f1b02458f135db7feb","max":711649,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b62d4c81354c478487b09905b9f4e73f","value":711649}},"49bebe2cfede420e8f18ff548cd99a53":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"49fa14ed71b24d85b0dc2cf9d12e36d8":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_dc4ba5a620274f73afa7027726232693","IPY_MODEL_fa9ba492445740f2a7388d05a8a0a995","IPY_MODEL_e112800f289e4e3a8be21ba1f7db68b5"],"layout":"IPY_MODEL_4747b53781fb4d37adea4c21b68cee33"}},"4b1fa36e00af4a63b4eaca5f84b61472":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4b817c1894314519a6d33a543e3ece72":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7f7b3318b9a8470bb06db047193dbae1","placeholder":"ā€‹","style":"IPY_MODEL_8279fd35681c425e8377a135933df2b7","value":"special_tokens_map.jso
n:ā€‡100%"}},"4bd26e6ae79f4f3d8e9cf2f42ad3ac26":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4cb6117209d540faa71410c223299f41":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4d046a1ecbef4a20a8d662a57de340e7":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4d66c93a9b3a49659b24dc3236ac61fb":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"502dbd5a88af4028b1
7cc7ee59995879":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"549e74620f154e2d81e5242b4b09d5d4":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"54a35e95d97c441d88cf79665735ecd7":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_05f5187bbe264efd994ca80561f3f4e1","IPY_MODEL_cf975ac612d94ae0824774b5e667512d","IPY_MODEL_c4970fd5d1f24a32a78e9135774d8838"],"layout":"IPY_MODEL_0bc3ed75fd084091b307f878b1d1d959"}},"555b75f0e8e74ecab149d30af40222fd":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_abb5eb80075d47bd924df7f8aac33f97","IPY_MODEL_1ac8e96e17174598806649d207a7c83a","IPY_MODEL_b524d79fbdca441d8cc51f90a5ba9413"],"layout":"IPY_MODEL_1698e05b2ca34fc3ab29e8958487e41a"}},"56d1f705145e4278b5c5666136464c2f":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":nu
ll,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"57e5dc406f7e4b289bab86df40762019":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"58353a440c6a4adc8c0fc4af31973076":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_80f3b196e67747918589c7c0942e77c4","max":84541,"min":0,"orientation":"horizontal","style":"IPY_MODEL_38b4e993c113467d8adae56f03009831","value":84541}},"5d83067e2d36472d9a7fbb6dbe54ec9a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6301ae6e47e14ee3a965e672c7ae0e61":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_eddb3decc41c45dcb94b229d4f2b43a9","IPY_MODEL_73f2f74333fa4334aa69bebf4a14c4de","IPY_MODEL_f73c1d3c8c784131ae97d78185091ea2"],"layout":"IPY_MODEL_a6c08a77b7cf4b57bccf7ae6840b1824"}},"651a63696f3546d19fa7fc457e90acd0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxM
odel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a9e5c2b666cd4b1bb1f8e2d3f724b909","IPY_MODEL_b3e98c6b6bd54b018709f71e00abe2cf","IPY_MODEL_1f259f3cd5b544efbca85fc58b1a46b1"],"layout":"IPY_MODEL_7860820e7de9416b8549de4710af99fc"}},"69529419f3e74cbe957ebcabdabd6245":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"699b9a47c86a4c839f29f35325f5976a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6b4568d54d3d44d6b969f9d7edb4a3e6":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"vi
sibility":null,"width":null}},"6bac147a849b4c86bab8e2d0598529d1":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6bcb3569f5144b4fb0fcaa55671b167e":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_78cd857069834e34b84c73e0147b5e9f","placeholder":"ā€‹","style":"IPY_MODEL_290fd0c27c6945f18e2b1a8dbbd8338b","value":"README.md:ā€‡100%"}},"6dcb21c8ad064ef2a0ba4f8c308bb955":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b477f5488ddc4bf991a9476c094454d9","placeholder":"ā€‹","style":"IPY_MODEL_f69fc7fda80d4bec984617f3c804a723","value":"ā€‡107/107ā€‡[00:00<00:00,ā€‡7.87kB/s]"}},"72df565437094492a84512d4a4be07f8":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"73f2f74333fa4334aa69bebf4a14c4de":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_56d1f705145e4278b5c5666136464c2f","max":252,"min":0,"orientation":"horizontal","style":"IPY_MODEL_1415f7cb259f461a8554c9f68680633f","value":252}},"7860820e7de9416b8549de4710af99fc":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_vie
w_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"786341e37e1340f4ac793a8b81803129":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_03fbf3aba7874ce5bc30bf447ba6f015","placeholder":"ā€‹","style":"IPY_MODEL_19a009a472e34399a4857a80bb34da71","value":"ā€‡695/695ā€‡[00:00<00:00,ā€‡45.6kB/s]"}},"78cd857069834e34b84c73e0147b5e9f":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"79fdfe0e7a9e4e38855567b3b204686a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7a24922aee63498a804d0979c8043499":{"model_module":"@jupyter-widgets/controls","model
_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7c010e9e49d54c8a9039728f738bce85":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7dc5e6226139451c987d61a102743bbe":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7f0fda7f909043b68762830fc945d649":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7f7b3318b9a8470bb06db047193dbae1":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"ju
stify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"80f3b196e67747918589c7c0942e77c4":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"812ef2fab08d41e9bb0b0a6607f06491":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_946552ad2b50452789b313730bc86022","IPY_MODEL_47baaee191a24711a678c45960ba3d7b","IPY_MODEL_ba11b7c4f3dd4c7c96300718c3ab2d9a"],"layout":"IPY_MODEL_7dc5e6226139451c987d61a102743bbe"}},"8142cd3e211940a49391a3bb8414aae1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"8279fd35681c425e8377a135933df2b7":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"829848e303ed4608ac7a229a4fa8f1de":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_7c010e9e49d54c8a9039728f738bce85","max":274,"min":0,"orientation":"horizontal","style":"IPY_MODEL_28c229dc3f944e6c906fdb419745de82","value":274}},"851e7747d1844e97a1b45117224ca722":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widg
ets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"89a1bb37e141423395bacf6e997e9bad":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8c3bafba10694ca28921512ae856ae0e":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8d5bfdc58a2a4b14baae6e73e3a703b6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ef1e019ced5a42aeaed7dc903158b2fa","max":231508,"min":0,"orientation":"horizontal","style":"IPY_MODEL_1e85b700cd66441897a35f9
018efd73c","value":231508}},"9398b54a42924fbfa50dc9a2935831d8":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a664c729149648f79971ad62b4bfab3a","placeholder":"ā€‹","style":"IPY_MODEL_0c8d2cc782dc405fa280f281f7c74df6","value":"tokenizer_config.json:ā€‡100%"}},"946552ad2b50452789b313730bc86022":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6bac147a849b4c86bab8e2d0598529d1","placeholder":"ā€‹","style":"IPY_MODEL_7a24922aee63498a804d0979c8043499","value":"tokenizer.json:ā€‡100%"}},"954a7f2aea624788ad86b83448033998":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f71f7bfb17cf48708222cd9b196c252e","placeholder":"ā€‹","style":"IPY_MODEL_549e74620f154e2d81e5242b4b09d5d4","value":"ā€‡1.38k/1.38kā€‡[00:00<00:00,ā€‡103kB/s]"}},"96b1e666843b4e45a310ec18bf750e65":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"975aa2789cee4d2f857c1a22306f3e35":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":n
ull,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9ad3152de7294fd8aed49ccc1d33b60c":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9ef5264e4c8344b19f8c63d13710b16a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a1825027f0e94ae180c1cb7f93b73b7a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a664c729149648f79971ad62b4bfab3a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_conte
nt":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a6c08a77b7cf4b57bccf7ae6840b1824":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a9e5c2b666cd4b1bb1f8e2d3f724b909":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_33aef8c6f108473fa69075c96fb05afa","placeholder":"ā€‹","style":"IPY_MODEL_f7a51fbbe5ab4687b391e70c4412ed5d","value":"1_Pooling/config.json:ā€‡100%"}},"ab176aa8c4884f069ae0e19e65760070":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"abb5eb80075d47bd924df7f8aac33f97":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4d046a1ecbef4a20a8d662a57de340e7","placeholder":"ā€‹","style":"IPY_MODEL_47079c20689f41beb821576cb7382df6","value":"embeddingā€‡nodes:ā€
‡100%"}},"ac18871e773c416795edfad4e34bd739":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_9ad3152de7294fd8aed49ccc1d33b60c","placeholder":"ā€‹","style":"IPY_MODEL_4bd26e6ae79f4f3d8e9cf2f42ad3ac26","value":"config.json:ā€‡100%"}},"ac85f34239514dac9a1f0d4930df51cf":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ae9a7864070c409ea1e88e8fefc0ae27":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_9398b54a42924fbfa50dc9a2935831d8","IPY_MODEL_b20bbe5fa22d43e8bd18f862fada7385","IPY_MODEL_954a7f2aea624788ad86b83448033998"],"layout":"IPY_MODEL_96b1e666843b4e45a310ec18bf750e65"}},"aea9933aa1d0479cb64b2b87f25b971e":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b196574881d242fb9cd9cc1dbb91787e":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b20bbe5fa22d43e8bd18f862fada7385":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_f6b50191cb834e0d8428c15aa7f74067","max":1381,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c05587b017a3468a955ffeaf88c4a5e1","value":1381}},"b33f5a46b833403f8ff7c46f52885c5a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,
"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b3e98c6b6bd54b018709f71e00abe2cf":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_bc0209a663504ae7b139286f2acac053","max":297,"min":0,"orientation":"horizontal","style":"IPY_MODEL_e407bdc397df41f283d3c9ef933b13aa","value":297}},"b477f5488ddc4bf991a9476c094454d9":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b524d79fbdca441d8cc51f90a5ba9413":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5d83067e2d36472d9a7fbb6dbe54ec9a","placeholder":"ā€‹","style":"IPY_MODEL_a1825027f0e94ae180c1cb7f93b73b7a","value":"ā€‡284/284ā€‡[00:15<00:00,ā€‡ā€‡8.19it/s]"}},"b62d4c81354c478487b09905b9f4e73f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ba11b7c4f3dd4c7c96300718c3ab2d9a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyt
er-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_69529419f3e74cbe957ebcabdabd6245","placeholder":"ā€‹","style":"IPY_MODEL_46db05876ac44068a90d9ee879686fc0","value":"ā€‡712k/712kā€‡[00:00<00:00,ā€‡2.11MB/s]"}},"ba302bbc5a1e43488831ee875550a224":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_6bcb3569f5144b4fb0fcaa55671b167e","IPY_MODEL_58353a440c6a4adc8c0fc4af31973076","IPY_MODEL_e7c0a7dfc59642bca13873ff5544e08a"],"layout":"IPY_MODEL_178c185d1fb74f2aaac9f9d09612ba18"}},"bc0209a663504ae7b139286f2acac053":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bef06a1d1d454345a1a5a9a1f5aeafab":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_02b8a307cf454eb2a20b274b7116225f","placeholder":"ā€‹","style":"IPY_MODEL_221f65af387442f0bf76fe785345ec38","value":"Generating:ā€‡100%"}},"c05587b017a3468a955ffeaf88c4a5e1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"c4970fd5d1f24a32a78e9135774d8838":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4d66c93a9b3a49659b24dc3236ac61fb","placeholder":"ā€‹","style":"IPY_MODEL_280ffc9a3dc84cfeabcf938df8ba418d","value":"ā€‡1.34G/1.34Gā€‡[00:14<00:00,ā€‡174MB/s]"}},"c597096
20808466b9d65b390758b4898":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_0ef794ecfa144a84b34592353ad7a6ea","IPY_MODEL_453cbd863055429cbe6bacba0af0ed0f","IPY_MODEL_6dcb21c8ad064ef2a0ba4f8c308bb955"],"layout":"IPY_MODEL_57e5dc406f7e4b289bab86df40762019"}},"c8dab5e9f6a94fddb82d81135b433cef":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"caa69772cb2b4d3cbd1ec60e7899e823":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cae690e4a646486eba8ec346b0f8cf78":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ceb1fe3552024c309dad1392ced3b721":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_9ef5264e4c8344b19f8c63d13710b16a","placeholder":"ā€‹","style":"IPY_MODEL_16caf75cbaf84be08ab28e4af7c80460","value":"vocab.txt:ā€‡100%"}},"cf975ac612d94ae0824774b5e667512d":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_fc2fd4df43344c8f83519be83
38ea79c","max":1336413848,"min":0,"orientation":"horizontal","style":"IPY_MODEL_cae690e4a646486eba8ec346b0f8cf78","value":1336413848}},"d227481530984a52bb58265ffbf3095a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"d3f2eca4202040f1b02458f135db7feb":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d6b8024119f046c080d3956104b9107b":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"d9d573c7dd5b491dac2fc3643b452200":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"dc4ba5a620274f73afa7027726232693":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2165a51b7d0a4440900a3fdb4152b1ff","placeholder":"ā€‹","style":"IPY_MODEL_8142cd3e211940a49391a3bb8414aae1","value":"modules.json:ā€‡100%"}},"de1d0a7c196b43f2896f7328b51d0722":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f3408e40282f45a7bae1d28c9089155c","IPY_MODEL_2b9653890ab449ddb8463d2c896ef93c","IPY_MODEL_2404dc4e606f4829af0a2196e2028092"],"lay
out":"IPY_MODEL_89a1bb37e141423395bacf6e997e9bad"}},"de6399b9118541bb97029ca88be886f4":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e0dfd110049c4c7ab6e56fb6b485ed5c":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e112800f289e4e3a8be21ba1f7db68b5":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_699b9a47c86a4c839f29f35325f5976a","placeholder":"ā€‹","style":"IPY_MODEL_aea9933aa1d0479cb64b2b87f25b971e","value":"ā€‡349/349ā€‡[00:00<00:00,ā€‡22.8kB/s]"}},"e256341c3df04d0a87b3865fdce29d22":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8c3bafba10694ca28921512ae856ae0e","placeholder":"ā€‹","style":"IPY_MODEL_7f0fda7f909043b68762830fc945d649","value":"ā€‡232k/232kā€‡[00:00<00:00,ā€‡9.38MB/s]"}},"e407bdc397df41f283d3c9ef933b13aa":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e69e65df44a644b1a2c61a62547f218e":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null
,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e73a9f990c1747aaaeef8b429d09eee1":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e79cc22d087b4f07b727f4b8b7dd81a6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e7c0a7dfc59642bca13873ff5544e08a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_502dbd5a88af4028b17cc7ee59995879","placeholder":"ā€‹","style":"IPY_MODEL_fcba7005ef174314ac6a607c97291fcb","value":"ā€‡84.5k/84.5kā€‡[00:00<00:00,ā€‡965kB/s]"}},"e8967112977f4511baf4841c45245f14":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_4b817c1894314519a6d33a543e3ece72","IPY_MODEL_0b5b8501d1c64430997aade25c414422","IPY_MODEL_786341e37e1340f4ac793a8b81803129"],"layout":"IPY_MODEL_3f046ecb4256477da5a045e0ea65fd71"}},"ec34c675cd944c92bd81071fbc1abdd6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_coun
t":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"eddb3decc41c45dcb94b229d4f2b43a9":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3e9c125122bf4a4697c4c66d0081db02","placeholder":"ā€‹","style":"IPY_MODEL_4b1fa36e00af4a63b4eaca5f84b61472","value":"config_sentence_transformers.json:ā€‡100%"}},"ee5b9d6e096b46ae88e0537c3a6e8b24":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ef1e019ced5a42aeaed7dc903158b2fa":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f3408e40282f45a7bae1d28c9089155c":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4cb6117209d540faa71410c223299f41","placeholder":"ā€‹","style":"IPY_MODEL_0ecafc8c711d4532b6eff44e9aa9c9aa","value":"Evaluating:ā€‡100%"}},"f46e5e9931884fe790fc846ba512cd56":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_mode
l_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f69fc7fda80d4bec984617f3c804a723":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f6b50191cb834e0d8428c15aa7f74067":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f71f7bfb17cf48708222cd9b196c252e":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f73c1d3c8c784131ae97d78185091ea2":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6b4568d54d3d44d6b969f9d7edb4a3e6","placeholder":"ā€‹","style":"IPY_MODEL_f46e5e9931884fe790fc846ba512cd56","value":"ā€‡252/252ā€‡[00:00<00:00,ā€‡15.9kB/s]"}},"f7a51fbbe5ab4687b391e70c4412ed5d":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_versi
on":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"fa9ba492445740f2a7388d05a8a0a995":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3da23de9f9b54994b79b6155565185e6","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_72df565437094492a84512d4a4be07f8","value":349}},"fc2fd4df43344c8f83519be8338ea79c":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fcba7005ef174314ac6a607c97291fcb":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"fe62df73225f4b39a40780daf353f669":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ff75bde6e9e34ddda3ab14358e32c9d5":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_
name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_26e4773ba74a4fb1a079616d1dfe3475","placeholder":"ā€‹","style":"IPY_MODEL_49bebe2cfede420e8f18ff548cd99a53","value":"ā€‡704/704ā€‡[00:00<00:00,ā€‡42.2kB/s]"}}}}},"nbformat":4,"nbformat_minor":0} diff --git a/Tasks/Task 3/RAGAS_Key_Metrics_Summary.csv b/Tasks/Task 3/RAGAS_Key_Metrics_Summary.csv new file mode 100644 index 0000000000000000000000000000000000000000..9765bc5074e4fb8d650454a4a2c17ccef184fe12 --- /dev/null +++ b/Tasks/Task 3/RAGAS_Key_Metrics_Summary.csv @@ -0,0 +1,6 @@ +,Mean,Standard Deviation,25th Percentile,50th Percentile (Median),75th Percentile +faithfulness,0.5825636951112327,0.40610877667716644,0.08083333333333333,0.7,1.0 +answer_relevancy,0.9421626112319366,0.1563145734421645,0.9458433953002494,0.974098728728228,0.9933414393143032 +context_recall,0.27157629475147727,0.3958207397121211,0.0,0.0,0.5 +context_precision,0.44603102186282584,0.48784278923685864,0.0,0.0,0.9999999999 +answer_correctness,0.6178986199147979,0.2533140961114008,0.38881797261860573,0.6183298094933991,0.8654874927504908 diff --git a/Tasks/Task 3/SDG-Generation-logs b/Tasks/Task 3/SDG-Generation-logs new file mode 100644 index 0000000000000000000000000000000000000000..7e5c44bb994702ae36348e1daea9eff833bd19c8 --- /dev/null +++ b/Tasks/Task 3/SDG-Generation-logs @@ -0,0 +1,2939 @@ +WARNING:ragas.testset.docstore:Filename and doc_id are the same for all nodes. +Generating:ā€‡100% +ā€‡274/274ā€‡[03:10<00:00,ā€‡ā€‡1.11s/it] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Synthetic training data', 'Model collapse', 'Environmental impact', 'GAI systems', 'Carbon capture programs'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Equitable design', 'Automated systems', 'Legal protections', 'Proactive equity assessments'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Ongoing monitoring', 'Clear organizational oversight', 'High-quality data', 'Governance procedures'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Automated systems', 'Bias testing', 'Equitable design', 'Systemic biases'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Sensitive domains', 'Enhanced data protections', 'Automated systems', 'Historical discrimination'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Predictive policing system', 'Gun violence risk assessment', 
'Watch list transparency', 'System flaws in benefit allocation', 'Lack of explanation for decisions'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data Privacy', 'Privacy Act of 1974', 'NIST Privacy Framework', 'Biometric identifying technology', 'Workplace surveillance'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Sensitive data', 'Sensitive domains', 'Surveillance technology', 'Underserved communities'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Technical protections', 'Rights of the American public', 'Implementation of principles'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Automated systems', 'Timely human consideration', 'Fallback and escalation process', 'Sensitive domains'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Risk assessment', 'Explanatory mechanisms', 'Transparency in decision-making', 'Summary reporting'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Automated systems', 'Bias testing', 'Equitable design', 'Systemic biases'] +[ragas.testset.evolutions.INFO] seed question generated: "What role do legal protections play in addressing algorithmic discrimination?" +[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to assess the environmental impact of AI model training and management activities?" +[ragas.testset.evolutions.INFO] seed question generated: "What role has historical discrimination played in the need for enhanced data protections in sensitive domains?" +[ragas.testset.evolutions.INFO] seed question generated: "What actions were taken by the New York state legislature regarding biometric identifying technology in schools?" +[ragas.testset.evolutions.INFO] seed question generated: "What should be included in the governance procedures for the development or use of automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when using automated systems in sensitive domains?" 
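Log entries like the above (context scoring, keyphrase extraction, seed questions, and [ReasoningEvolution] steps) are emitted by ragas's evolution-based test-set generator. For reference, a minimal sketch of the generating call, assuming the ragas 0.1.x API, an `OPENAI_API_KEY` in the environment, and `documents` already loaded from the two PDFs:

```python
# Minimal sketch of the ragas synthetic-test-set generation that produces
# logs like the ones above. Assumptions: ragas 0.1.x, OPENAI_API_KEY set,
# and `documents` is a list of LangChain Documents parsed from the two PDFs.
from ragas.testset.generator import TestsetGenerator
from ragas.testset.evolutions import simple, reasoning, multi_context

# with_openai() wires up default generator/critic LLMs and embeddings.
generator = TestsetGenerator.with_openai()

# Each seed question may be "evolved" (e.g. the [ReasoningEvolution] entries
# above) according to this distribution; low-scoring contexts are filtered
# out and retried, which is what the "retrying evolution" lines show.
testset = generator.generate_with_langchain_docs(
    documents,
    test_size=20,  # number of question/ground-truth pairs to synthesize
    distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25},
)

test_df = testset.to_pandas()  # question, contexts, ground_truth, evolution_type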
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential consequences of using automated systems without protections against algorithmic discrimination?" +[ragas.testset.evolutions.INFO] seed question generated: "What should be included in the summary reporting for automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of transparency in the context of watch lists used by predictive policing systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What measures are proposed in the Blueprint for an AI Bill of Rights to protect the rights of the American public?" +[ragas.testset.evolutions.INFO] seed question generated: "What does the term 'underserved communities' refer to in the context of the AI Bill of Rights?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the potential consequences of using automated systems without protections against algorithmic discrimination?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'User consent', 'Automated systems', 'Surveillance technologies', 'Sensitive domains'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic and data-driven harms', 'AI Bill of Rights', 'Panel discussions', 'Consumer rights and protections', 'Automated systems'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Algorithmic discrimination', 'Equity assessments', 'Representative data', 'Proactive testing'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Sensitive domains', 'Enhanced data protections', 'Automated systems', 'Historical discrimination'] +[ragas.testset.evolutions.INFO] seed question generated: "What are the reasons for implementing enhanced data protections in sensitive domains?" +[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to ensure that surveillance technologies do not infringe on privacy and civil liberties?" +[ragas.testset.evolutions.INFO] seed question generated: "What was the purpose of the panel discussions organized by the OSTP in relation to the Blueprint for an AI Bill of Rights?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of proactive testing in the context of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for using automated systems in sensitive domains. It is clear in its intent, seeking information on factors to consider, and does not rely on external references or unspecified contexts. The question is specific enough to be understood and answered by someone with domain knowledge in automated systems and sensitive domains.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the actions taken by the New York state legislature regarding biometric identifying technology in schools. 
It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge and clearly seeks information about legislative actions in a specific context.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the elements that should be included in governance procedures for the development or use of automated systems. It is clear in its intent, seeking specific information about governance procedures, and does not rely on external references or unspecified contexts. The question is self-contained and understandable, making it answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What should be included in the governance procedures for the development or use of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of historical discrimination in the need for enhanced data protections in sensitive domains. It is clear in its intent, seeking an explanation of the connection between past discrimination and current data protection needs. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role has historical discrimination played in the need for enhanced data protections in sensitive domains?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the definition of 'underserved communities' specifically within the context of the AI Bill of Rights. It is clear in its intent, seeking a specific explanation of a term within a defined context. The question is self-contained and does not rely on external references or prior knowledge beyond what is provided in the question itself. Therefore, it meets the criteria for clarity and answerability.", 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of legal protections in addressing algorithmic discrimination. It is clear in specifying the topic of interest (legal protections and algorithmic discrimination) and seeks information on the impact or function of these protections. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to assess the environmental impact of AI model training and management activities. It is clear in specifying the topic of interest (environmental impact) and the context (AI model training and management activities). The intent is straightforward, seeking specific measures or methods for assessment. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the measures proposed in the 'Blueprint for an AI Bill of Rights' to protect the rights of the American public. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. 
The question does not rely on external references or unspecified contexts, and it clearly seeks information about the proposed measures in the specified document.", 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential consequences of using automated systems without protections against algorithmic discrimination. It is clear in its intent, seeking information on the outcomes or risks associated with the lack of safeguards against bias in automated systems. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the potential consequences of using automated systems without protections against algorithmic discrimination?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential consequences of using automated systems without protections against algorithmic discrimination. It is clear in its intent, seeking information on the outcomes or impacts of a specific scenario (lack of protections against algorithmic discrimination in automated systems). The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of transparency in the context of watch lists used by predictive policing systems. It is clear in specifying the topic of interest (transparency) and the specific context (watch lists in predictive policing systems). The intent is to understand the significance of transparency within this specific application, making it understandable and answerable based on the details provided. No additional context or external references are needed to comprehend or respond to the question.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what should be included in the summary reporting for automated systems. While it is clear in its intent to seek information on the components of summary reporting, it is somewhat broad and could benefit from specifying the type of automated systems or the context in which the summary reporting is being used (e.g., performance metrics, error rates, user interactions). Providing more detail would help narrow down the scope and make the question more specific and answerable.', 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What should be included in the summary reporting for automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the purpose of the panel discussions organized by the OSTP in relation to the Blueprint for an AI Bill of Rights. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of the panel discussions.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What was the purpose of the panel discussions organized by the OSTP in relation to the Blueprint for an AI Bill of Rights?" 
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure that surveillance technologies do not infringe on privacy and civil liberties. It is clear in its intent, seeking specific actions or strategies to address the potential conflict between surveillance and individual rights. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of proactive testing within the context of automated systems. It is clear in specifying the topic of interest (proactive testing) and the context (automated systems), making the intent straightforward. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of proactive testing in the context of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the reasons behind implementing enhanced data protections in sensitive domains. It is clear in its intent, seeking an explanation for the rationale behind such measures. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. However, it could be improved by specifying what is meant by 'sensitive domains' (e.g., healthcare, finance) to provide more context and focus for the answer.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The term 'underserved communities' refers to communities that have been systematically denied a full opportunity to participate in aspects of economic, social, and civic life.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'When using automated systems in sensitive domains, considerations should include tailoring the systems to their intended purpose, providing meaningful access for oversight, ensuring training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions. Additionally, there should be a focus on accessibility, equity, effectiveness, and the maintenance of these systems, along with public reporting on human governance processes and their outcomes.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key elements that ensure effective governance in the development and use of automated systems. It is clear in its intent, seeking specific information about governance elements, and does not rely on external references or unspecified contexts. The question is self-contained and understandable, making it answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The New York state legislature banned the use of facial recognition systems and other biometric identifying technology in schools until July 1, 2022. 
Additionally, the law requires that a report on the privacy, civil rights, and civil liberties implications of the use of such technologies be issued before biometric identification technologies can be used in New York schools.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key elements ensure effective governance in automated system development and use?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['OSTP', 'Artificial intelligence', 'Biometric technologies', 'Request For Information (RFI)', 'Public comments'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Transparency is important in the context of watch lists used by predictive policing systems because both police and the public deserve to understand why and how the system makes its determinations. Without transparency, individuals may be placed on a watch list without explanation, leading to a lack of accountability and understanding of the system's conclusions.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The Blueprint for an AI Bill of Rights proposes a set of five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. It includes expectations for automated systems, practical steps for implementation, and emphasizes transparency through reporting to ensure that rights, opportunities, and access are respected.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Technical protections', 'Rights of the American public', 'Implementation of principles'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The potential consequences of using automated systems without protections against algorithmic discrimination include inequitable outcomes, wrongful and discriminatory arrests due to facial recognition technology, discriminatory hiring decisions informed by biased algorithms, and healthcare algorithms that may discount the severity of diseases in certain racial groups. These issues can lead to systemic biases being amplified and harm to underserved communities.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Enhanced data protections in sensitive domains are implemented due to the intimate nature of these domains, the inability of individuals to opt out meaningfully, and the historical discrimination that has often accompanied data knowledge. Additionally, the protections afforded by current legal guidelines may be inadequate given the misuse of tracking technologies and the extensive data footprints individuals leave behind. The American public deserves assurances that data related to sensitive domains is protected and used appropriately, only in narrowly defined contexts with clear benefits to individuals and society.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for proactive steps to ensure that automated systems avoid algorithmic discrimination and promote equity. It is clear in its intent, specifying the desired outcome (avoiding discrimination and promoting equity) and the context (automated systems). 
The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the outcomes of automated systems that lack safeguards against bias in the contexts of hiring and justice. It is clear in its intent, seeking information on the consequences of such a lack of safeguards. The question is specific and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the main principles outlined in the AI Bill of Rights and how do they aim to protect the rights of the American public?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role does artificial intelligence play in the governance and use of biometric technologies according to the OSTP's Request For Information?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Surveillance technologies should be subject to heightened oversight that includes at least pre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring should not be used in education, work, housing, or in other contexts where the use of such surveillance technologies is likely to limit rights, opportunities, or access.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions that algorithmic discrimination may violate legal protections depending on specific circumstances, indicating that legal protections play a role in addressing algorithmic discrimination.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Surveillance and data collection', 'Consumer data protection', 'Automated systems', 'Mental health impacts']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What proactive steps ensure automated systems avoid algorithmic discrimination and promote equity?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What outcomes arise from automated systems lacking safeguards against bias in hiring and justice?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks for what should be included in governance procedures, implying a detailed list or framework. The second question is broader, asking what ensures good governance, which could include principles, practices, or outcomes. Thus, they differ in depth and breadth.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the insights that the Office of Science and Technology Policy (OSTP) aimed to gather from diverse experts during panel discussions for the AI Bill of Rights. It is clear in specifying the context (OSTP, AI Bill of Rights) and the type of information sought (insights from panel discussions). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The suggested measures to assess the environmental impact of AI model training and management activities include: 1) Assessing safety to physical environments when deploying GAI systems, 2) Documenting anticipated environmental impacts of model development, maintenance, and deployment in product design decisions, 3) Measuring or estimating environmental impacts such as energy and water consumption for training, fine-tuning, and deploying models, and verifying trade-offs between resources used at inference time versus additional resources required at training time, and 4) Verifying the effectiveness of carbon capture or offset programs for GAI training and applications, while addressing green-washing concerns.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the mental health impacts associated with increased use of surveillance technologies in schools and workplaces?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Disinformation and misinformation', 'Generative AI models', 'Information security risks', 'Cybersecurity attacks']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 3, 'score': 1.75}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Algorithmic discrimination protections', 'Equitable design', 'Independent evaluation and reporting']
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What insights did OSTP aim to gather from diverse experts during panel discussions for the AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Reporting expectations', 'Transparency', 'Artificial Intelligence ethics', 'Traffic calming measures', 'AI Risk Management Framework']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Sensitive domains', 'Predictive analytics', 'Student data collection', 'Employee data transfer']
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with generative AI models in the context of disinformation and cybersecurity?"
+[ragas.testset.evolutions.INFO] seed question generated: "What protections does the AI Bill of Rights provide against algorithmic discrimination?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'AI Bill of Rights', 'Civil rights and liberties', 'Equal opportunities', 'Access to critical resources']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of proactive testing in automated systems, while the second question addresses methods to prevent algorithmic bias. These are distinct topics with different constraints and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Technical protections', 'Rights of the American public', 'Implementation of principles']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question addresses the broader consequences of using automated systems without protections against algorithmic discrimination, while the second question focuses specifically on issues in hiring and justice, leading to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about historical factors that necessitate extra data protections in sensitive domains like health and finance. It is clear in specifying the domains of interest (health and finance) and the type of information sought (historical factors necessitating extra data protections). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are some concerns related to data privacy in the context of sensitive domains?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Generative AI Public Working Group', 'GAI risk management', 'Governance', 'Content Provenance', 'AI lifecycle risks']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the main principles outlined in the AI Bill of Rights and how they aim to protect the rights of the American public. It is clear in specifying the document of interest (AI Bill of Rights) and seeks detailed information on both the principles and their protective measures. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the main principles outlined in the AI Bill of Rights and how do they aim to protect the rights of the American public?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do technical protections play in the implementation of the Blueprint for an AI Bill of Rights?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What historical factors necessitate extra data protections in sensitive domains like health and finance?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the different stages of the AI lifecycle where risks can arise?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of the panel discussions, while the second question seeks specific insights from experts. These inquiries have different depths and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of artificial intelligence in the governance and use of biometric technologies according to the OSTP's Request For Information. While it specifies the topic (AI's role in biometric technologies) and the source (OSTP's Request For Information), it assumes familiarity with the specific document without providing its content or context. This makes the question unclear for those who do not have access to or knowledge of the OSTP's Request For Information. To improve clarity and answerability, the question could include a brief summary or key points from the OSTP's document, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What role does artificial intelligence play in the governance and use of biometric technologies according to the OSTP's Request For Information?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the mental health impacts associated with the increased use of surveillance technologies in schools and workplaces. It is clear in specifying the context (schools and workplaces) and the focus (mental health impacts), making the intent straightforward. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what should be included in the summary reporting for automated systems. It is clear in its intent, seeking specific information about the components or elements that should be part of such a summary report. The question is independent and does not rely on external references or additional context to be understood. However, it could be improved by specifying the type of automated systems (e.g., software, industrial automation) to provide more precise guidance.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Good governance in automated systems is ensured by laying out clear governance structures and procedures, which include clearly-stated governance procedures before deploying the system, as well as the responsibility of specific individuals or entities to oversee ongoing assessment and mitigation. Organizational stakeholders should be involved in establishing these governance procedures, and responsibility should rest high enough in the organization to allow for prompt decision-making regarding resources, mitigation, incident response, and potential rollback. Additionally, those in charge should be aware of any use cases with the potential for meaningful impact on people's rights, opportunities, or access, and it may be appropriate for an independent ethics review to be conducted before deployment.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the potential risks associated with generative AI models in the context of disinformation and cybersecurity. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on risks related to two specific areas: disinformation and cybersecurity.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the potential risks associated with generative AI models in the context of disinformation and cybersecurity?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the protections provided by the AI Bill of Rights against algorithmic discrimination. It is specific in its focus on the AI Bill of Rights and the particular issue of algorithmic discrimination. The intent is clear, seeking information on the protections offered. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of the AI Bill of Rights and algorithmic discrimination.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology (NIST). It is specific, independent, and has a clear intent, seeking information about the purpose of a particular framework from a specific organization. The question does not rely on external references or additional context beyond what is provided within the question itself.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically focuses on the impact of historical discrimination on the need for enhanced data protections, while the second question broadly asks about the drivers of extra data protections in health and finance without mentioning historical discrimination. This leads to different depths and focuses of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about concerns related to data privacy in sensitive domains. It is clear in its intent, seeking information on potential issues or challenges in this area. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. However, it could be improved by specifying what is meant by 'sensitive domains' (e.g., healthcare, finance) to narrow down the scope and provide a more focused answer.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The mental health impacts associated with increased use of surveillance technologies in schools and workplaces include lowered self-confidence, anxiety, depression, and a reduced ability to use analytical reasoning.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of technical protections in the implementation of the Blueprint for an AI Bill of Rights. It is clear in specifying the topic of interest (technical protections) and the context (Blueprint for an AI Bill of Rights). The intent is to understand the specific contributions or functions of technical protections within this framework. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights. It is clear in its intent, seeking specific examples of automated systems relevant to the AI Bill of Rights. The question is independent and does not rely on external references or prior knowledge beyond a general understanding of automated systems and the concept of an AI Bill of Rights. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the five principles of the AI Bill of Rights and how they ensure public protection. It is specific, independent, and has a clear intent. The question does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Trustworthy AI', 'Transparency policies', 'Risk management activities', 'Information integrity', 'GAI capabilities']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the different stages of the AI lifecycle where risks can arise. It is clear in specifying the topic of interest (stages of the AI lifecycle) and seeks detailed information on potential risks at each stage. The question is self-contained and does not rely on external references or prior knowledge not shared within the question itself. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Biased automated systems in hiring can lead to discriminatory decisions, such as hiring tools that reject women applicants for spurious reasons, penalizing resumes with the word 'women's'. In the justice system, predictive models can disproportionately label Black students as high risk of dropping out, and risk assessment tools can overpredict recidivism for some groups of color, leading to unfair treatment and outcomes.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To prevent algorithmic bias in automated systems, proactive equity assessments should be conducted during the design phase to identify potential discrimination and effects on equity. Data used in system development should be representative and reviewed for bias, and the use of demographic information should be avoided to prevent algorithmic discrimination. Proactive testing should be performed to identify and remove proxies that may lead to discrimination, and organizations should monitor systems closely for any resulting algorithmic discrimination.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'OSTP sought insights and analysis on the risks, harms, benefits, and policy opportunities of automated systems from a variety of experts, practitioners, advocates, and federal government officials during the AI Bill of Rights panels. The discussions focused on consumer rights and protections, the criminal justice system, equal opportunities and civil justice, artificial intelligence and democratic values, social welfare and development, and the healthcare system.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What are the five principles of the AI Bill of Rights and how do they ensure public protection?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of artificial intelligence in the governance and use of biometric technologies according to the OSTP's Request For Information. While it specifies the context (OSTP's Request For Information), it assumes the reader has access to or knowledge of this specific document, which is not provided within the question itself. To improve clarity and answerability, the question could include a brief summary or key points from the OSTP's Request For Information relevant to AI and biometric technologies, or it could be rephrased to ask about general trends or findings in this area without relying on a specific document.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What factors should be considered to ensure information integrity in the context of GAI risk management?"
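The `context scoring` entries above rate each candidate context on clarity, depth, structure, and relevance; in every logged entry the overall `score` is the plain mean of the four rubric values, and low-scoring nodes (1.0, 1.25) are followed by a `retrying evolution` entry. A minimal sketch of that gating logic, assuming a mean-and-threshold rule with a cutoff of 1.5 inferred from which scores trigger retries (illustrative only, not ragas's actual implementation):

```python
# Illustrative sketch of the context-scoring gate seen in the logs above.
# The mean-of-four rule matches every logged 'score'; the 1.5 cutoff is an
# assumption inferred from which scores are followed by "retrying evolution".
from dataclasses import dataclass


@dataclass
class ContextScore:
    clarity: int
    depth: int
    structure: int
    relevance: int

    @property
    def score(self) -> float:
        # e.g. {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3} -> 2.5
        return (self.clarity + self.depth + self.structure + self.relevance) / 4


def keep_context(s: ContextScore, threshold: float = 1.5) -> bool:
    # Contexts below the threshold are dropped and the evolution is retried.
    return s.score >= threshold


assert ContextScore(2, 3, 2, 3).score == 2.5        # kept -> keyphrases extracted
assert not keep_context(ContextScore(1, 1, 2, 1))   # 1.25 -> retrying evolution
```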
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Incident response plans', 'Third-party GAI technologies', 'Data privacy', 'Continuous monitoring', 'Vendor contracts']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The summary reporting for automated systems should include: the responsible entities for accountability purposes; the goal and use cases for the system; identified users and impacted populations; the assessment of notice clarity and timeliness; the assessment of the explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment of how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of risk.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The AI Bill of Rights provides protections against algorithmic discrimination by ensuring that individuals should not face discrimination by algorithms. It mandates that systems should be designed and used in an equitable way, taking proactive and continuous measures to protect individuals and communities from algorithmic discrimination. This includes conducting proactive equity assessments, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing, and providing clear organizational oversight. Additionally, independent evaluation and reporting, including algorithmic impact assessments and disparity testing results, should be made public whenever possible to confirm these protections.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Extra data protections in health and finance are driven by the intimate nature of these domains, the inability of individuals to opt out in a meaningful way, and the historical discrimination that has often accompanied data knowledge. Additionally, the potential for material harms, including significant adverse effects on human rights such as autonomy and dignity, civil liberties, and civil rights, necessitates enhanced protections.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Technical protections and practices laid out in the Blueprint for an AI Bill of Rights help guard the American public against many potential and actual harms associated with automated systems. They provide a framework for the design, use, and deployment of these systems to protect the rights of individuals, ensuring transparency and accountability in their operation.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of regularly assessing and verifying security measures in information security?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Risks can arise during the design, development, deployment, operation, and/or decommissioning stages of the AI lifecycle.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the principles of the AI Bill of Rights and their role in protecting the public, requiring similar depth and breadth of information.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when reviewing vendor contracts for third-party GAI technologies?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connection between misinformation risks from GAI (General Artificial Intelligence) and cybersecurity threats in malicious contexts. While it is clear in its intent to explore the relationship between these two areas, it lacks sufficient context to be fully self-contained. The term 'GAI' might not be universally understood without further explanation, and 'malicious contexts' could be interpreted in various ways. To improve clarity and answerability, the question could specify what is meant by 'GAI' and provide examples or a brief description of the 'malicious contexts' being referred to.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What links misinformation risks from GAI to cybersecurity threats in malicious contexts?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to ensure effective human-AI configuration in the context of GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI risks management', 'Risk response options', 'Model release approaches', 'Information security', 'Harmful bias mitigation']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key aspects that ensure transparency in AI systems according to the NIST framework. It is specific in its focus on transparency and the NIST framework, making the intent clear. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What role do automated systems play in the protection of civil rights and democratic values according to the Blueprint for an AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights include: speech-related systems such as automated content moderation tools; surveillance and criminal justice system algorithms like risk assessments and predictive policing; voting-related systems such as signature matching tools; privacy-impacting systems like smart home systems and health-related data systems; education-related systems such as algorithms for detecting student cheating; housing-related systems like tenant screening algorithms; and employment-related systems that inform terms of employment.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key aspects ensure transparency in AI systems as per the NIST framework?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Concerns related to data privacy in sensitive domains include the lack of awareness among patients regarding the use of their medical data by insurance companies, the revelation of personal information (such as pregnancy) through targeted advertising, the monitoring of student conversations which may limit emotional expression and unfairly flag students with disabilities, the use of location data to identify individuals visiting abortion clinics, the collection of sensitive student data without parental consent, and the potential for discriminatory impacts from such data usage. Additionally, there are concerns about the accuracy of employee data transferred to third parties, which can affect job opportunities.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What techniques can be employed to mitigate harmful bias in AI-generated content?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about factors to consider for ensuring information integrity in the context of GAI (General Artificial Intelligence) risk management. It is clear in its intent, specifying the topic (information integrity) and the context (GAI risk management). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated system', 'Plain language documentation', 'System functioning', 'Outcome explanations', 'User notification']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI trustworthy characteristics', 'Human-AI Configuration', 'Information Integrity', 'Data Privacy', 'Confabulation']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Participatory engagement methods', 'Field testing', 'AI red-teaming', 'User feedback', 'Risk management']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Lisa Feldman Barrett', 'Microsoft Corporation', 'National Association for the Advancement of Colored People', 'University of Michigan Ann Arbor', 'OSTP listening sessions']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human subject protection', 'Content provenance', 'Data privacy', 'AI system performance', 'Anonymization techniques']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 1.75}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration']
+[ragas.testset.evolutions.INFO] seed question generated: "What methods can organizations use to collect user feedback during product development?"
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account regarding data privacy when deploying a GAI system?"
+[ragas.testset.evolutions.INFO] seed question generated: "What should designers and developers provide to ensure clear understanding of system functioning in automated systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of using digital content transparency solutions in AI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of regularly assessing and verifying security measures in information security. It is clear in its intent, seeking an explanation of the reasons behind these practices. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of regularly assessing and verifying security measures in information security?"
+[ragas.testset.evolutions.INFO] seed question generated: "What program is associated with the University of Michigan Ann Arbor mentioned in the context?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'AI Actors', 'Unanticipated impacts', 'Information integrity', 'Content provenance']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for considerations when reviewing vendor contracts for third-party GAI (General Artificial Intelligence) technologies. It is clear in its intent, specifying the context (vendor contracts) and the subject matter (third-party GAI technologies). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What criteria are used to measure AI system performance or assurance in deployment settings?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure effective human-AI configuration in the context of GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (human-AI configuration) and the context (GAI systems), making the intent clear. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the overall purpose of the AI Risk Management Framework by NIST, while the second question specifically focuses on AI transparency as per NIST. These questions have different constraints and requirements, leading to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to ensure information integrity in the context of AI systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for automated systems regarding safety and effectiveness?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of automated systems in protecting civil rights and democratic values as outlined in the 'Blueprint for an AI Bill of Rights'. It is specific in its focus on automated systems and their role, and it clearly references a particular document (the Blueprint for an AI Bill of Rights). The intent is clear, seeking an explanation or summary of the role described in the specified document. The question is self-contained and does not rely on external references beyond the mentioned document, making it understandable and answerable given sufficient domain knowledge.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role do automated systems play in the protection of civil rights and democratic values according to the Blueprint for an AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for techniques to mitigate harmful bias in AI-generated content. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge and clearly seeks information on methods to address bias in AI content generation.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What techniques can be employed to mitigate harmful bias in AI-generated content?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connection between misinformation risks from GAI (General Artificial Intelligence) and cybersecurity threats in malicious contexts. While it specifies the two areas of interest (misinformation risks from GAI and cybersecurity threats), it is somewhat vague in its phrasing. The term 'malicious contexts' is broad and could benefit from further specification. Additionally, the question could be clearer by defining what is meant by 'links' (e.g., mechanisms, examples, impacts). To improve clarity and answerability, the question could be reframed to specify the type of links or mechanisms being inquired about and provide more context on what is meant by 'malicious contexts'.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Factors to consider to ensure information integrity in the context of GAI risk management include abuses and impacts to information integrity, dependencies between GAI and other IT or data systems, harm to fundamental rights or public safety, presentation of obscene, objectionable, offensive, discriminatory, invalid or untruthful output, psychological impacts to humans, possibility for malicious use, introduction of significant new security vulnerabilities, anticipated system impact on some groups compared to others, and unreliable decision-making capabilities.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for methods that organizations can use to collect user feedback during product development. It does not rely on external references or prior knowledge and has a clear intent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What methods can organizations use to collect user feedback during product development?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what designers and developers should provide to ensure a clear understanding of system functioning in automated systems. It is specific and independent, as it does not rely on external references or prior knowledge. The intent is clear, seeking information on best practices or necessary elements for clarity in automated systems. No improvements are necessary.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for data privacy when deploying a GAI (General Artificial Intelligence) system. It is clear in its intent, seeking specific information on data privacy aspects related to GAI deployment. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What considerations should be taken into account regarding data privacy when deploying a GAI system?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of using digital content transparency solutions in AI systems. It is clear in specifying the topic of interest (digital content transparency solutions in AI systems) and seeks information on the purpose or rationale behind their use. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of using digital content transparency solutions in AI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about a program associated with the University of Michigan Ann Arbor mentioned in an unspecified context. It is unclear because it refers to 'the context' without providing any details or description of what this context entails. This makes the question dependent on external information that is not included within the query itself. To improve clarity and answerability, the question should specify the context or provide more details about the program of interest.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What program is associated with the University of Michigan Ann Arbor mentioned in the context?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The suggested measures to ensure effective human-AI configuration in the context of GAI systems include documenting the instructions given to data annotators or AI red-teamers (MS-2.8-002) and verifying the adequacy of GAI system user instructions through user testing (MS-2.8-004).', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the criteria used to measure AI system performance or assurance in deployment settings. It is clear in specifying the topic of interest (criteria for measuring AI system performance or assurance) and the context (deployment settings). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What criteria are used to measure AI system performance or assurance in deployment settings?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Algorithmic discrimination', 'Equity assessments', 'Representative data', 'Proactive testing']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What guidelines does the Blueprint for an AI Bill of Rights propose to ensure that automated systems uphold civil rights and democratic principles in the face of technological challenges?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the connections between assessing security measures and ensuring information integrity. It is clear in its intent, seeking to understand the relationship between two specific concepts: security measures and information integrity. The question is self-contained and does not rely on external references or additional context to be understood. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for automated systems in terms of safety and effectiveness. It is clear in its intent, seeking information on the standards or criteria that automated systems should meet regarding these two aspects. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the expectations for automated systems regarding safety and effectiveness?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of proactive testing in the context of automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What diverse strategies can organizations employ to gather user insights during the early stages of product development while ensuring compliance with ethical standards?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Policies and procedures for human-AI configurations', 'Oversight of GAI systems', 'Risk measurement processes', 'Human-AI configuration', 'Threat modeling for GAI systems']
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between assessing security measures and ensuring information integrity?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation that includes clear descriptions of the overall system functioning and the role automation plays.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'When reviewing vendor contracts for third-party GAI technologies, considerations should include avoiding arbitrary or capricious termination of critical GAI technologies or vendor services, avoiding non-standard terms that may amplify or defer liability in unexpected ways, and preventing unauthorized data collection by vendors or third-parties. Additionally, there should be a clear assignment of liability and responsibility for incidents, acknowledgment of GAI system changes over time, and requirements for notification and disclosure for serious incidents arising from third-party data and systems. Service Level Agreements (SLAs) in vendor contracts should also address incident response, response times, and availability of critical support.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What multifaceted factors regarding data privacy and content integrity must be evaluated when implementing a GAI system, particularly in relation to user feedback and the system's operational transparency?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI lifecycle', 'AI technology risks', 'Organizational practices for AI', 'Impact documentation process', 'Content provenance methodologies']
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of engaging in threat modeling for GAI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for methods to address harmful bias in AI outputs while ensuring content integrity. It is clear in its intent, specifying the problem (harmful bias) and the desired outcome (ensuring content integrity). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about a program associated with the University of Michigan Ann Arbor, but it references 'the context' without providing any specific context within the question itself. This makes the question unclear and dependent on external information that is not included. To improve clarity and answerability, the question should specify the context or provide more details about the program of interest.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the impact documentation process in the context of GAI systems?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What methods can be used to address harmful bias in AI outputs while ensuring content integrity?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What criteria should automated systems meet to ensure both safety and the prevention of algorithmic discrimination, and how should these be independently evaluated and reported?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of digital content transparency solutions in ensuring traceability and integrity in AI. It is clear in specifying the topic of interest (digital content transparency solutions) and the aspects it seeks to address (traceability and integrity in AI). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of proactive testing within the context of automated systems. It is clear in specifying the topic of interest (proactive testing) and the context (automated systems), making the intent straightforward. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of proactive testing in the context of automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for diverse strategies that organizations can use to gather user insights during the early stages of product development while ensuring compliance with ethical standards. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking actionable strategies that balance user insight collection with ethical considerations.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What role do digital content transparency solutions play in ensuring traceability and integrity in AI?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of regularly assessing and verifying security measures, while the second question asks about the relationship between security measures and information integrity. These questions have different constraints and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for methods that assess AI performance while ensuring human subject protection and data privacy. It is clear in specifying the dual focus on performance assessment and ethical considerations (human subject protection and data privacy). The intent is straightforward, seeking information on methodologies that balance these aspects. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Prompt injection', 'Indirect prompt injection attacks', 'Data poisoning', 'Intellectual property risks', 'Obscene and degrading content']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the issue of bias in AI-generated content and seek methods to mitigate it while maintaining content integrity. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the guidelines proposed by the Blueprint for an AI Bill of Rights to ensure that automated systems uphold civil rights and democratic principles amidst technological challenges. It is specific in its focus on the Blueprint for an AI Bill of Rights and the type of guidelines it seeks (those ensuring civil rights and democratic principles). The intent is clear, and the question is self-contained, not relying on external references or prior knowledge beyond what is mentioned in the question itself.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies can orgs use to gather user insights ethically in early product dev?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What methods assess AI performance while ensuring human subject protection and data privacy?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Sensitive data', 'Ethical review', 'Data quality', 'Access limitations']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the multifaceted factors related to data privacy and content integrity that need to be evaluated when implementing a GAI (Generative AI) system, with a particular focus on user feedback and the system's operational transparency. It is clear in specifying the areas of interest (data privacy, content integrity, user feedback, operational transparency) and seeks detailed information on these aspects. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of engaging in threat modeling for GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (threat modeling for GAI systems) and seeks an explanation of the purpose behind this activity. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are indirect prompt injection attacks and how do they exploit vulnerabilities in GAI-integrated applications?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What does the AI Bill of Rights suggest for protecting civil rights in tech?"
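The `evolution filter` entries above compare an evolved question against its seed: a verdict of 0 (meaningfully different) lets the evolution proceed, while a verdict of 1 (too similar) produces the `evolution_filter failed, retrying with 1` lines. A rough reconstruction of that retry loop, purely illustrative and not ragas's actual code; `evolve` and `judge_equivalent` stand in for the LLM calls:

```python
# Illustrative sketch of the evolve-then-filter retry loop visible in the
# logs: keep the candidate only if the critic judges it sufficiently
# different from the seed question. Not ragas's actual implementation.
from typing import Callable


def evolve_with_filter(
    seed: str,
    evolve: Callable[[str], str],            # assumed LLM-backed rewriter
    judge_equivalent: Callable[[str, str], bool],  # assumed LLM-backed critic
    max_retries: int = 3,
) -> str:
    for attempt in range(1, max_retries + 1):
        candidate = evolve(seed)
        if not judge_equivalent(seed, candidate):
            return candidate  # verdict 0: sufficiently evolved, keep it
        # mirrors: "evolution_filter failed, retrying with 1"
        print(f"evolution_filter failed, retrying with {attempt}")
    return seed  # give up and fall back to the seed question
```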
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the impact documentation process specifically in the context of GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (impact documentation process) and the context (GAI systems), making the intent of the question straightforward. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the impact documentation process in the context of GAI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for the criteria that automated systems should meet to ensure safety and prevent algorithmic discrimination. It also inquires about how these criteria should be independently evaluated and reported. The intent is clear, and the question is self-contained, not relying on external references or prior knowledge. It is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should entities take to maintain data quality in sensitive domains?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What factors on data privacy and content integrity should be considered for a GAI system, especially regarding user feedback and transparency?"
+[ragas.testset.evolutions.INFO] seed question generated: "What precautions should be taken when using derived data sources in automated systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure information integrity in the context of AI systems. It is clear in specifying the topic of interest (information integrity) and the context (AI systems), making the intent clear and understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it independent and answerable based on domain knowledge.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What measures are suggested to ensure information integrity in the context of AI systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the purpose of using digital content transparency solutions in AI systems, while the second question focuses on how these tools ensure AI traceability and integrity. These questions have different requirements and depths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What proactive measures are essential in the design and evaluation of automated systems to ensure they effectively mitigate algorithmic discrimination and promote equity?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions focus on methods or strategies for collecting user feedback or insights during product development. They share similar constraints and requirements, as well as the same depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology in social welfare', 'Fraud detection', 'Digital ID systems', 'Healthcare access and delivery', 'Health disparities']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What standards should automated systems follow for safety and fairness, and how to assess them?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the criteria used to measure AI system performance or assurance in deployment settings, which is broader. The second question specifically targets AI performance evaluation with a focus on human safety and privacy, leading to different depths and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.evolutions.INFO] seed question generated: "What concerns were raised by panelists regarding healthcare access and delivery in relation to new technologies?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Engaging in threat modeling for GAI systems is intended to anticipate potential risks from these systems.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address data privacy in the context of deploying a GAI system, the second question also includes considerations for content integrity, user feedback, and transparency, leading to a broader scope and different depth of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does the documentation of risks and impacts play in ensuring compliance and effective governance throughout the lifecycle of GAI systems, particularly in relation to external feedback mechanisms?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do healthcare navigators play in helping consumers find health coverage options?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question specifically asks about the role of automated systems in the protection of civil rights and democratic values according to the Blueprint for an AI Bill of Rights, requiring a detailed explanation. The second question is broader, asking generally about the AI Bill of Rights' suggestions for protecting civil rights in tech, which may not necessarily focus on automated systems or democratic values.", 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures that entities should take to maintain data quality in sensitive domains. It is clear in its intent, seeking specific actions or strategies for ensuring data quality. The question is independent and does not rely on external references or unspecified contexts. It is specific enough to be understood and answered by someone with domain knowledge in data quality management or sensitive data handling.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for precautions to be taken when using derived data sources in automated systems. It is clear in specifying the topic of interest (precautions, derived data sources, automated systems) and seeks detailed information on safety or best practices. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What precautions should be taken when using derived data sources in automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for proactive measures in the design and evaluation of automated systems to mitigate algorithmic discrimination and promote equity. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on essential measures for addressing algorithmic discrimination and promoting equity in automated systems.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are some real-life examples of how human alternatives can be implemented in practice?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated traffic control systems', 'Smart city technologies', 'Fraud detection algorithms', 'Biometric systems', 'Access control algorithms']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps ensure automated systems reduce bias and promote equity?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Information sharing and feedback mechanisms', 'AI impact assessment', 'Organizational policies', 'Third-party rights']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Digital content transparency solutions ensure AI traceability and integrity by enabling the documentation of each instance where content is generated, modified, or shared, providing a tamper-proof history of the content. Additionally, robust version control systems can be applied to track changes across the AI lifecycle over time.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'AI performance is evaluated with human safety and privacy in mind by implementing measures such as assessing and managing statistical biases related to GAI content provenance, documenting how content provenance data is tracked, providing human subjects with options to withdraw participation or revoke consent, and using techniques like anonymization and differential privacy to minimize risks associated with linking AI-generated content back to individual human subjects.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What role do biometric systems play in access control?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the role of healthcare navigators in assisting consumers with finding health coverage options. It does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. The intent is clear, seeking information on the functions and contributions of healthcare navigators in the context of health coverage.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role do healthcare navigators play in helping consumers find health coverage options?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the actions that connect AI Actor engagement to measures ensuring content authenticity and integrity. While it is clear in its intent to understand the relationship between AI Actor engagement and content authenticity measures, it is somewhat vague in defining what is meant by 'AI Actor engagement' and the specific 'measures' being referred to. To improve clarity and answerability, the question could specify what is meant by 'AI Actor engagement' (e.g., specific activities or roles of AI systems) and provide examples or types of 'measures' for content authenticity and integrity.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What actions link AI Actor engagement to measures ensuring content authenticity and integrity?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about concerns raised by panelists regarding healthcare access and delivery in relation to new technologies. It is clear in specifying the topic of interest (concerns, healthcare access and delivery, new technologies) and seeks detailed information on the concerns raised. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What concerns were raised by panelists regarding healthcare access and delivery in relation to new technologies?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of information sharing and feedback mechanisms in relation to GAI systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What measures should be implemented to ensure the safe use of derived data in automated systems while preventing algorithmic discrimination?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The AI Bill of Rights suggests guiding the design, use, and deployment of automated systems to protect the American public, ensuring that these technologies reinforce civil rights and democratic values. It emphasizes the need to root out inequity, embed fairness in decision-making processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Entities should be especially careful to maintain the quality of data in sensitive domains to avoid adverse consequences arising from decision-making based on flawed or inaccurate data. This includes conducting regular, independent audits and taking prompt corrective measures to maintain accurate, timely, and complete data.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Surveillance and data collection', 'Consumer data protection', 'Automated systems', 'Mental health impacts']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Generative AI Public Working Group', 'GAI risk management', 'Governance', 'Content Provenance', 'AI lifecycle risks']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of proactive testing in automated systems, while the second question is about steps to reduce bias and promote equity in automated systems. These questions have different constraints and requirements, as well as different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Factors on data privacy and content integrity for a GAI system include documenting the extent to which human domain knowledge is employed to improve GAI system performance, reviewing and verifying sources and citations in GAI system outputs, tracking instances of anthropomorphization in GAI system interfaces, verifying GAI system training data and TEVV data provenance, and regularly reviewing security and safety guardrails. Additionally, structured feedback about content provenance should be recorded and integrated from operators, users, and impacted communities, and there should be an emphasis on digital content transparency regarding the societal impacts of AI and the role of diverse and inclusive content generation.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with surveillance and data collection on the American public?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of content provenance in the context of GAI risk management?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of documenting risks and impacts in ensuring compliance and effective governance throughout the lifecycle of GAI (General Artificial Intelligence) systems, with a particular focus on external feedback mechanisms. It is clear in specifying the topic of interest (documentation of risks and impacts, compliance, governance, GAI systems, external feedback mechanisms) and seeks detailed information on the relationship between these elements. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how 'human alternatives' can be implemented in practice. While it is clear that the question seeks practical examples, the term 'human alternatives' is vague and could refer to various concepts such as alternative energy sources, alternative medicine, or even alternative dispute resolution methods. To improve clarity and answerability, the question should specify what is meant by 'human alternatives' and the context in which they are to be implemented (e.g., in technology, healthcare, environmental practices).", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are some real-life examples of how human alternatives can be implemented in practice?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Transparency artifacts', 'Explainable AI (XAI)', 'Pre-trained models', 'Harmful bias', 'Content filters']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of biometric systems in access control. It is clear and specific, seeking information on the function and importance of biometric systems within the context of access control. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role do biometric systems play in access control?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about indirect prompt injection attacks and how they exploit vulnerabilities in GAI-integrated applications. It is specific, independent, and has a clear intent. The question does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does risk documentation aid compliance and governance in GAI systems, especially with external feedback?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of bias and discrimination in automated systems on the rights of the American public?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for measures to ensure the safe use of derived data in automated systems while preventing algorithmic discrimination. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking actionable measures or strategies to address the issue of algorithmic discrimination in the context of automated systems using derived data.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to mitigate risks related to harmful bias in generative AI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Synthetic training data', 'Model collapse', 'Environmental impact', 'GAI systems', 'Carbon capture programs']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the purpose of information sharing and feedback mechanisms in relation to GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (information sharing and feedback mechanisms) and the context (GAI systems). The intent is to understand the purpose of these mechanisms, which is straightforward and unambiguous. The question is self-contained and does not rely on external references or prior knowledge beyond a basic understanding of GAI systems.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of information sharing and feedback mechanisms in relation to GAI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connections between Navigators' training and their role in aiding health coverage access. It is clear in specifying the topic of interest (Navigators' training and their role in health coverage access) and seeks information on the relationship between these two aspects. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on expectations for safety and effectiveness, while the second question addresses standards for safety and fairness and how to assess them, indicating different requirements and depths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the issues highlighted by panelists regarding technology's role in healthcare access and equity. It is clear in specifying the topic of interest (tech's role in healthcare access and equity) and seeks detailed information on the issues identified by panelists. The intent is clear, and the question is independent as it does not rely on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps ensure safe use of derived data in automated systems, avoiding bias?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between Navigators' training and their role in aiding health coverage access?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of verifying the effectiveness of carbon capture programs in relation to GAI training and applications?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of content provenance in the context of GAI (General Artificial Intelligence) risk management. It is clear in specifying the topic of interest (content provenance) and the context (GAI risk management), making the intent clear and understandable. The question does not rely on external references or unspecified contexts, making it self-contained and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the significance of content provenance in the context of GAI risk management?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential risks associated with surveillance and data collection on the American public. It is clear in specifying the topic of interest (surveillance and data collection) and the population concerned (the American public). The intent is to understand the risks, which is straightforward and unambiguous. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of surveillance and data collection practices.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the potential risks associated with surveillance and data collection on the American public?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of the impact documentation process, while the second question is about how risk documentation aids compliance and governance, particularly with external feedback. These questions have different constraints and requirements, as well as different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Indirect prompt injection attacks occur when adversaries remotely exploit LLM-integrated applications by injecting prompts into data likely to be retrieved. These attacks can exploit vulnerabilities by stealing proprietary data or running malicious code remotely on a machine.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how 'human alternatives' can be implemented in practice. While it is clear that the question seeks practical examples, the term 'human alternatives' is vague and could refer to various concepts such as alternative energy sources, alternative medicine, or even alternative dispute resolution methods. To improve clarity and answerability, the question should specify what is meant by 'human alternatives' and possibly provide a context or domain (e.g., technology, healthcare, environmental science) in which these alternatives are to be considered.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What issues did panelists highlight about tech's role in healthcare access and equity?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What roles do oversight policies and feedback mechanisms play in mitigating risks associated with GAI systems and ensuring effective communication of their societal impacts?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the actions that connect AI Actor engagement to measures ensuring content authenticity and integrity. While it is clear in its intent to understand the relationship between AI Actor engagement and content authenticity measures, it lacks specificity regarding what is meant by 'AI Actor engagement' and the types of 'measures' being referred to. To improve clarity and answerability, the question could specify the context or examples of AI Actor engagement (e.g., content creation, moderation) and the types of measures (e.g., verification processes, blockchain technology) it is interested in.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To ensure automated systems reduce bias and promote equity, several steps should be taken: 1) Conduct proactive equity assessments during the design phase to identify potential discrimination and effects on equity; 2) Use representative and robust data that reflects local communities and is reviewed for bias; 3) Guard against proxies by avoiding the direct use of demographic information in system design and testing for correlations; 4) Allow independent evaluations of potential algorithmic discrimination; 5) Provide reporting of algorithmic impact assessments that detail consultations, equity assessments, and any disparities found, ensuring transparency and public accountability.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to mitigate risks related to harmful bias in generative AI systems. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or unspecified contexts, and it clearly seeks information on suggested measures for a particular issue in generative AI systems.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of bias and discrimination in automated systems on the rights of the American public. It is specific in its focus on bias and discrimination within automated systems and their impact on rights, and it is clear in its intent to understand the consequences of these issues. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of automated systems and civil rights, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about systems that assist in adjudicating access control and the role of biometrics within these systems. It is clear in its intent, seeking information on both the types of systems used for access control and the specific application of biometrics. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the specific role of healthcare navigators in helping consumers find health coverage options, while the second question is about the relationship between Navigator training and health coverage access. These questions have different constraints and requirements.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems should follow standards that include independent evaluation, regular reporting, and protections against algorithmic discrimination. They should be designed to allow independent evaluators access to assess safety and effectiveness, with regular updates on system performance, data usage, risk management, and independent evaluations. Additionally, entities should conduct algorithmic impact assessments to evaluate potential discrimination and ensure transparency in reporting these assessments.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What multifaceted dangers arise from the intersection of pervasive surveillance practices and the unregulated collection of personal data on the American populace?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does content provenance play in mitigating unique risks associated with GAI, as highlighted by stakeholder consultations?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-generated content', 'Real-time auditing tools', 'User feedback mechanisms', 'Synthetic data', 'Incident response and recovery plans']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not provide specific information on how risk documentation aids compliance and governance in GAI systems, particularly regarding external feedback.', 'verdict': -1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions ask about concerns raised by panelists regarding healthcare access and delivery in relation to new technologies. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What systems assist in adjudicating access control and how do biometrics fit in?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Automated systems', 'Human fallback', 'Critical protections', 'Voting process']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Artificial Intelligence and Democratic Values', 'Non-discriminatory technology', 'Explainable AI', 'Community participation', 'Social welfare systems']
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of using structured feedback mechanisms in relation to AI-generated content?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of having a human fallback system in automated processes?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Predictive policing system', 'Gun violence risk assessment', 'Watch list transparency', 'System flaws in benefit allocation', 'Lack of explanation for decisions']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the purpose of verifying the effectiveness of carbon capture programs in relation to GAI (presumably General Artificial Intelligence) training and applications. While it specifies the topic of interest (carbon capture programs and GAI), it is somewhat ambiguous due to the lack of clarity on how these two areas are related. The term 'GAI' is not commonly used and could be confusing without further context. To improve clarity and answerability, the question could benefit from specifying what 'GAI' stands for and explaining the connection between carbon capture programs and AI training or applications. For example, 'What is the purpose of verifying the effectiveness of carbon capture programs in relation to the environmental impact of training General Artificial Intelligence (GAI) models?'", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What is the purpose of verifying the effectiveness of carbon capture programs in relation to GAI training and applications?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the key aspects of designing explainable AI as discussed in the panel on Artificial Intelligence and Democratic Values?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the roles of oversight policies and feedback mechanisms in mitigating risks associated with GAI (General Artificial Intelligence) systems and ensuring effective communication of their societal impacts. It is clear in specifying the elements of interest (oversight policies, feedback mechanisms, GAI systems) and the aspects to be addressed (risk mitigation, communication of societal impacts). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-generated content', 'Real-time auditing tools', 'User feedback mechanisms', 'Synthetic data', 'Incident response and recovery plans']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting or reproducing existing unwanted inequities. These outcomes can threaten people's opportunities, undermine their privacy, and lead to pervasive tracking of their activities, often without their knowledge or consent.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What issues arise from system flaws in benefit allocation?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To mitigate risks related to harmful bias in generative AI systems, the suggested measures include applying explainable AI (XAI) techniques as part of ongoing continuous improvement processes, documenting how pre-trained models have been adapted for specific generative tasks, and documenting sources and types of training data along with potential biases present in the data.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Navigator training is related to health coverage access as it equips individuals or organizations to help consumers, small businesses, and their employees navigate the process of finding and obtaining health coverage options through the Marketplace. This training enables Navigators to assist with completing eligibility and enrollment forms, thereby facilitating access to affordable and comprehensive health coverage for uninsured consumers.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the role of biometric systems in access control, requiring similar depth and breadth of explanation.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What's the role of oversight and feedback in managing GAI risks and communicating their societal effects?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI model explanation', 'GAI risks', 'Privacy risk assessment', 'Data provenance', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the dangers arising from the intersection of pervasive surveillance practices and the unregulated collection of personal data on the American populace. It is specific in its focus on the American context and the combination of surveillance and data collection. The intent is clear, seeking an analysis of the multifaceted dangers involved. The question is self-contained and does not rely on external references or prior knowledge beyond general understanding of surveillance and data collection practices.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of data provenance in the context of AI model documentation and governance?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of content provenance in mitigating unique risks associated with GAI, as highlighted by stakeholder consultations. It is clear in specifying the topic of interest (content provenance, GAI risks) and the context (stakeholder consultations). However, it assumes familiarity with the specific stakeholder consultations and the unique risks they highlighted, which are not provided within the question. To improve clarity and answerability, the question could include a brief description of the unique risks identified by stakeholders or specify the context of these consultations.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What role does content provenance play in mitigating unique risks associated with GAI, as highlighted by stakeholder consultations?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the precautions or steps needed to ensure the safe use of derived data in automated systems, with a focus on avoiding bias. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated traffic control systems', 'Smart city technologies', 'Fraud detection algorithms', 'Biometric systems', 'Access control algorithms']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What risks come from widespread surveillance and unregulated data collection on Americans?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'AI Bill of Rights', 'Civil rights and liberties', 'Equal opportunities', 'Access to critical resources']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of having a human fallback system in automated processes. It is clear in its intent, seeking an explanation of the importance or benefits of such a system. The question is independent and does not rely on external references or additional context to be understood. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the significance of having a human fallback system in automated processes?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI incidents', 'AI Actors', 'Incident reporting', 'Documentation practices', 'AI risk management']
+[ragas.testset.evolutions.INFO] seed question generated: "What are the responsible uses of synthetic data in GAI development?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Notice and explanation', 'Impact on lives', 'Opaque decision-making', 'Algorithmic risk assessment']
+[ragas.testset.evolutions.INFO] seed question generated: "What role do fraud detection algorithms play in the adjudication of benefits and penalties?"
+[ragas.testset.evolutions.INFO] seed question generated: "What types of systems are considered under the category of equal opportunities in the context of the AI Bill of Rights?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role does incident reporting play in improving GAI risk management across the AI ecosystem?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of information sharing and feedback mechanisms specifically in relation to GAI systems, while the second question addresses the broader role of oversight and feedback in managing GAI risks and communicating their societal effects. The scope and depth of the inquiries differ.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about issues arising from system flaws in benefit allocation. It is clear in its intent, seeking information on the problems caused by flaws in the system used for allocating benefits. The question is independent and does not rely on external references or additional context to be understood. It is specific enough to be answerable by someone with knowledge in the domain of benefit allocation systems.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What issues arise from system flaws in benefit allocation?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key aspects of designing explainable AI as discussed in a specific panel on Artificial Intelligence and Democratic Values. While it specifies the topic (explainable AI) and the context (a panel discussion), it assumes access to the content of the panel discussion without providing any details or summary of what was discussed. This makes the question unclear for those who did not attend the panel or do not have access to its proceedings. To improve clarity and answerability, the question could include a brief summary of the key points or themes discussed in the panel, or alternatively, frame the question in a way that does not rely on specific, unpublished discussions.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are the key aspects of designing explainable AI as discussed in the panel on Artificial Intelligence and Democratic Values?"
+[ragas.testset.evolutions.INFO] seed question generated: "What challenges do algorithmic risk assessments pose for individuals in understanding and contesting decisions that affect their lives?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Information integrity', 'Human-AI configuration', 'Digital content transparency', 'Harmful bias and homogenization']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Discrimination in mortgage lending', 'Redlining initiative', 'Algorithmic decision-making', 'Healthcare access disparities', 'Bias in artificial intelligence']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the risks associated with surveillance and data collection on the American public, requiring similar depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the purpose of verifying the effectiveness of carbon capture programs in relation to GAI (presumably General Artificial Intelligence) training and applications. While it specifies the topic of interest (carbon capture programs and GAI), it is somewhat ambiguous due to the lack of clarity on how these two areas are related. The term 'GAI' is not commonly used and could be confusing without further context. To improve clarity and answerability, the question could benefit from specifying what 'GAI' stands for and explaining the connection between carbon capture programs and AI training or applications. For example, it could ask how carbon capture programs impact the environmental footprint of AI training or how they are integrated into AI applications.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are the concerns associated with harmful bias and homogenization in the context of GAI systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What initiatives is the federal government implementing to combat discrimination in mortgage lending?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of data provenance in the context of AI model documentation and governance. It is clear in specifying the topic of interest (data provenance) and the context (AI model documentation and governance). The intent is to understand the importance or role of data provenance within this specific context. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the significance of data provenance in the context of AI model documentation and governance?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of content provenance in mitigating unique risks associated with GAI, as highlighted by stakeholder consultations. It is clear in specifying the topic of interest (content provenance, GAI risks) and the context (stakeholder consultations). However, it assumes familiarity with the specific stakeholder consultations and the unique risks they highlighted, which are not provided within the question. To improve clarity and answerability, the question could include a brief description of the unique risks identified by stakeholders or specify the context of these consultations.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What complications arise in benefit distribution when automated systems operate without transparency and clear explanations?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the responsible uses of synthetic data in the development of General Artificial Intelligence (GAI). It is clear in specifying the topic of interest (responsible uses of synthetic data) and the context (GAI development). The intent is straightforward, seeking information on ethical or appropriate applications of synthetic data within this specific domain. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the responsible uses of synthetic data in GAI development?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Environmental impacts of GAI', 'Harmful bias in AI systems', 'Generative AI energy consumption', 'Disparities in model performance', 'Trustworthy AI characteristics']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the challenges posed by algorithmic risk assessments for individuals in terms of understanding and contesting decisions that impact their lives. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on the difficulties individuals face with these assessments.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What challenges do algorithmic risk assessments pose for individuals in understanding and contesting decisions that affect their lives?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of incident reporting in improving GAI (General Artificial Intelligence) risk management across the AI ecosystem. It is clear in specifying the topic of interest (incident reporting) and the context (GAI risk management within the AI ecosystem). The intent is to understand the impact or contribution of incident reporting to risk management, which is straightforward and does not rely on external references or unspecified contexts. Therefore, the question is specific, independent, and has a clear intent.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does incident reporting play in improving GAI risk management across the AI ecosystem?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of fraud detection algorithms in the adjudication of benefits and penalties. It is clear in its intent, seeking to understand the impact or function of these algorithms within a specific context (adjudication of benefits and penalties). The question is self-contained and does not rely on external references or prior knowledge not provided within the question itself. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the roles of human fallback in automated systems and its impact on public access. It is clear in specifying the topic of interest (human fallback in automated systems) and seeks information on both the roles and the impact on public access. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. However, it could be improved by specifying what is meant by 'public access' (e.g., access to services, information, or technology) to ensure a more precise and relevant response.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Oversight and feedback play a crucial role in managing GAI risks by ensuring that organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from external sources regarding the potential individual and societal impacts related to AI risks. This includes establishing oversight functions across the GAI lifecycle and documenting the risks and potential impacts of the AI technology, which facilitates broader communication about these impacts.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the types of systems considered under the category of equal opportunities in the context of the AI Bill of Rights. It is clear in specifying the topic of interest (equal opportunities systems) and the context (AI Bill of Rights). The intent is clear, seeking information on the classification of systems within a specific framework. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What types of systems are considered under the category of equal opportunities in the context of the AI Bill of Rights?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the environmental impacts associated with the energy consumption of generative AI systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does data provenance play in ensuring the ethical governance and documentation of AI models, particularly in relation to human subject protection and bias management?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI system context', 'Harmful bias and homogenization', 'Interdisciplinary AI actors', 'Risk measurement plans', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the initiatives the federal government is implementing to combat discrimination in mortgage lending. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge and clearly seeks information on government actions against discrimination in mortgage lending.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What initiatives is the federal government implementing to combat discrimination in mortgage lending?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What are the roles of human fallback in automated systems and its impact on public access?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of using structured feedback mechanisms in relation to AI-generated content. It is clear in specifying the topic of interest (structured feedback mechanisms) and the context (AI-generated content). The intent is straightforward, seeking an explanation of the purpose or benefits of these mechanisms. The question is self-contained and does not rely on external references or prior knowledge beyond what is provided in the question itself.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the key aspects of designing explainable AI as discussed in a specific panel on Artificial Intelligence and Democratic Values. While it specifies the topic (explainable AI) and the context (a panel discussion), it assumes the reader has access to or knowledge of the content of this specific panel discussion. This reliance on external references makes the question unclear for those who did not attend or have access to the panel's details. To improve clarity and answerability, the question could either provide a brief summary of the panel's main points or reframe to ask about general key aspects of designing explainable AI without relying on the specific panel discussion.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI incidents', 'AI Actors', 'Incident reporting', 'Documentation practices', 'AI risk management']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about concerns related to harmful bias and homogenization within the context of GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (harmful bias and homogenization) and the context (GAI systems), making the intent clear and understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it independent and answerable based on domain knowledge.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the concerns associated with harmful bias and homogenization in the context of GAI systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What factors should be assessed to determine the expected and acceptable GAI system context of use?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Fraud detection algorithms assist in the adjudication of benefits and penalties by analyzing information and matching records to support decision-makers.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What role does documentation play in improving AI risk management across the AI ecosystem?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What impact does the systematic documentation and reporting of GAI incidents have on the evaluation and enhancement of risk management practices among AI Actors within the ecosystem?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about practices that ensure synthetic data aligns with real-world statistics while protecting privacy. It is clear in its intent, seeking specific practices or methods. The question is self-contained and does not rely on external references or unspecified contexts. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Ongoing monitoring', 'Clear organizational oversight', 'High-quality data', 'Governance procedures']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What federal initiatives are being undertaken to address algorithmic biases in mortgage lending practices, particularly concerning communities of color?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What practices ensure synthetic data aligns with real-world stats while protecting privacy?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the importance and role of human fallback systems in automated processes, sharing the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the complications in benefit distribution when automated systems lack transparency and clear explanations. It is specific in its focus on 'benefit distribution' and the conditions of 'automated systems' operating without 'transparency and clear explanations'. The intent is clear, seeking information on the potential issues or challenges that arise under these conditions. The question is self-contained and does not rely on external references or prior knowledge beyond general understanding of automated systems and transparency. Therefore, it meets the criteria for clarity and answerability.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of data provenance in the ethical governance and documentation of AI models, with a focus on human subject protection and bias management. It is specific and clear in its intent, seeking information on how data provenance contributes to these particular aspects of AI ethics. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 1.75}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the factors that contribute to individuals' difficulties in contesting algorithmic decisions affecting their lives. It is clear in its intent, seeking specific information about the challenges faced by individuals in this context. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Equitable design', 'Automated systems', 'Legal protections', 'Proactive equity assessments']
+[ragas.testset.evolutions.INFO] seed question generated: "What should be included in the governance procedures for the development and use of automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What implications arise from the interplay of harmful bias and content uniformity in GAI systems, particularly regarding data accuracy and user feedback mechanisms?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the environmental impacts associated with the energy consumption of generative AI systems. It is specific in its focus on environmental impacts and energy consumption, and it is clear in its intent to understand the relationship between these two factors. The question is self-contained and does not rely on external references or additional context to be understood or answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the environmental impacts associated with the energy consumption of generative AI systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of using structured feedback mechanisms in relation to AI-generated content is to solicit and capture user input about the content to detect subtle shifts in quality or alignment with community and societal values.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What issues come up in benefit distribution with opaque automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does data provenance support ethical AI governance, especially for human protection and bias?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks which automated systems impact equal opportunities in education, housing, and employment. It is clear in its intent, seeking information on specific types of automated systems and their effects on equal opportunities in these areas. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What factors contribute to individuals' difficulties in contesting algorithmic decisions affecting their lives?"
+[ragas.testset.evolutions.INFO] seed question generated: "What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do legal protections play in addressing algorithmic discrimination?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "Which automated systems impact equal opportunities in education, housing, and employment?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Cyberattacks', 'Intellectual Property', 'Obscene and abusive content', 'CBRN weapons', 'Chemical and biological design tools']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the responsible uses of synthetic data in GAI development, while the second question is concerned with aligning synthetic data with real statistics and ensuring privacy. These questions have different constraints and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of documentation in improving AI risk management across the AI ecosystem. It is clear in specifying the topic of interest (documentation) and the context (AI risk management within the AI ecosystem). The intent is straightforward, seeking an explanation of how documentation contributes to risk management. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role does documentation play in improving AI risk management across the AI ecosystem?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for factors to be assessed in determining the expected and acceptable GAI (General Artificial Intelligence) system context of use. It is clear in its intent, seeking specific factors for assessment. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What factors should be assessed to determine the expected and acceptable GAI system context of use?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the impact of systematic documentation and reporting of GAI (General AI Incidents) on the evaluation and enhancement of risk management practices among AI Actors within the ecosystem. It is clear in specifying the topic of interest (systematic documentation and reporting of GAI incidents) and the context (evaluation and enhancement of risk management practices among AI Actors). The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'White House Office of Science and Technology Policy', 'Automated systems', 'Civil rights and democratic values', 'National security and defense activities']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about federal initiatives aimed at addressing algorithmic biases in mortgage lending practices, with a particular focus on communities of color. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on government actions in a specific area of concern.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address issues arising from flaws in benefit allocation systems, with a focus on automated systems. They share the same depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What role do chemical and biological design tools play in augmenting design capabilities in chemistry and biology?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What are the energy-related environmental consequences of generative AI systems, particularly in relation to their potential to perpetuate harmful biases and produce undesirable content?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does documenting GAI incidents affect AI risk management?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the significance of data provenance in AI model documentation and governance, while the second question emphasizes how data provenance supports ethical AI governance, particularly in terms of human protection and bias. These questions have different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically addresses the challenges posed by algorithmic risk assessments in understanding and contesting decisions, while the second question is broader and focuses on the general difficulty of challenging algorithmic decisions. The depth and breadth of the inquiries differ.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What role do civil rights and democratic values play in the Blueprint for an AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the elements that should be included in governance procedures for the development and use of automated systems. It is clear in its intent, seeking specific information about governance procedures, and does not rely on external references or unspecified contexts. The question is self-contained and understandable, making it answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What should be included in the governance procedures for the development and use of automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What federal steps are being taken to tackle algorithmic bias in mortgage lending for communities of color?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of harmful bias and content uniformity in GAI (General Artificial Intelligence) systems, specifically focusing on data accuracy and user feedback mechanisms. It is clear in specifying the aspects of interest (harmful bias, content uniformity, data accuracy, user feedback mechanisms) and seeks detailed information on their interplay. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of legal protections in addressing algorithmic discrimination. It is clear in its intent, seeking information on how legal frameworks can mitigate or address issues related to algorithmic bias. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role do legal protections play in addressing algorithmic discrimination?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to address risks associated with intellectual property infringement in organizational GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (intellectual property infringement risks) and the context (organizational GAI systems). The intent is to seek actionable suggestions, making it specific and understandable without requiring additional context or external references.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the types of systems considered under the category of equal opportunities in the context of the AI Bill of Rights, which is a broader inquiry. The second question specifically asks about automated systems affecting equal opportunities in education, housing, and jobs, which is more specific and narrower in scope.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What interdisciplinary factors and ongoing evaluations should be considered to assess the anticipated and acceptable context of use for GAI systems, particularly in relation to socio-cultural impacts and data integrity?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What are the effects of bias and uniformity in GAI on data accuracy and user feedback?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 3, 'score': 1.75}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Incident Database', 'Generative AI security flaws', 'Large Language Models', 'Ethical Tensions in Human-AI Companionship', 'Disinformation Business of Chinese Influence Operations']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Consider opportunities to responsibly use synthetic data and other privacy enhancing techniques in GAI development, where appropriate and applicable, to match the statistical properties of real-world data without disclosing personally identifiable information or contributing to homogenization.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the impact of incident reporting or documenting GAI incidents on AI risk management, sharing the same depth, breadth, and requirements for the answer.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not explicitly mention how data provenance supports ethical AI governance, particularly regarding human protection and bias.', 'verdict': -1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Incident Database?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address discrimination in mortgage lending, the first question is broader, encompassing all forms of discrimination, whereas the second question specifically focuses on algorithmic bias affecting communities of color. This leads to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What governance elements should be integrated to ensure ongoing public safety and effective oversight in the development and deployment of automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What measures are essential to ensure that automated systems are designed to prevent algorithmic discrimination while also safeguarding community safety and effectiveness?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of chemical and biological design tools in augmenting design capabilities in chemistry and biology. It is clear in specifying the topic of interest (chemical and biological design tools) and seeks information on their impact on design capabilities within these fields. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role do chemical and biological design tools play in augmenting design capabilities in chemistry and biology?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the energy-related environmental consequences of generative AI systems, with a particular focus on their potential to perpetuate harmful biases and produce undesirable content. While the question is clear in its intent to explore the environmental impact and ethical concerns of generative AI, it conflates two distinct issues: environmental consequences and ethical implications. To improve clarity and answerability, the question could be split into two separate queries: one focusing on the environmental impact of generative AI systems and another on their potential to perpetuate biases and produce undesirable content.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are the energy-related environmental consequences of generative AI systems, particularly in relation to their potential to perpetuate harmful biases and produce undesirable content?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'People find it hard to challenge algorithmic decisions because they are often denied the knowledge needed to address the impact of automated systems on their lives. The decision-making processes of these systems tend to be opaque and complex, making it difficult for individuals to ascertain how or why a decision was made. Additionally, the lack of clear and timely explanations can hinder their ability to contest decisions effectively.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connections between GAI incident documentation and AI risk management effectiveness. While it specifies the two areas of interest (GAI incident documentation and AI risk management effectiveness), it lacks clarity on what 'GAI' stands for, which could be ambiguous for those not familiar with the term. Additionally, the question does not specify what kind of connections it is interested in (e.g., statistical correlations, procedural impacts, case studies). To improve clarity and answerability, the question could define 'GAI' and specify the type of connections it seeks to explore.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What connections exist between GAI incident documentation and AI risk management effectiveness?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The suggested action to address risks associated with intellectual property infringement in organizational GAI systems is to compile statistics on actual policy violations, take-down requests, and intellectual property infringement, and analyze transparency reports across demographic and language groups.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of civil rights and democratic values in the Blueprint for an AI Bill of Rights. It is clear in specifying the topic of interest (civil rights and democratic values) and the document in question (Blueprint for an AI Bill of Rights). The intent is clear, seeking an explanation of the relationship or influence of these values within the specified document. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role do civil rights and democratic values play in the Blueprint for an AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['CBRN Information', 'Confabulation', 'Dangerous content', 'Data Privacy', 'Harmful Bias']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI']
+[ragas.testset.evolutions.INFO] seed question generated: "What is confabulation and how does it mislead users?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question seeks to identify interdisciplinary factors and ongoing evaluations necessary to assess the anticipated and acceptable context of use for GAI (General Artificial Intelligence) systems, with a focus on socio-cultural impacts and data integrity. It is clear in its intent, specifying the areas of interest (socio-cultural impacts and data integrity) and the type of information sought (interdisciplinary factors and evaluations). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Incident Database. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific database.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the AI Incident Database?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Safety Institute established by NIST?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Automated systems', 'Bias testing', 'Equitable design', 'Systemic biases']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems that affect equal opportunities in education include algorithms that detect student cheating or plagiarism, admissions algorithms, online or virtual reality student monitoring systems, projections of student progress or outcomes, algorithms that determine access to resources or programs, and surveillance of classes. In housing, tenant screening algorithms, automated valuation systems for mortgage underwriting or home insurance, and automated valuations from online aggregator websites are relevant. For employment, workplace algorithms that inform terms and conditions of employment, hiring or termination algorithms, virtual or augmented reality workplace training programs, and electronic workplace surveillance and management systems are included.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What factors should be considered for assessing GAI systems' socio-cultural impacts and data integrity?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for governance elements that should be integrated to ensure public safety and effective oversight in the development and deployment of automated systems. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on governance elements related to public safety and oversight in the context of automated systems.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on concerns related to harmful bias and homogenization in GAI systems, while the second question specifically addresses the effects of bias and uniformity on data accuracy and user feedback. These questions have different scopes and depths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for essential measures to ensure that automated systems are designed to prevent algorithmic discrimination while also safeguarding community safety and effectiveness. It is clear in its intent, specifying the dual goals of preventing discrimination and ensuring safety and effectiveness. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What significance do civil rights and democratic principles hold in the framework designed to guide the ethical deployment of automated systems as outlined in the AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The federal government is working to combat discrimination in mortgage lending through initiatives such as the Department of Justice's nationwide initiative to combat redlining. This includes reviewing how lenders may be avoiding serving communities of color and conducting targeted marketing and advertising. Additionally, the Action Plan to Advance Property Appraisal and Valuation Equity includes a commitment from agencies overseeing mortgage lending to include a nondiscrimination standard in proposed rules for Automated Valuation Models.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are being taken to ensure equitable design in automated systems to protect against algorithmic discrimination?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What governance aspects are key for public safety in automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps ensure automated systems avoid bias and maintain safety?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated sentiment analyzer', 'Bias against Jews and gay people', 'Search engine results for minority groups', 'Advertisement delivery systems and stereotypes', 'Algorithmic discrimination in healthcare']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about specialized AI systems that enhance design in chemistry and biology beyond traditional methods. It is clear in specifying the domain (chemistry and biology) and the context (enhancing design beyond traditional methods). The intent is to identify specific AI systems and their contributions, making it understandable and answerable without needing additional context or external references.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI trustworthy characteristics', 'Human-AI Configuration', 'Information Integrity', 'Data Privacy', 'Confabulation']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Risk Management Framework', 'Generative AI', 'Cross-sectoral profile', 'Risk management priorities', 'Large language models']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the energy-related environmental consequences of generative AI systems, with a particular focus on their potential to perpetuate harmful biases and produce undesirable content. While the question is clear in its intent to explore the environmental impact and ethical concerns of generative AI, it conflates two distinct issues: environmental consequences and ethical implications. To improve clarity and answerability, the question could be split into two separate queries: one focusing on the energy-related environmental consequences and another on the ethical concerns related to biases and undesirable content. This would make each question more specific and easier to address independently.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does the AI Incident Database play in addressing the challenges posed by AI in cybersecurity and mental health?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connections between GAI incident documentation and AI risk management effectiveness. While it specifies the two areas of interest (GAI incident documentation and AI risk management effectiveness), it lacks clarity on what 'GAI' stands for, which could be ambiguous for those not familiar with the term. Additionally, it does not specify the type of connections or the context in which these connections should be evaluated. To improve clarity and answerability, the question could define 'GAI' and specify the type of connections (e.g., causal, correlational) and the context (e.g., within a specific industry or study).", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on determining the context of use for GAI systems, while the second question is concerned with assessing socio-cultural impacts and data integrity. These are different areas of inquiry with distinct requirements and depths.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for an explanation of 'confabulation' and how it misleads users. It is clear in specifying the term of interest (confabulation) and seeks detailed information on both the definition and the impact on users. The question is self-contained and does not rely on external references or prior knowledge beyond understanding the term 'confabulation'. Therefore, it meets the criteria of independence and clear intent.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is confabulation and how does it mislead users?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What specialized AI systems enhance design in chemistry and biology beyond traditional methods?"
+[ragas.testset.evolutions.INFO] seed question generated: "What issues does the automated sentiment analyzer address regarding bias in online statements?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Safety Institute established by NIST. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific institute.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the AI Safety Institute established by NIST?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Equal Opportunities and Civil Justice', 'Impact of technology on equity', 'AI systems and access limitations', 'Surveillance concerns', 'Community input in technology design']
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to ensure information integrity in the deployment of GAI systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the comprehensive governance procedures for the development and use of automated systems, while the second question specifically targets governance aspects related to public safety. This difference in focus leads to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the cross-sectoral profile in the context of the AI Risk Management Framework for Generative AI?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the ways in which AI systems are being used to limit access to equal opportunities in education, housing, and employment?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Continuous monitoring of GAI system impacts', 'Harmful bias and homogenization', 'Structured human feedback exercises', 'GAI red-teaming', 'Information integrity']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the measures being taken to ensure equitable design in automated systems to protect against algorithmic discrimination. It is clear in its intent, specifying the focus on 'equitable design' and 'algorithmic discrimination'. The question is independent and does not rely on external references or prior knowledge not included within the question itself. Therefore, it is understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What measures are being taken to ensure equitable design in automated systems to protect against algorithmic discrimination?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the role of legal protections in addressing algorithmic discrimination, while the second question is broader, asking about steps to avoid bias and maintain safety in automated systems. They differ in both depth and breadth of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of civil rights and democratic principles within the framework for the ethical deployment of automated systems as outlined in the AI Bill of Rights. It is clear in specifying the topic of interest (civil rights and democratic principles) and the context (AI Bill of Rights). The intent is also clear, seeking an explanation of the importance of these principles within the specified framework. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The effects of bias and uniformity in GAI on data accuracy and user feedback are related to harmful bias and homogenization, which can compromise the representativeness and relevance of data used in AI systems. This can lead to inaccuracies in the information generated and may affect the quality of user feedback, as it may not accurately reflect diverse perspectives or experiences.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the role of chemical and biological design tools in augmenting design capabilities, while the second question specifically asks about AI systems that improve design in chemistry and biology. The scope and depth of the inquiries differ.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of the AI Incident Database in addressing challenges related to AI in cybersecurity and mental health. It is clear in specifying the database and the two areas of interest (cybersecurity and mental health), making the intent understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it independent and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What erroneous content generation, often termed confabulation, can lead to user deception, particularly in the context of accessing sensitive information or capabilities related to CBRN weapons?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do civil rights and democracy fit into the AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What objectives does the U.S. AI Safety Institute aim to achieve in relation to the standards and frameworks for managing AI risks as outlined by NIST?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of structured human feedback exercises in the context of GAI risk measurement and management?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issues that an automated sentiment analyzer addresses concerning bias in online statements. It is clear in specifying the topic of interest (automated sentiment analyzer) and the specific aspect (bias in online statements). The intent is clear, and the question is self-contained, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure information integrity in the deployment of GAI (General Artificial Intelligence) systems. It is clear in its intent, seeking specific measures or strategies related to information integrity. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Factors to consider for assessing GAI systems' socio-cultural impacts include assumptions and limitations, direct value to the organization, intended operational environment, observed usage patterns, potential positive and negative impacts to individuals and communities, and social norms and expectations. For data integrity, factors include evaluating the quality and integrity of data used in training, the provenance of AI-generated content, and ensuring that data or benchmarks used in risk measurement are representative of diverse in-context user populations.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does the AI Incident Database help with AI challenges in cybersecurity and mental health?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Key governance aspects for public safety in automated systems include laying out clear governance structures and procedures, establishing responsibility for oversight, involving organizational stakeholders in governance procedures, and ensuring that those in charge are aware of potential impacts on people's rights and opportunities. Additionally, it may be appropriate to conduct an independent ethics review before deployment.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Executive Order 13960', 'Trustworthy Artificial Intelligence', 'AI Bill of Rights', 'NIST AI Risk Management Framework', 'Stakeholder engagement']
+[ragas.testset.evolutions.INFO] seed question generated: "What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What proactive strategies are being implemented to ensure that automated systems are designed and utilized in a manner that prevents unjust treatment based on protected characteristics?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for ways in which AI systems are being used to limit access to equal opportunities in education, housing, and employment. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on the negative impacts of AI systems in these specific areas.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the ways in which AI systems are being used to limit access to equal opportunities in education, housing, and employment?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the cross-sectoral profile within the context of the AI Risk Management Framework for Generative AI. It is clear in specifying the topic of interest (cross-sectoral profile) and the context (AI Risk Management Framework for Generative AI), making the intent clear and understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it specific and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the role of civil rights and democratic values in the context of the AI Bill of Rights, requiring similar depth and breadth of explanation.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What principles are required for the design and use of trustworthy artificial intelligence in the federal government?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human subject protection', 'Content provenance', 'Data privacy', 'AI system performance', 'Anonymization techniques']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Chemical and biological design tools (BDTs) are highly specialized AI systems trained on scientific data that aid in chemical and biological design, potentially improving design capabilities beyond what text-based LLMs can provide.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To ensure automated systems avoid bias and maintain safety, designers, developers, and deployers should take proactive and continuous measures, including conducting proactive equity assessments as part of system design, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing and mitigation, and maintaining clear organizational oversight. Additionally, independent evaluation and reporting should confirm that the system is safe and effective, including steps taken to mitigate potential harms.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for the general purpose of the AI Incident Database, while the second question specifically inquires about its role in addressing AI challenges in cybersecurity and mental health, leading to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of structured human feedback exercises specifically in the context of GAI (General Artificial Intelligence) risk measurement and management. It is clear in specifying the topic of interest (structured human feedback exercises) and the context (GAI risk measurement and management), making the intent clear and the question self-contained. No additional context or external references are needed to understand or answer the question.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of structured human feedback exercises in the context of GAI risk measurement and management?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to protect data privacy in evaluations involving human subjects?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about erroneous content generation, specifically confabulation, and its potential to deceive users in the context of accessing sensitive information or capabilities related to CBRN (Chemical, Biological, Radiological, and Nuclear) weapons. The intent is clear, seeking to understand the risks and implications of confabulation in this specific context. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the objectives of the U.S. AI Safety Institute in relation to the standards and frameworks for managing AI risks as outlined by NIST. It is specific in mentioning the U.S. AI Safety Institute and NIST, and it clearly seeks information about the objectives related to AI risk management standards and frameworks. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Monitoring system capabilities', 'GAI content interaction', 'Content provenance', 'User feedback integration', 'AI incident tracking']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "In what ways do AI technologies contribute to the reinforcement of inequities in access to education, housing, and employment, while also potentially exacerbating burdens on individuals interacting with social welfare systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Suggested measures to ensure information integrity in the deployment of GAI systems include verifying GAI system training data and TEVV data provenance, and ensuring that fine-tuning or retrieval-augmented generation data is grounded. Additionally, it is recommended to review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The automated sentiment analyzer addresses bias in online statements by identifying that it was found to be biased against Jews and gay people. For instance, it marked the statement 'I’m a Jew' as negative while identifying 'I’m a Christian' as positive. This bias could lead to the preemptive blocking of social media comments such as 'I’m gay.'", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Synthetic training data', 'Model collapse', 'Environmental impact', 'GAI systems', 'Carbon capture programs']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to address risks associated with intellectual property infringement in organizational GAI (General Artificial Intelligence) systems. It is specific in its focus on intellectual property infringement and organizational GAI systems, and it clearly seeks actionable recommendations. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What confabulation might mislead users about CBRN info or capabilities?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the cross-sectoral profile in the context of the AI Risk Management Framework for Generative AI is to assist organizations in deciding how to best manage AI risks in a manner that aligns with their goals, considers legal/regulatory requirements and best practices, and reflects risk management priorities. It offers insights into how risk can be managed across various stages of the AI lifecycle and for Generative AI as a technology.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What role does user feedback integration play in enhancing the monitoring process for GAI models?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about proactive strategies to ensure automated systems are designed and used to prevent unjust treatment based on protected characteristics. It does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. The intent is clear, seeking information on strategies and measures in place for ethical design and use of automated systems.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Preventable harms', 'Ethics review', 'Sepsis prediction model', 'Algorithmic bias']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What are the U.S. AI Safety Institute's goals for NIST's AI risk standards?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of ballot curing laws in the voting process?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of assessing the proportion of synthetic to non-synthetic training data in AI model development?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the principles required for the design and use of trustworthy artificial intelligence in the federal government. It is clear in specifying the topic of interest (trustworthy artificial intelligence) and the context (federal government). The intent is straightforward, seeking information on the principles that should be followed. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps are taken to ensure fair use of automated systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI models', 'Synthetic NCII and CSAM', 'Trustworthy AI Characteristics', 'Value Chain and Component Integration', 'GAI risks management'] +[ragas.testset.evolutions.INFO] seed question generated: "What are some of the potential harms associated with automated systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to protect data privacy in evaluations involving human subjects. It is clear in its intent, specifying the context (evaluations involving human subjects) and the type of information sought (measures to protect data privacy). The question is independent and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What combined strategies are recommended for mitigating intellectual property risks in GAI systems while ensuring transparency and accountability in their deployment?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about confabulation in general and its misleading effects, while the second question specifically focuses on how confabulation might mislead users about CBRN (Chemical, Biological, Radiological, and Nuclear) information or capabilities. The scope and context differ significantly.', 'verdict': 0} +[ragas.testset.evolutions.INFO] seed question generated: "What role do civil rights play in the context of automated systems and technology according to the foreword?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the challenges associated with value chain and component integration in GAI systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Data privacy', 'Privacy by design', 'Data collection limitations', 'Risk identification and mitigation'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear in its intent, asking for specific ways AI technologies contribute to reinforcing inequities in access to education, housing, and employment, and how they might exacerbate burdens on individuals interacting with social welfare systems. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The question is specific and seeks detailed information on the negative impacts of AI technologies in these areas.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question asks about the purpose of the AI Safety Institute established by NIST, while the second question inquires about the goals of the U.S. 
AI Safety Institute specifically related to NIST's AI risk standards. Although related, the questions have different focuses and requirements.", 'verdict': 0} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically addresses equitable design and protection against algorithmic discrimination, while the second question broadly addresses fair use without specifying discrimination. This leads to different depths and requirements.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the roles of structured feedback and red-teaming in General Artificial Intelligence (GAI) risk management. It is clear in specifying the two elements of interest (structured feedback and red-teaming) and the context (GAI risk management). The intent is to understand the contributions of these elements to managing risks associated with GAI. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of ballot curing laws in the voting process. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks an explanation of the purpose of these laws.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How might AI tech reinforce inequities in education, housing, and jobs, and add burdens on those using social welfare?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of user feedback integration in enhancing the monitoring process for GAI (General Artificial Intelligence) models. It is clear in specifying the topic of interest (user feedback integration) and the context (monitoring process for GAI models). The intent is to understand the impact or contribution of user feedback on the monitoring process. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does user feedback integration play in enhancing the monitoring process for GAI models?" +[ragas.testset.evolutions.INFO] seed question generated: "What should entities do to proactively identify and manage risks associated with collecting, using, sharing, or storing sensitive data?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology in social welfare', 'Fraud detection', 'Digital ID systems', 'Healthcare access and delivery', 'Health disparities'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of assessing the proportion of synthetic to non-synthetic training data in AI model development. It is clear in specifying the topic of interest (proportion of synthetic to non-synthetic training data) and seeks information on its importance in the context of AI model development. 
The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What roles do structured feedback and red-teaming play in GAI risk mgmt?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The principles required for the design and use of trustworthy artificial intelligence in the federal government include: (a) lawful and respectful of our Nation’s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) safe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transparent; and (i) accountable.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about potential harms associated with automated systems. It is clear in its intent, seeking information on the negative impacts or risks of automated systems. The question is independent and does not rely on external references or specific prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What concerns were raised by panelists regarding healthcare access and delivery in relation to new technologies?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Suggested measures to protect data privacy in evaluations involving human subjects include: anonymizing data to protect the privacy of human subjects, leveraging privacy output filters, removing any personally identifiable information (PII) to prevent potential harm or misuse, and providing human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Confabulation in the context of CBRN information or capabilities refers to the production of confidently stated but erroneous or false content that may mislead or deceive users regarding the access to or synthesis of nefarious information or design capabilities related to CBRN weapons or other dangerous materials.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for combined strategies to mitigate intellectual property risks in GAI (General Artificial Intelligence) systems while ensuring transparency and accountability in their deployment. It is clear in specifying the topic of interest (intellectual property risks, GAI systems) and the desired outcome (strategies for mitigation, transparency, and accountability). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the impact of AI on equity in education, housing, and employment, but the second question also includes social welfare, adding a broader scope.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the challenges associated with value chain and component integration in GAI (General Artificial Intelligence) systems.
It is clear in specifying the topic of interest (value chain and component integration) and the context (GAI systems). The intent is to understand the difficulties or obstacles in these areas. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "How does the integration of user feedback into GAI monitoring enhance the effectiveness of provenance tracking and risk management strategies?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'User consent', 'Automated systems', 'Surveillance technologies', 'Sensitive domains'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of civil rights in the context of automated systems and technology, specifically according to 'the foreword'. While it is clear in specifying the topic of interest (civil rights, automated systems, technology) and the source of information (the foreword), it assumes access to and understanding of 'the foreword' without providing its content or context. This makes the question unclear for those without direct access to the foreword. To improve clarity and answerability, the question could include a brief description or key points from the foreword, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What role do civil rights play in the context of automated systems and technology according to the foreword?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the role of structured feedback in the context of GAI risk management, though the second question also includes red-teaming. However, the depth and breadth of the inquiry are similar as both focus on risk management strategies.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 2 times +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies help manage IP risks in GAI while ensuring transparency?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['TEVV metrics', 'Measurement error models', 'GAI system risks', 'Feedback processes', 'Harmful bias and homogenization'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The importance of assessing the proportion of synthetic to non-synthetic training data in AI model development is to verify that the training data is not overly homogenous or generated by Generative AI (GAI), which helps mitigate concerns of model collapse.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What role does user consent play in the collection and use of personal data according to data privacy guidelines?" 
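
The transcript above and below is ragas's own DEBUG stream. A minimal sketch for surfacing it in your own run, assuming only the logger names visible in these lines (`ragas.testset.filters`, `ragas.testset.evolutions`):

```python
import logging

# Route ragas's internal test-set loggers through a root handler at DEBUG
# so the filter/evolution records shown in this transcript become visible.
# Logger names are taken directly from the log lines above.
logging.basicConfig(format="[%(name)s.%(levelname)s] %(message)s")
for name in ("ragas.testset.filters", "ragas.testset.evolutions"):
    logging.getLogger(name).setLevel(logging.DEBUG)
```
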
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data protection', 'Third-party considerations', 'Risk management', 'Pre-deployment testing', 'GAI systems'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Ballot curing laws are designed to allow voters to correct their ballot and have it counted in cases where a voter signature matching algorithm incorrectly flags their ballot as invalid or when there are other issues with their ballot. These laws ensure that voters have a fallback system to verify the validity of their ballot, which may include direct contact from election officials.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Identity theft', 'Facial recognition system', 'Surveillance software', 'Employee discussions about union activity'] +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about concerns raised by panelists regarding healthcare access and delivery in relation to new technologies. It is clear in specifying the topic of interest (concerns, healthcare access and delivery, new technologies) and seeks detailed information on the concerns raised. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What concerns were raised by panelists regarding healthcare access and delivery in relation to new technologies?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to changes or prevented harmful product launches. Federal agencies are developing standards and guidance for the use of automated systems to help prevent bias. Non-profits and companies have developed best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of pre-deployment testing in the AI lifecycle?" +[ragas.testset.evolutions.INFO] seed question generated: "What concerns arise from companies using surveillance software to track employee discussions about union activity?" 
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Automated systems', 'Human fallback', 'Critical protections', 'Voting process'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Some potential harms associated with automated systems include: reliance on unproven technologies that may not work as intended, causing substantial and unjustified harm; the use of historical data that can lead to irrelevant information affecting decision-making; technologies designed to violate safety, such as those facilitating stalking; unintended harms from intended or unintended uses; and issues like alert fatigue from false alerts, as seen in a sepsis prediction model. Additionally, automated moderation systems may fail to distinguish between counter-speech and hateful messages, silencing critics.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address intellectual property risks in GAI systems, the first question focuses on suggested actions, whereas the second question emphasizes strategies that also ensure transparency. This difference in focus leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking what actions entities should take to proactively identify and manage risks related to the collection, use, sharing, or storage of sensitive data. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking practical steps or strategies for risk management in the context of sensitive data handling.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the impact of integrating user feedback into GAI (General Artificial Intelligence) monitoring on the effectiveness of provenance tracking and risk management strategies. It is clear in specifying the elements of interest (user feedback, GAI monitoring, provenance tracking, risk management) and seeks to understand the enhancement in effectiveness. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What role does human oversight play in the voting process, particularly regarding automated signature matching systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What efforts is NIST making to ensure the development of safe and trustworthy AI?" 
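
One pattern worth calling out in the `context scoring` entries: `score` is just the arithmetic mean of the four rubric values, and low-scoring merged nodes (1.0–1.25 in this run) are immediately followed by `retrying evolution`, i.e. the context was rejected. A sketch of that gate; the 1.5 cutoff is an assumption inferred from the accept/reject pattern here, not a value these logs state:

```python
from statistics import mean

def context_passes(rubric: dict, threshold: float = 1.5) -> bool:
    # 'score' in the entries above is the plain mean of the four rubric
    # values, e.g. (2 + 3 + 2 + 3) / 4 == 2.5. The 1.5 threshold is an
    # assumption inferred from this transcript: nodes scoring 1.0-1.25
    # are followed by "retrying evolution", while 2.0+ proceed.
    score = mean(rubric[k] for k in ("clarity", "depth", "structure", "relevance"))
    return score >= threshold

context_passes({"clarity": 2, "depth": 3, "structure": 2, "relevance": 3})  # True  (score 2.5)
context_passes({"clarity": 1, "depth": 1, "structure": 1, "relevance": 1})  # False (score 1.0)
```
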
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'AI technology can reinforce inequities in education, housing, and jobs by being used to limit access to equal opportunities, such as through automated tenant background screening, discrimination in automated hiring screening, and remote proctoring systems. Additionally, these technologies can shift the burden of oversight from employers to workers, schools to students, and landlords to tenants, which diminishes equality of opportunity. In the context of social welfare, AI systems may reduce the burden for government agencies but increase the burden on individuals interacting with these technologies, potentially creating feedback loops that reinforce inequality.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Ethical use of AI systems', 'Department of Energy AI Advancement Council', 'Artificial Intelligence Ethical Principles', 'National Science Foundation research', 'Pretrial risk assessments transparency'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Challenges associated with value chain and component integration in GAI systems include the improper acquisition or vetting of third-party components such as datasets, pre-trained models, and software libraries, which can lead to diminished transparency and accountability. The scale of training data may be too large for humans to vet, and the difficulty of training foundation models can result in extensive reuse of a limited number of models. Additionally, it may be difficult to attribute issues in a system's behavior to any one of these sources, and errors in third-party GAI components can have downstream impacts on accuracy and robustness.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does user feedback improve GAI monitoring for tracking and risk management?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of user consent in the collection and use of personal data according to data privacy guidelines. It is clear in specifying the topic of interest (user consent, personal data, data privacy guidelines) and seeks detailed information on the role of user consent. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does user consent play in the collection and use of personal data according to data privacy guidelines?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What issues did panelists identify regarding the intersection of new technologies and healthcare access, particularly in terms of equity and community involvement?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of civil rights in the context of automated systems and technology, specifically according to 'the foreword'. While it is clear in specifying the topic of interest (civil rights, automated systems, technology) and the source of information (the foreword), it assumes access to and understanding of 'the foreword' without providing its content or context. 
This makes the question unclear for those without direct access to the foreword. To improve clarity and answerability, the question could include a brief description or key points from the foreword, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Civil rights and liberties', 'Equal opportunities', 'Access to critical resources'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Sensitive data', 'Ethical review', 'Data quality', 'Access limitations'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of establishing feedback processes for end users and impacted communities in the context of AI system evaluation metrics. It is clear in specifying the topic of interest (feedback processes, end users, impacted communities, AI system evaluation metrics) and seeks an explanation of the rationale behind these processes. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics?" +[ragas.testset.evolutions.INFO] seed question generated: "What types of research does the National Science Foundation support to ensure the safety and effectiveness of automated systems?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not provide specific strategies for managing IP risks in GAI while ensuring transparency.', 'verdict': -1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the concerns related to companies using surveillance software to monitor employee discussions about union activity. It does not rely on external references or prior knowledge and has a clear intent, seeking information on the potential issues or problems that may arise from such practices.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of pre-deployment testing in the AI lifecycle. It is specific and clear in its intent, seeking information on the role and significance of pre-deployment testing within the broader context of AI development and deployment. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What criteria does the framework use to determine which automated systems are in scope for the AI Bill of Rights?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for handling sensitive data in automated systems?" 
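
The `filtered question` → `rewritten question` pairs (the 'foreword' question just above is a good example) trace the critic loop: verdict 1 keeps a question, verdict 0 sends it back with feedback for a rewrite, and repeated failures surface as `retrying evolution`. A runnable paraphrase of that control flow; `critique` and `rewrite` are hypothetical stand-ins for the underlying LLM calls, not ragas APIs:

```python
from typing import Callable, Optional, Tuple

def evolve_with_critic(
    question: str,
    critique: Callable[[str], Tuple[str, int]],  # returns (feedback, verdict)
    rewrite: Callable[[str, str], str],          # returns a rewritten question
    max_attempts: int = 3,
) -> Optional[str]:
    # Verdict 1 keeps the question; verdict 0 returns it with the critic's
    # feedback, which is what the "rewritten question" records above show.
    # Exhausting the attempts corresponds to the "retrying evolution: N times"
    # entries, i.e. the generator starts over from a fresh seed.
    for _ in range(max_attempts):
        feedback, verdict = critique(question)
        if verdict == 1:
            return question
        question = rewrite(question, feedback)
    return None
```
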
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Entities that collect, use, share, or store sensitive data should attempt to proactively identify harms and seek to manage them to avoid, mitigate, and respond appropriately to identified risks. Appropriate responses include determining not to process data when the privacy risks outweigh the benefits or implementing measures to mitigate acceptable risks.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI trustworthy characteristics', 'Human-AI Configuration', 'Information Integrity', 'Data Privacy', 'Confabulation', 'GAI systems', 'Digital content transparency', 'Harmful bias', 'Content provenance', 'AI system trustworthiness'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the role of user feedback in improving the monitoring process for GAI models, specifically in the context of tracking and risk management. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 2 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Biometric Information Privacy Act', 'Transparency for machine learning systems', 'Adverse action notices', 'Explainable AI systems', 'California warehouse employee quotas'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of human oversight in the voting process, specifically in relation to automated signature matching systems. It is clear in its intent, seeking information on the interaction between human oversight and automated systems within a specific context (voting process). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role does human oversight play in the voting process, particularly regarding automated signature matching systems?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What significance does user consent hold in the context of safeguarding personal data against abusive practices and ensuring ethical use in sensitive domains?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology in social welfare', 'Fraud detection', 'Digital ID systems', 'Healthcare access and delivery', 'Health disparities'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the efforts NIST (National Institute of Standards and Technology) is making to ensure the development of safe and trustworthy AI. It is specific, independent, and has a clear intent, seeking information on NIST's initiatives or actions in this area. 
The question does not rely on external references or prior knowledge beyond the general understanding of NIST and AI, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What are the suggested actions to improve Human-AI configuration in GAI systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What are adverse action notices and what requirements do lenders have regarding them?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Executive Order 13960', 'Trustworthy Artificial Intelligence', 'AI Bill of Rights', 'NIST AI Risk Management Framework', 'Stakeholder engagement'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI'] +[ragas.testset.evolutions.INFO] seed question generated: "What role do digital ID systems play in improving efficiency and reducing costs in social welfare?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issues identified by panelists concerning the intersection of new technologies and healthcare access, with a focus on equity and community involvement. It is specific in its scope (new technologies and healthcare access) and clearly states the aspects of interest (equity and community involvement). The intent is clear, seeking information on the issues identified by panelists. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Concerns arise from companies using surveillance software to track employee discussions about union activity, as it leads to the surveillance of individual employees and allows companies to surreptitiously intervene in discussions.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question is clear and specific, asking about the types of research supported by the National Science Foundation (NSF) to ensure the safety and effectiveness of automated systems. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on NSF-supported research areas related to automated systems' safety and effectiveness.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What types of research does the National Science Foundation support to ensure the safety and effectiveness of automated systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI incidents', 'AI Actors', 'Incident reporting', 'Documentation practices', 'AI risk management'] +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the NIST AI Risk Management Framework?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for handling sensitive data in automated systems. 
It is clear in its intent, seeking information on best practices or standards for managing sensitive data within such systems. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the expectations for handling sensitive data in automated systems?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What challenges did panelists see at the tech-health equity intersection?" +[ragas.testset.evolutions.INFO] seed question generated: "What efforts is NIST making to ensure the safe and trustworthy development of artificial intelligence?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the criteria used by a framework to determine which automated systems fall under the scope of the AI Bill of Rights. It is clear in its intent, seeking specific information about the criteria used for inclusion. The question is independent and does not rely on external references or prior knowledge beyond what is stated. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The importance of pre-deployment testing in the AI lifecycle lies in its ability to complicate risk mapping and pre-deployment measurement efforts due to the diverse ways and contexts in which GAI systems may be developed, used, and repurposed. Robust test, evaluation, validation, and verification (TEVV) processes can be iteratively applied and documented in the early stages of the AI lifecycle, ensuring that the systems are properly assessed before deployment.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of documenting and reporting GAI incidents for AI Actors?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the roles of user feedback and community input in assessing AI system risks. It is clear in specifying the elements of interest (user feedback, community input) and the context (assessing AI system risks). The intent is straightforward, seeking an explanation of the contributions of these factors to risk assessment in AI systems. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What roles do user feedback and community input play in assessing AI system risks?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of user consent in safeguarding personal data against abusive practices and ensuring ethical use in sensitive domains. It is clear in specifying the topic of interest (user consent) and the context (safeguarding personal data, ethical use in sensitive domains). The intent is to understand the role and importance of user consent in these areas. 
The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What NSF-funded research initiatives align with federal principles for ensuring the ethical deployment and effectiveness of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for an explanation of adverse action notices and the requirements lenders have regarding them. It is clear in specifying the topic of interest (adverse action notices) and seeks detailed information on both the definition and the regulatory requirements for lenders. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are adverse action notices and what requirements do lenders have regarding them?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'NIST is making efforts to ensure the development of safe and trustworthy AI by developing measurements, technology, tools, and standards that advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence. They have established the U.S. AI Safety Institute and the AI Safety Institute Consortium to build the necessary science for safe, secure, and trustworthy development and use of AI, in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to improve Human-AI configuration in GAI (General Artificial Intelligence) systems. It is clear in its intent, seeking specific actions or recommendations for improvement. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. However, it could benefit from specifying the aspects of Human-AI configuration it is interested in (e.g., collaboration, decision-making, user interface) to narrow down the scope and provide more targeted answers.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the suggested actions to improve Human-AI configuration in GAI systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['CBRN Information', 'Confabulation', 'Dangerous content', 'Data Privacy', 'Harmful Bias'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on concerns related to healthcare access and delivery with new technologies, while the second question is broader, addressing challenges at the intersection of technology and health equity. These questions differ in both depth and breadth.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the safeguards that ensure human oversight in the process of automated signature matching during voting. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. 
The question does not rely on external references or unspecified contexts, and it clearly seeks information about the mechanisms or procedures in place to maintain human oversight in this automated process.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of digital ID systems in improving efficiency and reducing costs in social welfare. It is clear in specifying the topic of interest (digital ID systems) and the context (social welfare), and it seeks specific information on their impact on efficiency and cost reduction. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role do digital ID systems play in improving efficiency and reducing costs in social welfare?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "Why is user consent important for protecting personal data?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the NIST AI Risk Management Framework. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific framework.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI'] +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What safeguards ensure human oversight in automated signature matching during voting?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the impacts of data privacy related to the unauthorized use or disclosure of sensitive information?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the efforts made by NIST to ensure the safe and trustworthy development of artificial intelligence. It is specific in identifying the organization (NIST) and the topic of interest (safe and trustworthy development of AI). The intent is clear, seeking information on the actions or initiatives undertaken by NIST in this area. The question is independent and does not rely on external references or prior knowledge beyond what is provided in the question itself.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What efforts is NIST making to ensure the safe and trustworthy development of artificial intelligence?" 
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The framework uses a two-part test to determine which automated systems are in scope for the AI Bill of Rights: (1) automated systems that (2) have the potential to meaningfully impact the American public’s rights, opportunities, or access to critical resources or services.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI considerations', 'Governance principles', 'Generative AI risks', 'Organizational governance', 'AI value chain'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of establishing feedback processes, while the second question is about how feedback and input assess AI risks. They have different focuses and depths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what automated systems must ensure regarding consent and ethical review for sensitive data. It is clear in its intent, seeking specific information about the requirements or standards for automated systems in the context of handling sensitive data. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What role does the 2023 Executive Order on Safe AI play in NIST's efforts to develop trustworthy artificial intelligence?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of documenting and reporting GAI (General Artificial Intelligence) incidents for AI Actors. It is clear in specifying the topic of interest (GAI incidents) and the target audience (AI Actors). The intent is to understand the significance of these actions, which is straightforward and unambiguous. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the importance of documenting and reporting GAI incidents for AI Actors?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What legal obligations do lenders have to inform consumers about adverse actions taken based on automated decision-making systems, and how does this relate to the broader need for transparency in algorithmic processes affecting individual rights?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the specific role of user consent in the context of data privacy guidelines, while the second question asks about the importance of user consent in protecting personal data. The depth and breadth of the inquiries differ.', 'verdict': 0} +[ragas.testset.evolutions.INFO] seed question generated: "What considerations are important for governing across the AI value chain in the context of generative AI?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What must automated systems ensure regarding consent and ethical review for sensitive data?"
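
For context, logs of this shape come out of ragas 0.1.x test set generation: the `[MultiContextEvolution]`/`[ReasoningEvolution]` tags and the simple seed questions map onto the evolution distribution passed to the generator. A minimal sketch of such a run over the two source PDFs, assuming ragas 0.1.x and LangChain's OpenAI wrappers; the model names, file paths, and `test_size` are placeholders:

```python
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from ragas.testset.generator import TestsetGenerator
from ragas.testset.evolutions import simple, reasoning, multi_context

# Placeholder models; any LangChain-compatible LLM and embeddings work here.
generator_llm = ChatOpenAI(model="gpt-4o-mini")
critic_llm = ChatOpenAI(model="gpt-4o")
embeddings = OpenAIEmbeddings()

# Placeholder paths for the two source documents.
documents = (
    PyMuPDFLoader("Blueprint-for-an-AI-Bill-of-Rights.pdf").load()
    + PyMuPDFLoader("NIST.AI.600-1.pdf").load()
)

generator = TestsetGenerator.from_langchain(generator_llm, critic_llm, embeddings)
testset = generator.generate_with_langchain_docs(
    documents,
    test_size=20,  # placeholder
    distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25},
)
```
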
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for NSF-funded research initiatives that align with federal principles for ensuring the ethical deployment and effectiveness of automated systems. It is clear in specifying the type of research (NSF-funded) and the criteria for alignment (federal principles for ethical deployment and effectiveness of automated systems). The intent is clear, and the question is specific and independent, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about the role of human oversight in the context of automated signature matching systems, while the second question broadly asks about ensuring human oversight in automated voting signatures without specifying the context. This leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Regulatory safety requirements', 'Civil rights and civil liberties', 'Technical standards and practices', 'Fair Information Practice Principles'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "Which NSF projects align with federal ethics for automated systems?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What initiatives is NIST undertaking to promote the secure and reliable advancement of AI, particularly in relation to the frameworks and standards outlined in their recent publications?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Panelists discussed several challenges at the tech-health equity intersection, including access to and expense of broadband service, privacy concerns associated with telehealth systems, and the expense associated with health monitoring devices, which can exacerbate equity issues. They also highlighted the need for accountability in the technologies used in medical care, particularly regarding racial biases and the use of race in medicine, which perpetuate harms and embed prior discrimination.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the NIST AI Risk Management Framework is to help manage risks posed to individuals, organizations, and society by AI. It aims to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does the documentation and reporting of GAI incidents play in enhancing the evaluation and management of AI system performance by AI Actors?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the regulatory safety requirements for medical devices in relation to the AI Bill of Rights?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the impacts of data privacy related to the unauthorized use or disclosure of sensitive information. It is clear in specifying the topic of interest (data privacy impacts) and the context (unauthorized use or disclosure of sensitive information). The intent is straightforward, seeking information on the consequences or effects of such privacy breaches. 
The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the impacts of data privacy related to the unauthorized use or disclosure of sensitive information?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the connections between digital ID systems, efficiency in welfare, and potential community burdens. It is clear in specifying the three elements of interest (digital ID systems, welfare efficiency, and community burdens) and seeks to understand their interrelationships. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the expectations for handling sensitive data, while the second question emphasizes considerations for consent and ethics. These are related but distinct aspects, leading to different depths and requirements.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about actions that can enhance GAI (General Artificial Intelligence) systems' Human-AI configuration while ensuring information integrity and security. The intent is clear in seeking specific actions or strategies. However, the term 'Human-AI config' is somewhat ambiguous and could benefit from clarification. Additionally, the question could be more specific about what aspects of information integrity and security are of interest (e.g., data privacy, system robustness). To improve clarity, the question could specify what is meant by 'Human-AI config' and detail the particular concerns regarding information integrity and security.", 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What actions enhance GAI systems' Human-AI config while ensuring info integrity and security?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'User feedback and community input assess AI risks through established feedback processes that allow end users and impacted communities to report problems and appeal system outcomes. These processes are integrated into AI system evaluation metrics, which include conducting impact assessments on how AI-generated content might affect different social, economic, and cultural groups, as well as understanding user perceptions and interactions with GAI content.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of the 2023 Executive Order on Safe AI in NIST's efforts to develop trustworthy artificial intelligence. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or unspecified contexts, and it clearly seeks information on the impact of a specific executive order on NIST's AI development efforts.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between digital ID systems, efficiency in welfare, and potential community burdens?" 
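
Note that the `evolution filter` verdicts are inverted relative to the question filter: verdict 1 means the evolved question kept the same depth and breadth as its seed, so the evolution failed (see `evolution_filter failed, retrying with 1` earlier in this run), while verdict 0 means it diverged enough to keep. A sketch of that check, with `judge_similarity` as a hypothetical stand-in for the LLM comparison:

```python
from typing import Callable

def evolution_succeeded(
    seed_q: str,
    evolved_q: str,
    judge_similarity: Callable[[str, str], int],
) -> bool:
    # Inverted semantics observed in the "evolution filter" entries above:
    # verdict 1 -> the two questions share the same depth/breadth, so the
    # evolution failed; verdict 0 -> the evolved question differs enough
    # from its seed to keep.
    return judge_similarity(seed_q, evolved_q) == 0
```
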
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Sensitive domains', 'Predictive analytics', 'Student data collection', 'Employee data transfer'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the safety and effectiveness of automated systems, while the second question is concerned with the alignment of NSF projects with federal ethics. These are different constraints and requirements.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the legal obligations of lenders to inform consumers about adverse actions taken based on automated decision-making systems and how this relates to the broader need for transparency in algorithmic processes affecting individual rights. It is clear in specifying the topic of interest (legal obligations, automated decision-making systems, transparency) and seeks detailed information on both the legal requirements and the broader implications for transparency. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'User consent is important for protecting personal data because it ensures that data collection is justified only in cases where consent can be appropriately and meaningfully given. Consent requests should be brief, understandable in plain language, and provide individuals with agency over their data collection and its specific context of use. This approach helps to prevent abusive data practices and ensures that individuals have control over how their data is used.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about important considerations for governing across the AI value chain specifically in the context of generative AI. It is clear in specifying the topic of interest (governance, AI value chain, generative AI) and seeks detailed information on governance considerations. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'A human curing process helps voters confirm their signatures and correct other voting mistakes, ensuring that all votes are counted. This process is already standard practice in much of the country, allowing both an election official and the voter to review and correct any issues.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Artificial Intelligence and Democratic Values', 'Non-discriminatory technology', 'Explainable AI', 'Community participation', 'Social welfare systems'] +[ragas.testset.evolutions.INFO] seed question generated: "What are the concerns associated with student data collection in educational settings?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What must lenders disclose to consumers about adverse actions from automated decisions, and how does this tie into the need for transparency in algorithms affecting rights?" 
+[ragas.testset.evolutions.INFO] seed question generated: "What role does community participation play in the design of technology for democratic values?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the initiatives NIST is undertaking to promote secure and reliable AI advancement, specifically in relation to frameworks and standards mentioned in their recent publications. It is clear in specifying the organization (NIST) and the focus (secure and reliable AI), and it seeks information on initiatives and related frameworks and standards. However, the question assumes familiarity with 'recent publications' without specifying which publications or providing context about them. To improve clarity and answerability, the question could specify the titles or key points of the recent publications or provide a brief description of the relevant frameworks and standards.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What initiatives is NIST undertaking to promote the secure and reliable advancement of AI, particularly in relation to the frameworks and standards outlined in their recent publications?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Safety Institute established by NIST?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses specifically on the role of digital ID systems in improving efficiency and reducing costs in social welfare, while the second question is broader, asking about the links between digital IDs, welfare efficiency, and community impacts. This difference in scope and focus leads to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of documentation and reporting of GAI (General AI) incidents in enhancing the evaluation and management of AI system performance by AI Actors. It is clear in specifying the topic of interest (documentation and reporting of GAI incidents) and the context (evaluation and management of AI system performance by AI Actors). The intent is clear, seeking to understand the impact of these practices on AI system performance management. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the relationship between specific sensitive data leaks and their corresponding privacy impacts. While the intent is clear in seeking information on the consequences of data leaks, the question is somewhat broad and could benefit from more specificity. For example, it could specify types of sensitive data (e.g., financial, medical) or particular privacy impacts (e.g., identity theft, financial loss). This would make the question more focused and easier to answer comprehensively.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The 2023 Executive Order on Safe, Secure, and Trustworthy AI plays a significant role in NIST's efforts by guiding the establishment of the U.S. AI Safety Institute and the AI Safety Institute Consortium, which are aimed at building the necessary science for the safe, secure, and trustworthy development and use of AI.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are some real-life examples of how human alternatives can be implemented in various sectors?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the regulatory safety requirements for medical devices in relation to the AI Bill of Rights. It is clear in specifying the topic of interest (regulatory safety requirements for medical devices) and the context (AI Bill of Rights). However, the AI Bill of Rights is a broad and potentially ambiguous term that could refer to different documents or initiatives depending on the jurisdiction or context. To improve clarity and answerability, the question could specify which AI Bill of Rights it refers to (e.g., a specific country's legislation or a particular organization's guidelines).", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are the regulatory safety requirements for medical devices in relation to the AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about actions that can enhance GAI (General Artificial Intelligence) systems' Human-AI configuration while ensuring information integrity and security. The intent is clear in seeking specific actions or strategies. However, the term 'Human-AI config' is somewhat ambiguous and could benefit from clarification. Additionally, the question could be more specific about what aspects of information integrity and security are of interest (e.g., data privacy, system robustness). To improve clarity, the question could specify what is meant by 'Human-AI config' and detail the particular concerns regarding information integrity and security.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems should consider that consent for sensitive data may need to be acquired from a guardian and/or child, and that consent for non-necessary functions should be optional. Additionally, any use of sensitive data or decision processes based on sensitive data that might limit rights, opportunities, or access should undergo a thorough ethical review and monitoring. This includes ensuring that data quality is maintained to avoid adverse consequences from flawed data, limiting access to sensitive data based on necessity, and providing regular public reports on data security lapses and ethical pre-reviews.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on adverse action notices and the requirements for lenders, while the second question delves into disclosures about adverse actions from automated decisions and the broader context of algorithmic transparency. The second question has a broader scope and depth.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The National Science Foundation (NSF) funds extensive research to help foster the development of automated systems that adhere to and advance their safety, security, and effectiveness. Multiple NSF programs support research that directly addresses many of these principles, including the National AI Research Institutes, the Cyber Physical Systems program, the Secure and Trustworthy Cyberspace program, the Formal Methods in the Field program, and the Designing Accountable Software Systems program.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What sensitive data leaks lead to which specific privacy impacts?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do GAI incident docs help AI Actors assess and manage system performance?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the concerns associated with student data collection in educational settings. It is clear in its intent, seeking information on potential issues or risks related to this practice. The question is specific and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the concerns associated with student data collection in educational settings?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of community participation in the design of technology for democratic values. It is clear in specifying the topic of interest (community participation, technology design, democratic values) and seeks information on the relationship between these elements. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The important considerations for governing across the AI value chain in the context of generative AI include organizational governance, oversight levels, human-AI configurations, human review, tracking and documentation, and management oversight. Additionally, governance tools and protocols that apply to other types of AI systems can also be applied to generative AI systems, including accessibility, AI actor credentials, alignment to organizational values, auditing, change-management controls, commercial use, and data provenance.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Prompt injection', 'Indirect prompt injection attacks', 'Data poisoning', 'Intellectual property risks', 'Obscene and degrading content']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Safety Institute established by NIST. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific institute.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Incident response plans', 'Third-party GAI technologies', 'Data privacy', 'Continuous monitoring', 'Vendor contracts']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the importance of documenting and reporting GAI incidents, while the second question is about how these documents help in assessing and managing system performance. They have different focuses and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the impacts of data privacy related to unauthorized use or disclosure of sensitive information, which is broader and more detailed. The second question specifically asks about data leaks causing privacy issues, which is narrower in scope.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the initiatives NIST is undertaking to promote secure and reliable AI advancement, specifically in relation to frameworks and standards mentioned in their recent publications. It is clear in specifying the organization (NIST) and the focus (secure and reliable AI advancement), and it seeks information on initiatives, frameworks, and standards. However, it assumes familiarity with NIST's recent publications without providing specific details or context about these documents. To improve clarity and answerability, the question could benefit from mentioning specific frameworks or standards or providing a brief description of the recent publications in question.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are indirect prompt injection attacks and how do they exploit vulnerabilities in GAI-integrated applications?"
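The trace above is ragas's synthetic test set generation: each merged context node is scored on clarity, depth, structure, and relevance; seed questions are drawn from passing nodes; and the evolution and question filters either accept (`'verdict': 1`) or reject and retry (`'verdict': 0`) each evolved question. A minimal reproduction sketch of the kind of call that emits this output, assuming ragas 0.1.x, OpenAI API access, and `docs` already loaded with a LangChain document loader over the two source PDFs (the model choices here are illustrative, not the project's confirmed settings):

```python
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from ragas.testset.generator import TestsetGenerator
from ragas.testset.evolutions import simple, reasoning, multi_context

generator = TestsetGenerator.from_langchain(
    generator_llm=ChatOpenAI(model="gpt-3.5-turbo"),
    critic_llm=ChatOpenAI(model="gpt-4"),  # scores contexts, filters questions
    embeddings=OpenAIEmbeddings(),
)

# The reasoning / multi_context evolutions correspond to the
# [ReasoningEvolution] / [MultiContextEvolution] tags in the log above.
testset = generator.generate_with_langchain_docs(
    docs,
    test_size=20,
    distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25},
)
df = testset.to_pandas()
```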
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data Privacy', 'Privacy Act of 1974', 'NIST Privacy Framework', 'Biometric identifying technology', 'Workplace surveillance']
+[ragas.testset.evolutions.INFO] seed question generated: "What are the key considerations for establishing incident response plans for third-party GAI technologies?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how human alternatives can be implemented in various sectors. It is clear in its intent, seeking specific examples across different sectors. However, the term 'human alternatives' is somewhat vague and could benefit from clarification. Does it refer to automation, AI, robotics, or other forms of technology replacing human roles? To improve clarity and answerability, the question could specify what is meant by 'human alternatives' and possibly provide examples of sectors of interest.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are some real-life examples of how human alternatives can be implemented in various sectors?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the regulatory safety requirements for medical devices in relation to the AI Bill of Rights. It is clear in specifying the topic of interest (regulatory safety requirements for medical devices) and the context (AI Bill of Rights). However, the AI Bill of Rights is a broad and potentially ambiguous term that could refer to different documents or initiatives depending on the jurisdiction or context. To improve clarity and answerability, the question could specify which AI Bill of Rights it refers to (e.g., a specific country's legislation or a particular organization's guidelines).", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Panelists described the increasing scope of technology use in providing for social welfare, including digital ID systems, which are focused on improving efficiency and reducing cost. However, they cautioned that these systems may reduce the burden for government agencies by increasing the burden and agency of people using and interacting with these technologies. Additionally, these systems can produce feedback loops and compounded harm, collecting data from communities and using it to reinforce inequality. To mitigate these harms, it was suggested that community input should be ensured at the beginning of the design process, and there should be ways to opt out of these systems and use associated human-driven mechanisms instead.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the requirements for employers regarding workplace surveillance during a labor dispute?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Preventable harms', 'Ethics review', 'Sepsis prediction model', 'Algorithmic bias']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Lenders are required by federal law to notify consumers about certain decisions made about them, specifically through "adverse action" notices. This includes providing notice of the reasons a creditor took adverse action on a credit application or existing credit account. This requirement ties into the need for transparency in algorithms affecting rights, as it ensures that consumers are informed about the automated decisions impacting their credit, allowing them to understand and contest those decisions.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Prompt injection', 'Indirect prompt injection attacks', 'Data poisoning', 'Intellectual property risks', 'Obscene and degrading content']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Community participation plays a crucial role in the design of technology for democratic values by emphasizing human-computer interaction that involves the community, ensuring that the technology is non-discriminatory, explainable, and privacy-aware. Engaging with impacted communities helps to understand the potential harms of technologies and build protection by design into future systems.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-enabled nudification technology', 'Image-based abuse', 'Non-consensual intimate images', 'AI-powered cameras', 'Road safety habits']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the AI Safety Institute established by NIST is to continue efforts to build the science necessary for safe, secure, and trustworthy development and use of artificial intelligence (AI), in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issues that arise from collecting sensitive student data and its potential misuse. It is clear in its intent, seeking information on the problems associated with data collection and misuse. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['CBRN Information', 'Confabulation', 'Dangerous content', 'Data Privacy', 'Harmful Bias']
+[ragas.testset.evolutions.INFO] seed question generated: "What role does an ethics review play in the development of automated systems to prevent harm?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential consequences of prompt injection attacks on GAI systems?"
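A side note on the `context scoring` entries: the `score` field is simply the arithmetic mean of the four rubric values, which can be verified against any entry in this trace. A toy sketch of that arithmetic; the exact pass/retry cutoff is not reproduced here, since the trace only shows that nodes at 1.25 and below are retried while 2.0 and above proceed:

```python
def context_score(clarity: int, depth: int, structure: int, relevance: int) -> float:
    """Mean of the four rubric values, matching the 'score' field in the log."""
    return (clarity + depth + structure + relevance) / 4

assert context_score(2, 2, 2, 2) == 2.0    # kept: followed by keyphrase extraction above
assert context_score(1, 1, 1, 2) == 1.25   # followed by "retrying evolution" above
```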
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions impacts due to leakage and unauthorized use, disclosure, or de-anonymization of biometric, health, location, or other personally identifiable information or sensitive data as causes of privacy issues.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What problems does AI-enabled nudification technology seek to address and protect against?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What issues arise from collecting sensitive student data and its potential misuse?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Policies and procedures for human-AI configurations', 'Oversight of GAI systems', 'Risk measurement processes', 'Human-AI configuration', 'Threat modeling for GAI systems']
+[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of eased access to dangerous content?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Automated systems', 'Bias testing', 'Equitable design', 'Systemic biases', 'Algorithmic discrimination', 'Equitable design', 'Automated systems', 'Legal protections', 'Proactive equity assessments']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about indirect prompt injection attacks and how they exploit vulnerabilities in GAI-integrated applications. It is specific and clear in its intent, seeking an explanation of a particular type of attack and its impact on a defined context (GAI-integrated applications). The question is self-contained and does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are indirect prompt injection attacks and how do they exploit vulnerabilities in GAI-integrated applications?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['OSTP', 'Artificial intelligence', 'Biometric technologies', 'Request For Information (RFI)', 'Public comments']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Automated systems', 'Timely human consideration', 'Fallback and escalation process', 'Sensitive domains']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'GAI incident documentation helps AI Actors assess and manage system performance by facilitating smoother sharing of information regarding incidents, which includes logging, recording, and analyzing GAI incidents. This documentation allows AI Actors to trace impacts to their source, understand previous incidents, and implement measures to prevent similar occurrences in the future. Additionally, regular information sharing and maintaining change management records empower AI Actors in responding to and managing AI incidents effectively.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key considerations in establishing incident response plans for third-party GAI (General Artificial Intelligence) technologies. It is specific in its focus on incident response plans and third-party GAI technologies, and it clearly seeks information on the considerations involved in this process. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the key considerations for establishing incident response plans for third-party GAI technologies?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how human alternatives can be implemented in various sectors. It is clear in its intent, seeking specific examples across different sectors. However, the term 'human alternatives' is somewhat vague and could benefit from clarification. Does it refer to automation, AI, or other technological replacements for human roles? Specifying this would improve the clarity and answerability of the question.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are the policies and procedures related to human-AI configuration in the oversight of AI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.INFO] seed question generated: "What role do legal protections play in addressing algorithmic discrimination?"
+[ragas.testset.evolutions.INFO] seed question generated: "What was the purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the requirements for employers regarding workplace surveillance during a labor dispute. It is specific and clear in its intent, seeking information on legal or regulatory requirements. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of labor disputes and workplace surveillance. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when using automated systems in sensitive domains?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "While both questions address the collection of student data, 'concerns associated with student data collection in educational settings' is broader and includes a wider range of issues than 'risks of collecting sensitive student data', leading to different depths of inquiry.", 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Technical protections', 'Rights of the American public', 'Implementation of principles']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential consequences of prompt injection attacks on GAI (Generative AI) systems. It is clear in specifying the type of attack (prompt injection) and the target (GAI systems), and it seeks information on the consequences of such attacks. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the potential consequences of prompt injection attacks on GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Reporting expectations', 'Transparency', 'Artificial Intelligence ethics', 'Traffic calming measures', 'AI Risk Management Framework']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the problems that AI-enabled nudification technology aims to address and protect against. It is clear in its intent, seeking specific information about the objectives and protective measures of this technology. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What problems does AI-enabled nudification technology seek to address and protect against?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What mechanisms underlie indirect prompt injection attacks in GAI systems, and how do these mechanisms facilitate the exploitation of vulnerabilities in applications that integrate large language models?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of an ethics review in the development of automated systems to prevent harm. It is clear in specifying the topic of interest (ethics review) and the context (development of automated systems to prevent harm). The intent is straightforward, seeking an explanation of the role and importance of ethics reviews in this specific context. The question is self-contained and does not rely on external references or prior knowledge beyond general understanding of ethics reviews and automated systems.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role does an ethics review play in the development of automated systems to prevent harm?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Stakeholder meetings', 'Private sector and civil society', 'Positive use cases', 'Potential harms and oversight']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Clear and accessible notice', 'Explanations for decisions', 'Algorithmic impact assessments', 'User experience research']
+[ragas.testset.evolutions.INFO] seed question generated: "What role do technical protections play in the implementation of the Blueprint for an AI Bill of Rights?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question 'What are the implications of eased access to dangerous content?' is too vague and broad. It does not specify what type of dangerous content is being referred to (e.g., violent media, harmful substances, misinformation), nor does it provide a context for 'eased access' (e.g., through the internet, social media, physical availability). To improve clarity and answerability, the question could specify the type of dangerous content and the context in which access is being eased. For example, 'What are the implications of eased access to violent media content through social media platforms?'", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are the implications of eased access to dangerous content?"
+[ragas.testset.evolutions.INFO] seed question generated: "What were some of the discussions related to positive use cases during the meetings conducted by OSTP?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do algorithmic impact assessments play in the expectations for automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of legal protections in addressing algorithmic discrimination. It is clear in specifying the topic of interest (legal protections) and the issue it addresses (algorithmic discrimination). The intent is straightforward, seeking an explanation of how legal measures can mitigate or address biases in algorithms. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of legal protections and algorithmic discrimination.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for using automated systems in sensitive domains. It is clear in its intent, seeking information on factors to consider, and does not rely on external references or unspecified contexts. The question is specific and independent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What considerations should be taken into account when using automated systems in sensitive domains?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the policies and procedures related to human-AI configuration in the oversight of AI systems. It is clear in specifying the topic of interest (policies and procedures, human-AI configuration, oversight of AI systems) and seeks detailed information on these aspects. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the Request For Information (RFI) issued by the Office of Science and Technology Policy (OSTP) regarding biometric technologies. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of the RFI.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What issues does AI-driven nudification tech aim to mitigate, and how do these relate to broader concerns about automated systems causing unintended harm?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Federal law requires employers, and any consultants they may retain, to report the costs of surveilling employees in the context of a labor dispute. Employers engaging in workplace surveillance aimed at obtaining information concerning the activities of employees or a labor organization in connection with a labor dispute must report expenditures relating to this surveillance to the Department of Labor Office of Labor-Management Standards, and consultants who employers retain for these purposes must also file reports regarding their activities.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The risks of collecting sensitive student data include concerns about the lack of express parental consent, the lack of transparency in how the data is being used, and the potential for resulting discriminatory impacts. Additionally, the data collected can include sensitive information such as demographic details, drug use, and interest in LGBTQI+ groups, which may lead to inappropriate forecasting of student success and flagging of students with disabilities as cheating.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the mechanisms underlying indirect prompt injection attacks in GAI (Generative AI) systems and how these mechanisms facilitate the exploitation of vulnerabilities in applications that integrate large language models. It is specific in its focus on indirect prompt injection attacks and the exploitation of vulnerabilities, making the intent clear. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the steps to ensure effective incident response for third-party GAI (General Artificial Intelligence), specifically focusing on linking ownership and legal alignment. The intent is clear as it seeks specific steps or measures. However, the term 'GAI' might be ambiguous without further context, and the question could benefit from a brief explanation of what is meant by 'third-party GAI'. Additionally, the phrase 'linking ownership and legal alignment' could be clarified to specify what aspects of ownership and legal alignment are of interest (e.g., data ownership, liability, compliance).", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What multifaceted risks arise from prompt injection attacks on GAI systems, particularly concerning misinformation dissemination and the potential for data poisoning?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What factors must be evaluated to ensure effective human oversight and alternatives when deploying automated systems in high-stakes areas like criminal justice and healthcare?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of technical protections in the implementation of the Blueprint for an AI Bill of Rights. It is clear in specifying the topic of interest (technical protections) and the context (Blueprint for an AI Bill of Rights). The intent is to understand the specific contributions or functions of technical protections within this framework. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role do technical protections play in the implementation of the Blueprint for an AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the safeguards ensured by ethics reviews to prevent harm in automated systems. It is clear in specifying the topic of interest (safeguards, ethics reviews, harm prevention, automated systems) and seeks detailed information on the measures taken. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology (NIST). It does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. The intent is clear, seeking information about the purpose of a specific framework from a specific organization.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What drives indirect prompt injection in GAI systems and how do they exploit app vulnerabilities?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What steps ensure effective incident response for third-party GAI, linking ownership and legal alignment?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination', 'AI Bill of Rights', 'Automated systems', 'American people', 'October 2022']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of algorithmic impact assessments in the expectations for automated systems. It is clear in specifying the topic of interest (algorithmic impact assessments) and the context (expectations for automated systems). The intent is to understand the significance or influence of these assessments on automated systems. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role do algorithmic impact assessments play in the expectations for automated systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Policies and procedures are in place to define and differentiate roles and responsibilities for human-AI configurations and oversight of AI systems. This includes establishing acceptable use policies for GAI interfaces, modalities, and human-AI configurations, as well as defining criteria for the kinds of queries GAI applications should refuse to respond to.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions that algorithmic discrimination may violate legal protections, indicating that legal protections play a role in addressing algorithmic discrimination by providing a framework that designers, developers, and deployers of automated systems must adhere to in order to protect individuals and communities from unjustified different treatment based on various classifications.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about discussions related to positive use cases during meetings conducted by OSTP. It is clear in specifying the topic of interest (positive use cases) and the context (meetings conducted by OSTP). However, it assumes familiarity with the specific meetings and their content without providing additional context or details about which meetings or time frame are being referred to. To improve clarity and answerability, the question could specify the particular meetings or time period of interest, or provide more context about the discussions being referred to.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What were some of the discussions related to positive use cases during the meetings conducted by OSTP?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What safeguards are ensured by ethics reviews to prevent harm in automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question 'What are the implications of eased access to dangerous content?' is too vague and broad. It does not specify the type of dangerous content (e.g., violent, illegal, harmful misinformation) or the context in which access is eased (e.g., online platforms, physical media). Additionally, 'implications' could refer to a wide range of potential effects (e.g., societal, psychological, legal). To improve clarity and answerability, the question could specify the type of dangerous content and the context of access, as well as the specific implications of interest.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What issues related to bias and discrimination are associated with the use of automated systems in decision-making?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about indirect prompt injection attacks in GAI systems and how they exploit vulnerabilities in applications, sharing the same depth, breadth, and requirements for the answer.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for automated systems regarding safety and effectiveness?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the issues that AI-driven nudification technology aims to mitigate and how these issues relate to broader concerns about automated systems causing unintended harm. It is clear in specifying the technology of interest (AI-driven nudification tech) and seeks information on both the specific issues it addresses and the broader implications. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. However, the term 'nudification' might be unfamiliar to some readers, so a brief definition or context could enhance clarity further.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Human alternatives', 'Opt-out mechanism', 'Timely human consideration', 'Fallback and escalation system']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks for key considerations in establishing incident response plans, implying a need for detailed steps or factors. The second question is broader, asking what ensures effective incident response, which could include considerations but also other elements like tools, training, and policies. Thus, they differ in depth and breadth.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What significance do algorithmic impact assessments hold in shaping the clarity and accountability expectations for automated systems across varying risk levels?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for the factors that need to be evaluated to ensure effective human oversight and alternatives when deploying automated systems in high-stakes areas like criminal justice and healthcare. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking a list or discussion of relevant factors for effective oversight and alternatives in these specific domains.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Confabulation', 'Generative AI systems', 'False content', 'Statistical prediction', 'Risks of confabulated content']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions pertain to ethics reviews in the context of automated systems, the first question focuses on the role of ethics reviews in preventing harm, whereas the second question is more general, asking about the safeguards provided by ethics reviews. This difference in focus leads to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What problems does AI nudification tech address, and how do they connect to wider concerns about automated harm?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of timely human consideration in the context of automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the connections between technical protections and the rights outlined in the AI Bill of Rights. It is clear in its intent, seeking to understand the relationship between two specific concepts: technical protections and the AI Bill of Rights. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. However, for improved clarity, it could specify which AI Bill of Rights it refers to, as there might be different versions or interpretations.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Actors', 'GAI system performance', 'Content provenance data tracking', 'Incident response plans', 'Human-AI Configuration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key aspects that ensure transparency in AI systems according to the NIST framework. It is specific in its focus on transparency and the NIST framework, making the intent clear. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What factors ensure effective oversight in automated systems for critical fields like justice and healthcare?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with confabulated content in generative AI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated system', 'Plain language documentation', 'System functioning', 'Outcome explanations', 'User notification']
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between technical protections and the rights outlined in the AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about issues related to bias and discrimination associated with the use of automated systems in decision-making. It is clear in its intent, seeking information on specific problems (bias and discrimination) within a defined context (automated systems in decision-making). The question is independent and does not rely on external references or additional context to be understood. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of incident response plans in managing GAI system incidents?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key aspects ensure transparency in AI systems as per the NIST framework?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Technical protections', 'Rights of the American public', 'Implementation of principles']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for automated systems in terms of safety and effectiveness. It is clear in its intent, seeking information on the criteria or standards for these systems. The question is independent and does not rely on external references or unspecified contexts. Therefore, it is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the expectations for automated systems regarding safety and effectiveness?"
+[ragas.testset.evolutions.INFO] seed question generated: "What should users be notified about regarding automated systems that impact them?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the problems AI-enabled nudification technology seeks to address and protect against, while the second question also includes a broader inquiry into how these problems connect to wider concerns about automated harm. This difference in scope and depth makes the questions not equal.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the main principles outlined in the AI Bill of Rights and how do they aim to protect the rights of the American public?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Ethics reviews provide safeguards for automated systems by vetting key development decisions to prevent harm from occurring. They help identify and mitigate potential harms through pre-deployment testing and ongoing monitoring processes.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of algorithmic impact assessments in shaping clarity and accountability expectations for automated systems across varying risk levels. It is specific in its focus on algorithmic impact assessments and their role in clarity and accountability. The intent is clear, seeking an explanation of the importance and influence of these assessments. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address the use of automated systems in sensitive or critical domains, the first question is broader, asking for general considerations, whereas the second question specifically focuses on factors ensuring effective oversight. This difference in focus leads to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Effective incident response for third-party GAI is ensured by establishing incident response plans that align with impacts, communicating these plans to relevant AI actors, defining ownership of incident response functions, rehearsing the plans regularly, improving them based on retrospective learning, and reviewing for alignment with relevant laws.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of timely human consideration in the context of automated systems. It is clear in specifying the topic of interest (timely human consideration) and the context (automated systems). The intent is to understand the significance of human intervention or oversight in automated processes. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the importance of timely human consideration in the context of automated systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about the role of technical protections in the implementation of the Blueprint for an AI Bill of Rights, while the second question is broader, asking about the links between tech protections and the AI Bill of Rights. The depth and breadth of the inquiries differ.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question asks for the purpose of the AI Risk Management Framework by NIST, which is a broad inquiry into the framework's goals. The second question specifically asks about AI transparency according to NIST, which is a narrower aspect of the framework. Thus, they differ in depth and breadth.", 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What role do algorithmic impact assessments play in clarifying accountability for automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential risks associated with confabulated content in generative AI systems. It is clear in specifying the topic of interest (confabulated content in generative AI systems) and seeks detailed information on the associated risks. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the potential risks associated with confabulated content in generative AI systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What criteria should automated systems meet to ensure both safety and the prevention of algorithmic discrimination, and how should these be independently evaluated and reported?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems in decision-making have been associated with issues such as reflecting and reproducing existing unwanted inequities, embedding new harmful bias and discrimination, and being unsafe or ineffective in areas like patient care, hiring, and credit decisions.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of incident response plans in managing GAI (General Artificial Intelligence) system incidents. It is clear in specifying the topic of interest (incident response plans) and the context (GAI system incidents), making the intent straightforward and understandable. The question is self-contained and does not rely on external references or additional context, making it specific, independent, and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of incident response plans in managing GAI system incidents?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Executive Order 13960', 'Trustworthy Artificial Intelligence', 'AI Bill of Rights', 'NIST AI Risk Management Framework', 'Stakeholder engagement']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what users should be notified about regarding automated systems that impact them. It is clear in its intent, seeking specific information on user notifications related to automated systems. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What should users be notified about regarding automated systems that impact them?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the NIST AI Risk Management Framework?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the main principles outlined in the AI Bill of Rights and how they aim to protect the rights of the American public. It is clear in specifying the document of interest (AI Bill of Rights) and seeks detailed information on both the principles and their protective measures. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does prompt human intervention play in ensuring equitable outcomes and effective fallback mechanisms within automated systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not provide specific links between tech protections and the AI Bill of Rights.', 'verdict': -1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the expectations for automated systems, while the second question focuses on clarifying accountability for automated systems. These are different aspects, leading to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the multifaceted risks associated with prompt injection attacks on GAI (Generative AI) systems, specifically focusing on misinformation dissemination and data poisoning. It is clear in its intent, specifying the type of attack (prompt injection) and the particular risks of interest (misinformation dissemination and data poisoning). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Surveillance oversight', 'Algorithmic discrimination', 'Consent practices', 'Civil liberties']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Effective oversight in automated systems for critical fields like justice and healthcare is ensured by tailoring the systems to their intended purpose, providing meaningful access for oversight, including training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions. Additionally, reporting on human governance processes and assessing their timeliness, accessibility, outcomes, and effectiveness should be made public whenever possible.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Actor', 'GAI risks', 'Suggested actions', 'AI RMF functions', 'Govern 1.1']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What implications arise from the erroneous yet confident outputs of generative AI, particularly in relation to the dissemination of dangerous content and the potential for misleading users in critical decision-making scenarios?"
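The DEBUG-level `context scoring`, `filtered question`, and `evolution filter` records shown throughout this trace are not emitted at default logging settings. A minimal sketch of a logging setup that surfaces them, assuming standard `logging` propagation; the logger names are taken directly from the prefixes in this trace:

```python
import logging

# Reproduce the "[ragas.testset.filters.DEBUG] ..." prefix format of this log.
logging.basicConfig(format="[%(name)s.%(levelname)s] %(message)s")

# Raise verbosity only for the two ragas modules that emit these records.
for name in ("ragas.testset.evolutions", "ragas.testset.filters"):
    logging.getLogger(name).setLevel(logging.DEBUG)
```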
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What risks do prompt injection attacks pose to GAI, especially regarding misinformation and data poisoning?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the key elements of consent practices that should be followed to prevent abusive surveillance?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the role of AI actors in the AI system lifecycle?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What roles do incident response plans play in evaluating GAI system performance and ensuring effective communication among AI Actors during incidents?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "AI nudification technology addresses the problem of creating non-consensual intimate images that can lead to image-based abuse, particularly impacting women. This technology raises wider concerns about automated harm as it exemplifies how advanced tools can be misused, leading to devastating effects on victims' personal and professional lives, as well as their mental and physical health. Additionally, the reliance on automated systems can result in unintended consequences, such as incorrect penalization of drivers or biased decision-making based on flawed historical data, highlighting the need for safeguards and ethical reviews in technology deployment.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Accessibility standards', 'Disparity assessment', 'Algorithmic discrimination', 'Ongoing monitoring and mitigation']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Organizational responsibilities', 'Incident monitoring', 'Document retention policy', 'AI system inventory']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the criteria that automated systems should meet to ensure safety and prevent algorithmic discrimination, as well as how these criteria should be independently evaluated and reported. It is clear in its intent, specifying the dual focus on safety and discrimination prevention, and seeks detailed information on both the criteria and the evaluation/reporting process. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the NIST AI Risk Management Framework. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific framework.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What standards should automated systems follow for safety and fairness, and how to assess them?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to address algorithmic discrimination in automated systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What actions are suggested for managing the AI system inventory according to organizational risk priorities?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address the impact of prompt injection attacks on GAI systems, the second question specifically focuses on misinformation and data poisoning, adding additional constraints and depth.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key information that users must receive about automated systems affecting their outcomes. It is clear in its intent, seeking specific details about the necessary information users should be provided with. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The main principles outlined in the AI Bill of Rights are not explicitly listed in the provided context. However, the context discusses the Blueprint for an AI Bill of Rights, which consists of five principles aimed at guiding the design, use, and deployment of automated systems to protect the rights of the American public. It emphasizes the importance of technical protections and practices to guard against potential harms and outlines expectations for automated systems, including transparency and reporting.', 'verdict': -1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key info must users receive about automated systems affecting their outcomes?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Continuous monitoring of GAI system impacts', 'Harmful bias and homogenization', 'Structured human feedback exercises', 'GAI red-teaming', 'Information integrity']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of erroneous yet confident outputs from generative AI, specifically in the context of disseminating dangerous content and misleading users in critical decision-making scenarios. It is clear in its intent, specifying the type of AI output (erroneous yet confident) and the contexts of interest (dangerous content dissemination and critical decision-making). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of prompt human intervention in ensuring equitable outcomes and effective fallback mechanisms within automated systems. It is clear in specifying the topic of interest (human intervention, equitable outcomes, fallback mechanisms, automated systems) and seeks detailed information on the impact and importance of human intervention. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key elements of consent practices that should be followed to prevent abusive surveillance. It is clear in specifying the topic of interest (consent practices) and the context (preventing abusive surveillance). The intent is straightforward, seeking specific elements or guidelines. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of consent practices and surveillance. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the key elements of consent practices that should be followed to prevent abusive surveillance?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of AI actors in the AI system lifecycle. It is clear in specifying the topic of interest (AI actors) and the context (AI system lifecycle). The intent is to understand the specific roles or functions these actors play within the lifecycle of an AI system. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Surveillance oversight', 'Algorithmic discrimination', 'Consent practices', 'Civil liberties']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Preventable harms', 'Ethics review', 'Sepsis prediction model', 'Algorithmic bias']
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of continuous monitoring of GAI system impacts?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on expectations for safety and effectiveness, while the second question addresses standards for safety and fairness and how to assess them. The second question has a broader scope and different requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the roles of incident response plans in evaluating GAI (General Artificial Intelligence) system performance and ensuring effective communication among AI Actors during incidents. It is clear in specifying the topic of interest (incident response plans, GAI system performance, communication among AI Actors) and seeks detailed information on both evaluation and communication aspects. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What are the risks of confident but wrong outputs from generative AI?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do civil rights play in the context of automated systems and technology according to the foreword?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to address algorithmic discrimination in automated systems. It is clear in its intent, seeking specific actions or strategies to mitigate discrimination caused by algorithms. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What measures should be taken to address algorithmic discrimination in automated systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What were the shortcomings of the sepsis prediction model implemented in hospitals?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do civil liberties play in the context of surveillance systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does human input affect fairness and fallback in automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What's the role of incident response plans in assessing GAI performance and AI Actor communication during incidents?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions for managing the AI system inventory based on organizational risk priorities. It is clear in its intent, seeking specific actions related to risk management in the context of AI systems. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable with sufficient domain knowledge.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What actions are suggested for managing the AI system inventory according to organizational risk priorities?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Prompt injection attacks pose significant risks to GAI by enabling attackers to modify inputs to the system, leading to unintended behaviors and potential misinformation. Direct prompt injections can result in malicious prompts being inputted, causing negative consequences for interconnected systems. Indirect prompt injection attacks exploit vulnerabilities in LLM-integrated applications, potentially leading to the theft of proprietary data or the execution of malicious code. Additionally, data poisoning is a risk where adversaries compromise training datasets, manipulating the outputs or operations of GAI systems, which can exacerbate misinformation and the reliability of generated content.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the risks associated with incorrect outputs from generative AI systems, requiring a similar level of detail and scope in the response.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What specific consent practices and design principles should be implemented in automated systems to effectively mitigate the risks of abusive surveillance while ensuring user privacy and control over their data?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about discussions related to positive use cases during meetings conducted by OSTP. It is clear in specifying the topic of interest (positive use cases) and the context (meetings conducted by OSTP). However, it assumes familiarity with the specific meetings and their content without providing additional context or details about which meetings or time frame are being referred to. To improve clarity and answerability, the question could specify the particular meetings or time period of interest, or provide more context about the discussions being referred to.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'AI actors play an active role in the AI system lifecycle, including organizations and individuals that deploy or operate AI.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the NIST AI Risk Management Framework is to help incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems. It aims to foster the development of innovative approaches to address characteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, robustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of harmful uses.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions ask for the same information regarding user notifications about automated systems that impact them, sharing the same depth, breadth, and requirements for the answer.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What proactive and ongoing strategies should be implemented in the design and assessment of automated systems to prevent algorithmic discrimination, particularly concerning underserved communities?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of continuous monitoring of GAI (General Artificial Intelligence) system impacts. It is clear in specifying the topic of interest (continuous monitoring of GAI system impacts) and seeks an explanation of the purpose behind this practice. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of continuous monitoring of GAI system impacts?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the importance of timely human consideration, while the second question is more specific about how human input affects fairness and fallback. They have different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems should follow standards that include independent evaluation to ensure safety and effectiveness, regular reporting on system performance and data usage, and protections against algorithmic discrimination. Assessments should involve algorithmic impact assessments that detail consultation results, equity assessments, and any disparities, with findings made public whenever possible.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of incident response plans specifically in managing GAI system incidents, while the second question addresses the role of these plans in assessing GAI performance and AI Actor communication during incidents. The second question has a broader scope and different requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What recommended strategies should be implemented for the oversight and inventory management of GAI systems, considering both organizational risk priorities and the lifecycle impacts of AI technology?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of civil liberties in the context of surveillance systems. It is clear in specifying the topic of interest (civil liberties) and the context (surveillance systems), making the intent clear and understandable. The question is self-contained and does not rely on external references or prior knowledge, making it independent and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Algorithmic discrimination', 'Independent evaluation', 'Algorithmic impact assessment', 'Public accountability']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Pre-deployment testing', 'GAI system validity', 'Measurement gaps', 'Structured public feedback', 'AI Red-teaming']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data broker exposes social media profiles', 'Facial recognition technology', 'Surveillance technology', 'Virtual testing and disabled students', 'New surveillance technologies and disability discrimination']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the shortcomings of a sepsis prediction model implemented in hospitals. It is clear in its intent, seeking information on the limitations or issues associated with the model. However, it lacks specificity regarding which sepsis prediction model is being referred to, as there could be multiple models with different implementations. To improve clarity and answerability, the question could specify the particular model or provide additional context about the implementation (e.g., the name of the model, the hospital or study involved).', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What were the shortcomings of the sepsis prediction model implemented in hospitals?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of civil rights in the context of automated systems and technology, specifically according to 'the foreword'. While it is clear in its intent to understand the perspective provided in the foreword, it assumes access to and familiarity with this specific foreword without providing its content or context. This makes the question unclear for those who do not have access to the foreword. To improve clarity and answerability, the question could include a brief summary or key points from the foreword, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What role do civil rights play in the context of automated systems and technology according to the foreword?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Trustworthy AI', 'Transparency policies', 'Risk management activities', 'Information integrity', 'GAI capabilities']
+[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for automated systems regarding algorithmic discrimination and reporting?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the limitations of current pre-deployment testing approaches for GAI applications?"
+[ragas.testset.evolutions.INFO] seed question generated: "What impact do new surveillance technologies have on disability discrimination?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for particular consent practices and design principles to mitigate risks of abusive surveillance in automated systems while ensuring user privacy and control over their data. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking actionable and detailed information on consent practices and design principles.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Civil rights and liberties', 'Public consultation', 'Algorithmic harms']
+[ragas.testset.evolutions.INFO] seed question generated: "What factors should be considered to ensure information integrity in the context of GAI risk management?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role does human-AI integration play in enhancing customer service?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What consent practices and design principles can help balance user privacy and surveillance risks in automated systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Automated systems', 'Timely human consideration', 'Fallback and escalation process', 'Sensitive domains']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for proactive and ongoing strategies to prevent algorithmic discrimination in automated systems, with a particular focus on underserved communities. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking strategies for design and assessment to address a specific issue.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What are the implications of ongoing evaluations of GAI system effects on equitable content generation and community feedback integration?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do algorithmic harms play in shaping the principles of the Blueprint for an AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies can help prevent algorithmic bias in automated systems for underserved communities?"
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when using automated systems in sensitive domains?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Incident response plans play a crucial role in assessing GAI performance by providing structured procedures for addressing the generation of inappropriate or harmful content. They ensure that incidents are communicated to relevant AI Actors, including affected communities, and that processes for tracking, responding to, and recovering from incidents are followed and documented. This structured approach helps in understanding the root causes of incidents and implementing preventive measures, thereby enhancing overall AI Actor communication during such events.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not explicitly address how human input affects fairness and fallback in automated systems. However, it mentions that human consideration and fallback mechanisms should be proportionate, accessible, equitable, timely, and effective, which implies that human input is crucial in ensuring these aspects are upheld. The effectiveness of human involvement is emphasized through training, assessment, and oversight to combat automation bias and ensure appropriate results.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for recommended strategies for the oversight and inventory management of GAI (General Artificial Intelligence) systems, considering organizational risk priorities and the lifecycle impacts of AI technology. It is clear in specifying the topic of interest (oversight and inventory management of GAI systems) and the factors to consider (organizational risk priorities and lifecycle impacts). The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Civil liberties play a crucial role in the context of surveillance systems by ensuring that civil rights are not limited by the threat of surveillance or harassment facilitated by automated systems. Surveillance systems should not monitor the exercise of democratic rights, such as voting, privacy, peaceful assembly, speech, or association, in a way that restricts these civil liberties. Additionally, information related to identity should be carefully limited to avoid algorithmic discrimination, and continuous surveillance should not be used in ways that suppress the exercise of rights.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for automated systems in terms of algorithmic discrimination and reporting. It is clear in its intent, seeking information on both discrimination and reporting aspects. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the expectations for automated systems regarding algorithmic discrimination and reporting?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the impact of new surveillance technologies on disability discrimination. It is clear in its intent, seeking information on the relationship between surveillance technologies and disability discrimination. The question is specific and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What impact do new surveillance technologies have on disability discrimination?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies are best for managing GAI systems and their lifecycle risks?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the limitations of current pre-deployment testing approaches for GAI (General Artificial Intelligence) applications. It is specific in its focus on pre-deployment testing and GAI applications, and it clearly seeks information about the limitations of these approaches. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses specifically on key elements of consent practices to prevent abusive surveillance, while the second question has a broader scope, including both consent practices and design principles to balance user privacy and surveillance risks. This difference in scope and depth leads to different requirements for the answers.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the shortcomings of a sepsis prediction model implemented in hospitals. It is clear in its intent, seeking information on the limitations or issues associated with the model. However, it lacks specificity regarding which sepsis prediction model is being referred to, as there could be multiple models with different implementations. To improve clarity and answerability, the question could specify the particular model or provide additional context about the implementation (e.g., the name of the model, the hospital or study involved).', 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI technology mapping', 'Legal risks', 'Data privacy', 'Intellectual property', 'Harmful biases']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'AI Bill of Rights', 'Civil rights and liberties', 'Equal opportunities', 'Access to critical resources']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of civil rights in the context of automated systems and technology, specifically according to 'the foreword'. While it is clear in specifying the topic of interest (civil rights, automated systems, technology) and the source of information (the foreword), it assumes access to and understanding of 'the foreword' without providing its content or context. This makes the question unclear for those without direct access to the foreword. To improve clarity and answerability, the question could include a brief description or key points from the foreword, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of human-AI integration in enhancing customer service. It is clear in its intent, seeking information on how the combination of human and AI efforts can improve customer service. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role does human-AI integration play in enhancing customer service?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about factors to consider for ensuring information integrity in the context of GAI (General Artificial Intelligence) risk management. It is clear in specifying the topic of interest (information integrity) and the context (GAI risk management). The intent is also clear, seeking a list or discussion of relevant factors. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What factors should be considered to ensure information integrity in the context of GAI risk management?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address algorithmic issues in automated systems, the first question is broader, asking about measures to address discrimination in general, whereas the second question specifically focuses on strategies to prevent bias in underserved communities, leading to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Trustworthy AI', 'Transparency policies', 'Risk management activities', 'Information integrity', 'GAI capabilities']
+[ragas.testset.evolutions.INFO] seed question generated: "What are the suggested actions for addressing legal risks associated with AI technology?"
+[ragas.testset.evolutions.INFO] seed question generated: "What types of automated systems should be covered by the AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for using automated systems in sensitive domains. It is clear in its intent, seeking information on factors to consider, and does not rely on external references or unspecified contexts. The question is specific and independent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What considerations should be taken into account when using automated systems in sensitive domains?"
+[ragas.testset.evolutions.INFO] seed question generated: "What characteristics are integrated into organizational policies to ensure trustworthy AI?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the influence of algorithmic harms on the principles of the Blueprint for an AI Bill of Rights. It is clear in specifying the topic of interest (algorithmic harms) and the document in question (Blueprint for an AI Bill of Rights). The intent is to understand the relationship between these harms and the principles outlined in the Blueprint. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-generated content', 'Real-time auditing tools', 'User feedback mechanisms', 'Synthetic data', 'Incident response and recovery plans']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the implications of ongoing evaluations of GAI (Generative AI) system effects on equitable content generation and community feedback integration. It is clear in specifying the topic of interest (GAI system evaluations) and the aspects to be considered (equitable content generation and community feedback integration). However, the question could benefit from more specificity regarding what is meant by 'ongoing evaluations' and the context in which these evaluations are taking place. Providing a brief description or example of these evaluations would make the question more self-contained and easier to answer comprehensively.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are the implications of ongoing evaluations of GAI system effects on equitable content generation and community feedback integration?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What proactive measures and reporting requirements should automated systems implement to prevent algorithmic discrimination and ensure equitable outcomes for marginalized communities?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What are the implications of emerging surveillance tech on the discrimination faced by individuals with disabilities in various sectors?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on actions for managing AI system inventory based on organizational risk priorities, while the second question is broader, asking for strategies to manage GAI systems and their lifecycle risks. The depth and breadth of the inquiries differ.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration']
+[ragas.testset.evolutions.INFO] seed question generated: "What procedures should be developed and updated in incident response and recovery plans for GAI systems when a previously unknown risk is identified?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of human-AI configuration in ensuring the adequacy of GAI system user instructions?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to address legal risks associated with AI technology. It is clear in its intent, specifying the focus on legal risks and the need for actionable suggestions. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the suggested actions for addressing legal risks associated with AI technology?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the benefits of combining AI tools with human agents in customer service. It is clear in its intent, seeking information on the advantages of this combination. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not provide specific strategies for managing GAI systems and their lifecycle risks.', 'verdict': -1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Consent practices that can help balance user privacy and surveillance risks in automated systems include use-specific consent, where consent is sought for specific, narrow use contexts and time durations, and should be re-acquired if conditions change. Additionally, brief and direct consent requests should be used, employing short, plain language to ensure users understand the context and duration of data use. User experience research should be conducted to ensure these requests are accessible and comprehensible, avoiding manipulative design choices. Furthermore, privacy should be protected by design and by default, with privacy risks assessed throughout the development life cycle and data collection minimized to only what is necessary for identified goals.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the characteristics integrated into organizational policies to ensure trustworthy AI. It is clear in its intent, seeking specific information about the elements that contribute to trustworthy AI within organizational policies. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What characteristics are integrated into organizational policies to ensure trustworthy AI?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Strategies to prevent algorithmic bias in automated systems for underserved communities include conducting proactive equity assessments during the design phase, ensuring the use of representative and robust data, and guarding against the use of proxies that may lead to algorithmic discrimination. 
These strategies involve reviewing potential input data, historical context, and accessibility for people with disabilities, as well as testing for correlation between demographic information and attributes to identify and remove any proxies.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the types of automated systems that should be covered by the AI Bill of Rights. It is clear in its intent, seeking specific information about the scope of the AI Bill of Rights in terms of automated systems. The question is independent and does not rely on external references or prior knowledge not included within the question itself. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for proactive measures and reporting requirements that automated systems should implement to prevent algorithmic discrimination and ensure equitable outcomes for marginalized communities. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking detailed information on both measures and reporting requirements to address algorithmic discrimination.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What benefits arise from combining AI tools with human agents in customer service?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Sensitive domains', 'Predictive analytics', 'Student data collection', 'Employee data transfer'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps should automated systems take to avoid bias and support equity for marginalized groups?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the factors influencing the choice of human alternatives over automated systems in sensitive areas. It is clear in its intent, seeking information on the reasons behind such decisions. The question is independent and does not rely on external references or unspecified contexts. However, it could benefit from specifying what is meant by 'sensitive areas' (e.g., healthcare, security, legal decisions) to provide more context and focus for the answer.", 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of emerging surveillance technology on the discrimination faced by individuals with disabilities across various sectors. It is clear in specifying the topic of interest (emerging surveillance tech, discrimination, individuals with disabilities) and seeks information on the implications across different sectors. The intent is clear, and the question is independent as it does not rely on external references or unspecified contexts. 
Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 1, 'score': 1.25} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What combined strategies should be implemented to mitigate both intellectual property and privacy risks associated with the use of AI training data?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Current pre-deployment TEVV processes used for GAI applications may be inadequate, non-systematically applied, or fail to reflect or be mismatched to deployment contexts. Anecdotal testing of GAI system capabilities through video games or standardized tests designed for humans does not guarantee GAI system validity or reliability. Additionally, jailbreaking or prompt engineering tests may not systematically assess validity or reliability risks. Measurement gaps can arise from mismatches between laboratory and real-world settings, and current testing approaches often remain focused on laboratory conditions or restricted to benchmark test datasets that may not extrapolate well to real-world conditions.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the implications of ongoing evaluations of GAI (Generative AI) system effects on equitable content generation and community feedback integration. It is clear in specifying the topic of interest (GAI system evaluations) and the aspects it is concerned with (equitable content generation and community feedback integration). However, the term 'ongoing evaluations' is somewhat vague and could benefit from more specificity, such as the type of evaluations or the context in which they are being conducted. Additionally, the question could be clearer by specifying what kind of implications are of interest (e.g., social, technical, ethical). Overall, the question is mostly clear but could be improved with more detail.", 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI', 'AI Risk Management Framework', 'Trustworthy AI', 'Bias in Artificial Intelligence', 'GPT-4 Technical Report', 'Unsafe Diffusion'] +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What factors influence opting for human alternatives over automated systems in sensitive areas?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What impact does new surveillance tech have on discrimination against individuals with disabilities?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of human-AI configuration in ensuring the adequacy of GAI system user instructions. It is clear in specifying the topic of interest (human-AI configuration, GAI system user instructions) and seeks information on the importance or impact of this configuration. 
The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the procedures that should be developed and updated in incident response and recovery plans for GAI (General Artificial Intelligence) systems when a previously unknown risk is identified. It is clear in specifying the context (incident response and recovery plans for GAI systems) and the condition (when a previously unknown risk is identified). The intent is to understand the necessary procedural updates in response to new risks, making it specific and answerable without requiring additional context or external references.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the benefits and roles of integrating AI with human agents in customer service, requiring similar depth and breadth of explanation.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.INFO] seed question generated: "What are some concerns related to data privacy in the context of sensitive domains?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What are the effects of GAI evaluations on fair content and community input?" +[ragas.testset.evolutions.INFO] seed question generated: "What efforts are being made to identify and manage bias in artificial intelligence according to NIST?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Information sharing and feedback mechanisms', 'AI impact assessment', 'Organizational policies', 'Third-party rights'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The types of automated systems that should be covered by the AI Bill of Rights include those that have the potential to meaningfully impact civil rights, civil liberties, or privacy, equal opportunities, and access to critical resources and services. Examples include speech-related systems, surveillance and criminal justice algorithms, voting-related systems, education-related systems, housing-related systems, employment-related systems, health technologies, and financial system algorithms.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Digital content transparency', 'Harmful bias', 'Content provenance', 'AI system trustworthiness'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on expectations for automated systems in terms of algorithmic discrimination and reporting, while the second question is more specific about steps to avoid bias and support equity for marginalized groups. They differ in both constraints and depth of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about policies that assess GAI (General Artificial Intelligence) risks while ensuring transparency and safety measures. 
It is clear in its intent, seeking information on specific policies related to GAI risk assessment, transparency, and safety. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Ongoing monitoring', 'Clear organizational oversight', 'High-quality data', 'Governance procedures'] +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of AI impact assessment in relation to feedback from individuals and communities?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 1, 'relevance': 3, 'score': 2.0} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Algorithmic discrimination protections', 'Equitable design', 'Independent evaluation and reporting'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the impact of new surveillance technologies on discrimination against individuals with disabilities, sharing the same constraints, requirements, and depth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of digital content transparency in relation to the societal impacts of AI?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on considerations for using automated systems in sensitive domains, while the second question asks about the factors influencing the choice between humans and automation in sensitive areas. These questions have different constraints and requirements.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for combined strategies to mitigate intellectual property and privacy risks associated with the use of AI training data. It is clear in specifying the type of risks (intellectual property and privacy) and the context (AI training data), making the intent clear and understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it independent and answerable based on domain knowledge.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for automated systems regarding ongoing monitoring and organizational oversight?" +[ragas.testset.evolutions.INFO] seed question generated: "What protections does the AI Bill of Rights provide against algorithmic discrimination?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of continuous monitoring of GAI system impacts, while the second question is about the effects of GAI evaluations on fair content and community input. 
These questions have different constraints and requirements, as well as different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI technologies', 'Content provenance', 'Synthetic content detection', 'Digital transparency mechanisms', 'Provenance data tracking']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The significance of human-AI configuration in ensuring the adequacy of GAI system user instructions is highlighted in the context where it mentions verifying the adequacy of GAI system user instructions through user testing. This suggests that human-AI configuration plays a crucial role in assessing and improving the effectiveness of user instructions.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the efforts made by NIST to identify and manage bias in artificial intelligence. It is specific in mentioning the organization (NIST) and the topic of interest (bias in artificial intelligence), making the intent clear. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What efforts are being made to identify and manage bias in artificial intelligence according to NIST?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What policies assess GAI risks while ensuring transparency and safety measures?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies can help reduce IP and privacy risks in AI training data?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Develop and update GAI system incident response and recovery plans and procedures to address the following: Review and maintenance of policies and procedures to account for newly encountered uses; Review and maintenance of policies and procedures for detection of unanticipated uses; Verify response and recovery plans account for the GAI system value chain; Verify response and recovery plans are updated for and include necessary details to communicate with downstream GAI system Actors: Points-of-Contact (POC), Contact information, notification format.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What role does synthetic content detection play in managing risks associated with AI-generated outputs?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about concerns related to data privacy in sensitive domains. It is clear in its intent, seeking information on potential issues or challenges in this area. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. However, it could be improved by specifying what is meant by 'sensitive domains' (e.g., healthcare, finance) to narrow down the scope and provide a more focused answer.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data broker exposes social media profiles', 'Facial recognition technology', 'Surveillance technology', 'Virtual testing and disabled students', 'New surveillance technologies and disability discrimination']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the key elements that must be assessed to maintain information integrity amid GAI (General Artificial Intelligence) risk factors. It is clear in its intent, seeking specific elements related to information integrity and GAI risk factors. However, the acronym 'GAI' might not be universally recognized without context, and the term 'info integrity' could be more explicitly defined. To improve clarity, the question could spell out 'General Artificial Intelligence' and provide a brief explanation of what is meant by 'information integrity'.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Stakeholder communities', 'Unacceptable use', 'GAI risks', 'Information integrity', 'Governance and oversight']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems should take several steps to avoid bias and support equity for marginalized groups, including conducting proactive equity assessments during the design phase to identify potential discrimination, using representative and robust data that reflects local communities, and guarding against the use of demographic proxies that could lead to algorithmic discrimination. These steps should be integrated throughout the design, development, and deployment processes to ensure ongoing protection against algorithmic discrimination.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What challenges do virtual testing environments pose for disabled students?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key elements must be assessed to maintain info integrity amid GAI risk factors?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Executive Order 13960', 'Trustworthy Artificial Intelligence', 'AI Bill of Rights', 'NIST AI Risk Management Framework', 'Stakeholder engagement']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for automated systems in terms of ongoing monitoring and organizational oversight. It is clear in its intent, seeking information on specific aspects of automated systems (monitoring and oversight). The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the expectations for automated systems regarding ongoing monitoring and organizational oversight?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to manage GAI risks according to the provided context?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question addresses legal risks associated with AI technology in general, while the second question specifically focuses on IP and privacy risks in AI training data. These questions have different constraints and requirements.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The choice of humans over automation in sensitive areas is driven by the need for human consideration and remedy, particularly in contexts where automated systems may fail, produce errors, or where individuals wish to appeal or contest the impacts of these systems. This choice is also influenced by the requirement for appropriateness based on reasonable expectations, ensuring broad accessibility, and protecting the public from especially harmful impacts.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the protections provided by the AI Bill of Rights against algorithmic discrimination. It is specific and clear in its intent, seeking information on a particular aspect of the AI Bill of Rights. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of the AI Bill of Rights, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What protections does the AI Bill of Rights provide against algorithmic discrimination?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on characteristics integrated into organizational policies for trustworthy AI, while the second question is specifically about policies ensuring GAI risk assessment with transparency and safety. These questions have different focuses and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the purpose of AI impact assessment in relation to feedback from individuals and communities. It is clear in specifying the topic of interest (AI impact assessment) and the context (feedback from individuals and communities). The intent is to understand the role or purpose of such assessments, making it specific and answerable without needing additional context or external references. Therefore, the question meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the NIST AI Risk Management Framework?"
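+
+For reference, a trace like the one above is what ragas' synthetic test set generator emits while it scores contexts, extracts keyphrases, evolves seed questions, and filters the results. A minimal sketch of a run that produces this kind of log, assuming the ragas 0.1.x API (`TestsetGenerator`, `generate_with_langchain_docs`); the loader, file paths, and `test_size` are illustrative assumptions, not values recoverable from this trace:
+
+```python
+from langchain_community.document_loaders import PyMuPDFLoader
+from ragas.testset.generator import TestsetGenerator
+from ragas.testset.evolutions import simple, reasoning, multi_context
+
+# Load the two source PDFs (hypothetical local paths).
+documents = PyMuPDFLoader("data/Blueprint-for-an-AI-Bill-of-Rights.pdf").load()
+documents += PyMuPDFLoader("data/NIST.AI.600-1.pdf").load()
+
+# Default OpenAI generator/critic models; swap in your own as needed.
+generator = TestsetGenerator.with_openai()
+
+# The ReasoningEvolution / MultiContextEvolution records in this log come from
+# the distribution passed here; the split and test_size are assumptions.
+testset = generator.generate_with_langchain_docs(
+    documents,
+    test_size=20,
+    distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25},
+)
+```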
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Participatory engagement methods', 'Field testing', 'AI red-teaming', 'User feedback', 'Risk management']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of synthetic content detection in managing risks associated with AI-generated outputs. It is clear in specifying the topic of interest (synthetic content detection) and the context (managing risks associated with AI-generated outputs). The intent is clear, and the question is independent, not relying on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions ask about factors ensuring information integrity in the context of GAI risk management, sharing the same depth, breadth, and requirements for the answer.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of digital content transparency in relation to the societal impacts of AI. It is clear in specifying the topic of interest (digital content transparency) and its context (societal impacts of AI). The intent is to understand the importance or role of transparency in this specific context. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about initiatives that connect NIST's AI Safety Institute to bias management in AI. It is specific in mentioning the organization (NIST's AI Safety Institute) and the topic of interest (bias management in AI), making the intent clear. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'User consent', 'Automated systems', 'Surveillance technologies', 'Sensitive domains', 'Data protection', 'Privacy by design', 'User consent', 'Sensitive domains', 'Surveillance technologies']
+[ragas.testset.evolutions.INFO] seed question generated: "What are some real-life examples of how human alternatives can be implemented in practice?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the challenges that virtual testing environments pose for disabled students. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on the difficulties faced by disabled students in virtual testing settings.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What challenges do virtual testing environments pose for disabled students?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role does risk management play in the implementation of feedback activities for AI systems?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What initiatives link NIST's AI Safety Institute to bias management in AI?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for measures to manage GAI risks according to 'the provided context' without including or describing this context within the query. This makes the question unclear for those who do not have access to the unspecified context. For the question to be clear and answerable, it needs to either include the relevant context directly within the question or be framed in a way that does not require external information. Detailing specific aspects of GAI risks or the type of measures of interest could also help clarify the query.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What measures should be taken to manage GAI risks according to the provided context?"
+[ragas.testset.evolutions.INFO] seed question generated: "What protections should be in place for data and inferences related to sensitive domains?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Concerns related to data privacy in sensitive domains include the lack of awareness among patients regarding the use of their medical data by insurance companies, the revelation of personal information (such as pregnancy) through targeted advertising, the monitoring of student conversations which may limit emotional expression and unfairly flag students with disabilities, the use of location data to identify individuals visiting abortion clinics, the collection of sensitive student data without parental consent, and the potential for discriminatory impacts from such data usage. Additionally, there are concerns about the accuracy of employee data transferred to third parties, which can affect job opportunities.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the dual aspects that automated systems must ensure for effective monitoring and oversight. It is clear in its intent, seeking specific aspects related to monitoring and oversight in automated systems. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Ethical use of AI systems', 'Department of Energy AI Advancement Council', 'Artificial Intelligence Ethical Principles', 'National Science Foundation research', 'Pretrial risk assessments transparency']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The policies that ensure GAI risk assessment with transparency and safety include establishing transparency policies and processes for documenting the origin and history of training data and generated data for GAI applications, as well as establishing policies to evaluate risk-relevant capabilities of GAI and the robustness of safety measures prior to deployment and on an ongoing basis.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the NIST AI Risk Management Framework. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific framework.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the NIST AI Risk Management Framework?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Strategies to reduce IP and privacy risks in AI training data include conducting periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, documenting training data curation policies, establishing policies for collection and retention of data, and conducting appropriate diligence on training data use to assess intellectual property and privacy risks.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What dual aspects must automated systems ensure for effective monitoring and oversight?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about measures to ensure equitable design and prevent algorithmic discrimination in automated systems. It is clear in its intent, seeking information on specific measures or practices. The question is independent and does not rely on external references or unspecified contexts. It is specific enough to be understood and answered by someone with domain knowledge in automated systems and algorithmic fairness.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What types of research does the National Science Foundation support to ensure the safety and effectiveness of automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What specific obstacles do digital assessment platforms create for students with disabilities, considering both the technological limitations and the implications of surveillance practices in educational settings?"
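+
+The records themselves come from two standard-library loggers named in each line: `ragas.testset.evolutions` and `ragas.testset.filters`. A small sketch that surfaces the same DEBUG output and tees it to a file (the filename and format string are assumptions; the format simply mirrors the `[name.LEVEL]` prefix seen in this trace):
+
+```python
+import logging
+
+handler = logging.FileHandler("testset_generation.log")  # hypothetical path
+handler.setFormatter(logging.Formatter("[%(name)s.%(levelname)s] %(message)s"))
+
+for name in ("ragas.testset.evolutions", "ragas.testset.filters"):
+    logger = logging.getLogger(name)
+    logger.setLevel(logging.DEBUG)  # emit the DEBUG records shown above
+    logger.addHandler(handler)
+```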
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The significance of digital content transparency in relation to the societal impacts of AI lies in providing input for training materials about the capabilities and limitations of GAI systems. This transparency is crucial for AI actors, professionals, and the public to understand the societal impacts of AI and the role of diverse and inclusive content generation.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What measures ensure equitable design and prevent algorithmic discrimination in automated systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Synthetic content detection plays a crucial role in managing risks associated with AI-generated outputs by helping to distinguish human-generated content from AI-generated synthetic content. It facilitates greater information access about both authentic and synthetic content, enabling users to better understand the trustworthiness of AI systems. Additionally, it can assist in tracing negative outcomes back to their source, improving information integrity, and upholding public trust.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question specifically asks about efforts to identify and manage bias in AI according to NIST, while the second question is more general and asks about the connection between NIST's AI Safety Institute and AI bias management. The depth and breadth of the inquiries differ.", 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of AI impact assessment in relation to feedback from individuals and communities is to collect, consider, prioritize, and integrate feedback regarding the potential individual and societal impacts related to AI risks. This process ensures that organizational policies and practices are in place to address these impacts effectively.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how 'human alternatives' can be implemented in practice. While it is clear that the question seeks practical examples, the term 'human alternatives' is vague and could refer to various concepts such as alternative energy sources, alternative dispute resolution methods, or even alternative educational approaches. To improve clarity and answerability, the question should specify what 'human alternatives' refers to, providing context or examples of the type of alternatives being inquired about.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are some real-life examples of how human alternatives can be implemented in practice?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of risk management in the implementation of feedback activities for AI systems. It is clear in specifying the topic of interest (risk management, feedback activities, AI systems) and seeks information on the relationship between these elements. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are some real-life examples of how human alternatives can be implemented in practice?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about expectations for ongoing monitoring and organizational oversight, while the second question is more general and asks about dual aspects for effective oversight without specifying ongoing monitoring and organizational oversight.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What framework is being developed to enhance the trustworthiness of AI systems while ensuring compliance with existing laws and principles related to civil rights and privacy?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the role of the National Institute of Standards and Technology in the development of artificial intelligence?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for measures to manage GAI risks according to 'the provided context' without including or describing this context within the query. This makes the question unclear for those who do not have access to the unspecified context. For the question to be clear and answerable, it needs to either include the relevant context directly within the question or be framed in a way that does not require external information. Detailing specific aspects of GAI risks or the type of measures of interest could also help clarify the query.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the protections that should be in place for data and inferences related to sensitive domains. It is clear in its intent, seeking information on data protection measures specifically for sensitive domains. The question is independent and does not rely on external references or unspecified contexts. However, it could be improved by specifying what is meant by 'sensitive domains' (e.g., healthcare, finance) to provide more context and focus for the answer.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-enabled systems', 'Technological diffusion', 'Urban planning', 'Criminal justice system', 'Predictive policing']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about protections provided by the AI Bill of Rights against algorithmic discrimination, while the second question is broader, asking about fair design in automated systems without specifying the AI Bill of Rights or focusing solely on discrimination.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the types of research supported by the National Science Foundation (NSF) to ensure the safety and effectiveness of automated systems. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on NSF-supported research areas related to automated systems.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What types of research does the National Science Foundation support to ensure the safety and effectiveness of automated systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Stakeholder meetings', 'Private sector and civil society', 'Positive use cases', 'Potential harms and oversight']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the concerns raised by panelists regarding the use of technology in the criminal justice system?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems should cover ongoing monitoring procedures and clear organizational oversight for effective oversight.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear in its intent, asking for specific obstacles that digital assessment platforms create for students with disabilities. It specifies two areas of interest: technological limitations and the implications of surveillance practices in educational settings. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What were some of the discussions related to positive use cases during the meetings conducted by OSTP?"
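+
+One pattern worth reading out of the `context scoring` records: the `score` field is consistently the arithmetic mean of the four rubric values (for example, (2 + 3 + 2 + 3) / 4 = 2.5 and (1 + 1 + 2 + 1) / 4 = 1.25), and in this trace the low-scoring nodes (1.0–1.25) are the ones followed by `retrying evolution` records rather than a generated question. A one-line check of that arithmetic against a record from this log:
+
+```python
+# Rubric values copied from a 'context scoring' record above.
+rubric = {"clarity": 1, "depth": 1, "structure": 2, "relevance": 1}
+assert sum(rubric.values()) / len(rubric) == 1.25  # matches the logged 'score'
+```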
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Sensitive data', 'Ethical review', 'Data quality', 'Access limitations']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Supplier risk assessment framework', 'Third-party entities', 'Content provenance standards', 'GAI technology and service provider lists', 'Intellectual property and data privacy']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for the name or description of a framework being developed to enhance the trustworthiness of AI systems while ensuring compliance with existing laws and principles related to civil rights and privacy. It does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What challenges do digital assessments pose for students with disabilities?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how 'human alternatives' can be implemented in practice. While it is clear that the question seeks practical examples, the term 'human alternatives' is vague and could refer to various concepts such as alternative energy sources, alternative medicine, or even alternative dispute resolution methods. To improve clarity and answerability, the question should specify what is meant by 'human alternatives' and possibly provide a context or domain (e.g., technology, healthcare, environmental science) in which these alternatives are to be considered.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Training data use', 'Intellectual property', 'Data privacy risks', 'Content provenance', 'Generative AI (GAI) risks']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of the National Institute of Standards and Technology (NIST) in the development of artificial intelligence. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the role of the National Institute of Standards and Technology in the development of artificial intelligence?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Disinformation and misinformation', 'Generative AI models', 'Information security risks', 'Cybersecurity attacks']
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of an ethical review in the context of using sensitive data?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of implementing a supplier risk assessment framework in evaluating third-party entities?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "Which framework aims to boost AI trustworthiness while upholding civil rights and privacy laws?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'AI Actors', 'Unanticipated impacts', 'Information integrity', 'Content provenance']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how 'human alternatives' can be implemented in practice. While it is clear that the question seeks practical examples, the term 'human alternatives' is vague and could refer to various concepts such as alternative energy sources, alternative dispute resolution methods, or even alternative lifestyles. To improve clarity and answerability, the question should specify what is meant by 'human alternatives' and the context in which they are to be implemented (e.g., in technology, energy, social systems).", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are some real-life examples of how human alternatives can be implemented in practice?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do GAI systems play in augmenting cybersecurity attacks?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Environmental impacts of GAI', 'Harmful bias in AI systems', 'Generative AI energy consumption', 'Disparities in model performance', 'Trustworthy AI characteristics']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the challenges faced by disabled students in virtual or digital testing environments, requiring similar depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to identify and quantify unanticipated impacts of GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 1, 'relevance': 3, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Algorithmic discrimination protections', 'Equitable design', 'Independent evaluation and reporting']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Fair design in automated systems is ensured through proactive and continuous measures to protect individuals and communities from algorithmic discrimination. This includes conducting equity assessments as part of the system design, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing and mitigation, and maintaining clear organizational oversight. Additionally, independent evaluation and reporting, including algorithmic impact assessments and disparity testing results, should be made public whenever possible to confirm these protections.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for concerns raised by panelists regarding the use of technology in the criminal justice system. It is clear in specifying the topic of interest (concerns, panelists, technology, criminal justice system) and seeks detailed information on the concerns. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the concerns raised by panelists regarding the use of technology in the criminal justice system?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about NSF programs that ensure automated systems are safe, trustworthy, and compliant with regulations. It is clear in its intent, specifying the type of programs (NSF) and the criteria (safety, trustworthiness, compliance with regulations). The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account regarding intellectual property when conducting diligence on training data use?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to ensure that automated systems are safe and effective?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What NSF programs ensure automated systems are safe, trustworthy, and compliant with regulations?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential consequences of disparities in model performance for different subgroups or languages in GAI systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about the purpose of the NIST AI Risk Management Framework, while the second question is more general and could refer to any framework that aims to boost AI trustworthiness and uphold civil rights and privacy laws. They do not share the same constraints and requirements.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What contributions does the National Institute of Standards and Technology make towards ensuring the safety and trustworthiness of AI, particularly in relation to its risk management frameworks and standards?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about discussions related to positive use cases during meetings conducted by OSTP. It is clear in specifying the topic of interest (positive use cases) and the context (meetings conducted by OSTP). However, it assumes familiarity with the specific meetings and their content without providing additional context or details about which meetings or time frame are being referred to. To improve clarity and answerability, the question could specify the particular meetings or time period of interest, or provide more context about the discussions being referred to.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What were some of the discussions related to positive use cases during the meetings conducted by OSTP?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of an ethical review specifically in the context of using sensitive data. It is clear in its intent, seeking an explanation of the role and importance of ethical reviews when handling sensitive information. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of an ethical review in the context of using sensitive data?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated sentiment analyzer', 'Bias against Jews and gay people', 'Search engine results for minority groups', 'Advertisement delivery systems and stereotypes', 'Algorithmic discrimination in healthcare']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 1, 'structure': 3, 'relevance': 2, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Algorithmic discrimination protections', 'Data privacy', 'Human alternatives', 'Automated systems']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of implementing a supplier risk assessment framework in evaluating third-party entities. It is clear in specifying the topic of interest (supplier risk assessment framework) and the context (evaluating third-party entities). The intent is straightforward, seeking an explanation of the purpose behind this implementation. The question is self-contained and does not rely on external references or additional context to be understood and answered.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What issues does the automated sentiment analyzer address regarding bias in online statements?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of GAI (General Artificial Intelligence) systems in augmenting cybersecurity attacks. It is clear in specifying the topic of interest (GAI systems and cybersecurity attacks) and seeks information on the specific role these systems play. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations are involved in providing human alternatives in automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What ethical and operational concerns do panelists highlight regarding the integration of AI technologies in the criminal justice system, particularly in relation to community impact and democratic values?"
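+
+Because the pipeline runs concurrently, records for different questions interleave, and the trace is easier to audit in aggregate than line by line. A small post-hoc summary sketch (assuming the trace was saved to `testset_generation.log` as in the logging snippet above):
+
+```python
+import re
+from collections import Counter
+
+counts = Counter()
+with open("testset_generation.log") as f:  # hypothetical path
+    for line in f:
+        if "retrying evolution" in line:
+            counts["evolution retries"] += 1
+        m = re.search(r"\[ragas\.testset\.filters\.DEBUG\] ([\w ]+?):.*'verdict': (-?\d)", line)
+        if m:
+            counts[f"{m.group(1)} verdict={m.group(2)}"] += 1
+
+for key, n in counts.most_common():
+    print(f"{n:4d}  {key}")
+```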
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for real-life examples of how 'human alternatives' can be implemented in practice. While it is clear that the question seeks practical examples, the term 'human alternatives' is vague and could refer to various concepts such as alternative energy sources, alternative medicine, or even alternative dispute resolution methods. To improve clarity and answerability, the question should specify what is meant by 'human alternatives' and possibly provide a context or domain (e.g., technology, healthcare, environmental science) in which these alternatives are to be considered.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to identify and quantify unanticipated impacts of GAI (General Artificial Intelligence) systems. It is clear in its intent, specifying the type of information sought (measures) and the context (unanticipated impacts of GAI systems). The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What measures are suggested to identify and quantify unanticipated impacts of GAI systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the types of research supported by the NSF, focusing on safety and effectiveness of automated systems, while the second question asks specifically about NSF programs ensuring safety and compliance. The scope and requirements differ.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Enhanced protections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and related inferences should only be used for necessary functions, and you should be protected by ethical review and use prohibitions.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations related to intellectual property when conducting diligence on training data use. It is clear in its intent, seeking specific information on intellectual property considerations in the context of training data diligence. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What considerations should be taken into account regarding intellectual property when conducting diligence on training data use?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The NIST AI Risk Management Framework aims to boost AI trustworthiness while upholding civil rights and privacy laws.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of implementing a supplier risk assessment framework in evaluating third-party entities is to assess and monitor their performance and adherence to content provenance standards, detect anomalies and unauthorized changes, manage services acquisition and value chain risks, and ensure legal compliance.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure the safety and effectiveness of automated systems. It is clear in its intent, seeking specific actions or strategies to achieve these goals. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided. However, it could be improved by specifying the type of automated systems (e.g., industrial robots, AI software) to narrow down the scope and provide more targeted answers.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What measures should be taken to ensure that automated systems are safe and effective?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about discussions related to positive use cases during meetings conducted by OSTP. It is clear in specifying the topic of interest (positive use cases) and the context (meetings conducted by OSTP). However, it assumes familiarity with the specific meetings and their content without providing additional context or details about which meetings or time frame are being referred to. To improve clarity and answerability, the question could specify the particular meetings or time period of interest, or provide more context about the discussions being referred to.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question is clear and specific, asking about the contributions of the National Institute of Standards and Technology (NIST) towards ensuring the safety and trustworthiness of AI, with a particular focus on its risk management frameworks and standards. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on NIST's role and efforts in AI safety and trustworthiness.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential consequences of disparities in model performance for different subgroups or languages in GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (disparities in model performance) and the context (different subgroups or languages in GAI systems). The intent is also clear, seeking information on the consequences of these disparities. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the potential consequences of disparities in model performance for different subgroups or languages in GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Ongoing monitoring', 'Clear organizational oversight', 'High-quality data', 'Governance procedures']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the considerations involved in providing human alternatives in automated systems. It is clear in its intent, seeking information on the factors to consider when integrating human alternatives into automated systems. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What considerations are involved in providing human alternatives in automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What role does NIST play in AI safety and risk management?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Homogenized outputs', 'Model collapse', 'Trustworthy AI Characteristics', 'Automation bias', 'Information integrity']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the ethical and operational concerns highlighted by panelists regarding the integration of AI technologies in the criminal justice system, with a particular focus on community impact and democratic values. It does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issues that an automated sentiment analyzer addresses concerning bias in online statements. It is clear in specifying the tool of interest (automated sentiment analyzer) and the specific aspect (bias in online statements). The intent is clear, seeking information on the problems related to bias that the tool aims to solve. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What issues does the automated sentiment analyzer address regarding bias in online statements?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What strategies are recommended for engaging with AI Actors to effectively identify and measure unforeseen consequences of GAI systems while ensuring the integrity and authenticity of AI-generated content?"
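+
+Note the `answer generated` records with `'verdict': -1`: their answer is ragas' sentinel string `The answer to given question is not present in context`, and rows like these can surface in the finished test set as unusable ground truth. A post-processing sketch to drop them (`to_pandas()` and the `ground_truth` column are ragas 0.1.x conventions; the filter itself is our own cleanup step, not a ragas API):
+
+```python
+df = testset.to_pandas()  # testset from the generator sketch above
+
+# Drop rows whose ground truth is the "not answerable from context" sentinel.
+bad = df["ground_truth"].str.contains("not present in context", case=False, na=False)
+clean = df[~bad].reset_index(drop=True)
+print(f"kept {len(clean)} of {len(df)} generated samples")
+```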
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'GAI systems may augment cybersecurity attacks by advancing offensive cyber capabilities such as hacking, malware, and phishing. Reports indicate that large language models (LLMs) can discover vulnerabilities in systems and write code to exploit them. Sophisticated threat actors might develop GAI-powered security co-pilots to inform attackers on how to evade threat detection and escalate privileges after gaining system access.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human subject protection', 'Content provenance', 'Data privacy', 'AI system performance', 'Anonymization techniques']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Reporting expectations', 'Transparency', 'Artificial Intelligence ethics', 'Traffic calming measures', 'AI Risk Management Framework']
+[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for automated systems regarding ongoing monitoring and organizational oversight?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What factors must be evaluated regarding IP and data integrity when assessing the use of training data in AI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI system evaluation', 'Safety risks', 'Harmful bias', 'Data privacy violations', 'GAI system outputs']
+[ragas.testset.evolutions.INFO] seed question generated: "What can lead to model collapse in AI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what must be reviewed to ensure the ethical use of sensitive data affecting rights. It is clear in its intent, seeking information on the review process or criteria for ethical use of sensitive data. The question is independent and does not rely on external references or unspecified contexts. However, it could be improved by specifying the type of sensitive data or the context in which the ethical review is being conducted (e.g., medical data, financial data, research data). Overall, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What concerns do panelists raise about AI in criminal justice and its effects on communities and democracy?"
+[ragas.testset.evolutions.INFO] seed question generated: "What criteria are used to measure AI system performance or assurance in deployment settings?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The NSF programs that ensure automated system safety and compliance include the National AI Research Institutes, which support research on safe, trustworthy, fair, and explainable AI algorithms and systems; the Cyber Physical Systems program, which supports research on developing safe autonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace program, which supports research on cybersecurity and privacy enhancing technologies in automated systems; the Formal Methods in the Field program, which supports research on rigorous formal verification and analysis of automated systems and machine learning; and the Designing Accountable Software Systems program, which supports research on rigorous and reproducible methodologies for developing software systems with legal and regulatory compliance in mind.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology?"
+[ragas.testset.evolutions.INFO] seed question generated: "What steps are suggested to assess harmful bias in the AI system's training data?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What must be reviewed to ensure ethical use of sensitive data affecting rights?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What factors must be evaluated when integrating human options within automated frameworks as outlined in the AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Cyberattacks', 'Intellectual Property', 'Obscene and abusive content', 'CBRN weapons', 'Chemical and biological design tools']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question focuses on the overall role of NIST in the development of AI, while the second question specifically targets NIST's involvement in AI safety and risk management. These inquiries have different depths and breadths.", 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the steps to ensure that automated systems are safe, effective, and free from algorithmic discrimination. It is clear in its intent, seeking specific measures or practices to achieve these goals. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What biases does the automated sentiment analyzer reveal in online expressions, and how do these biases compare to those found in predictive policing systems regarding transparency and accountability?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What steps ensure automated systems are safe, effective, and free from algorithmic discrimination?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with the production and access to obscene and abusive content?"
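+
+The `evolution filter` records are near-duplicate checks between a candidate question and its predecessor: verdict 1 means "equivalent, retry the evolution" (see the `evolution_filter failed, retrying with 1` lines), while verdict 0 means the candidate is sufficiently different. ragas makes that judgment with an LLM; as a cheap final sanity pass over the finished question list, a lexical analogue could look like the following (our own post-hoc check, not how ragas implements the filter):
+
+```python
+from difflib import SequenceMatcher
+
+def near_duplicates(questions: list[str], threshold: float = 0.8) -> list[tuple[str, str]]:
+    """Flag question pairs whose surface similarity exceeds the threshold."""
+    pairs = []
+    for i, a in enumerate(questions):
+        for b in questions[i + 1:]:
+            if SequenceMatcher(None, a.lower(), b.lower()).ratio() >= threshold:
+                pairs.append((a, b))
+    return pairs
+```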
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question broadly addresses concerns about technology in the criminal justice system, while the second question specifically focuses on AI and its effects on communities and democracy, indicating a different depth and breadth of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for automated systems in terms of ongoing monitoring and organizational oversight. It is clear in its intent, seeking information on specific aspects of automated systems (monitoring and oversight). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the expectations for automated systems regarding ongoing monitoring and organizational oversight?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors that need to be evaluated concerning IP (Intellectual Property) and data integrity when assessing the use of training data in AI systems. It is clear in its intent, specifying the two main areas of interest (IP and data integrity) and the context (use of training data in AI systems). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for recommended strategies for engaging with AI Actors to identify and measure unforeseen consequences of GAI (General Artificial Intelligence) systems while ensuring the integrity and authenticity of AI-generated content. It is clear in its intent, specifying the need for strategies and the dual focus on unforeseen consequences and content integrity. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about issues arising from GAI (General Artificial Intelligence) model performance differences across languages and subgroups. It is clear in its intent, seeking information on the problems or challenges associated with these performance disparities. The question is independent and does not rely on external references or unspecified contexts. However, it could be improved by specifying what kind of issues are of interest (e.g., ethical, technical, social) to provide a more focused answer.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the steps suggested to assess harmful bias in an AI system's training data. It is clear in its intent, seeking specific steps or methods for bias assessment. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What steps are suggested to assess harmful bias in the AI system's training data?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors that can lead to model collapse in AI systems. It is clear in its intent, seeking information on potential causes of model collapse. The question is independent and does not rely on external references or unspecified contexts. Therefore, it is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What can lead to model collapse in AI systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question asks for the purpose of an ethical review, while the second question asks what to review for ethical use. The first focuses on the 'why' and the second on the 'what', leading to different depths and requirements.", 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the criteria used to measure AI system performance or assurance in deployment settings. It is clear in specifying the topic of interest (criteria for measuring AI system performance or assurance) and the context (deployment settings). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for automated systems regarding safety and effectiveness?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What to consider for IP and data integrity in AI training data?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies help engage AI Actors to assess GAI impacts while maintaining AI content integrity?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What issues arise from GAI model performance differences across languages and subgroups?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology (NIST). It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge beyond what is stated, and it clearly seeks information about the purpose of a specific framework from a specific organization.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the AI Risk Management Framework as described by the National Institute of Standards and Technology?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for measures to ensure safety and effectiveness, while the second question asks what ensures safety and fairness. The difference in focus (effectiveness vs. fairness) leads to different depths and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors to be evaluated when integrating human options within automated frameworks, specifically referencing the AI Bill of Rights. It is clear in its intent to understand the evaluation criteria and the context of the AI Bill of Rights. However, it assumes familiarity with the AI Bill of Rights without providing any details or context about it. To improve clarity and answerability, the question could briefly describe what the AI Bill of Rights entails or specify the particular aspects of it that are relevant to the integration of human options in automated frameworks.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What factors must be evaluated when integrating human options within automated frameworks as outlined in the AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'NIST plays a significant role in AI safety and risk management by developing measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence. They are also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI and have established the U.S. AI Safety Institute and the AI Safety Institute Consortium to build the necessary science for the safe and trustworthy development and use of AI.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential risks associated with the production and access to obscene and abusive content. It is clear in its intent, seeking information on the risks involved. The question is self-contained and does not rely on external references or additional context to be understood. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address considerations related to intellectual property and data integrity in the context of AI training data, requiring similar depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What governance structures and ongoing monitoring practices should be established to ensure the safety and effectiveness of automated systems while addressing public concerns?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for conditions similar to deployment setting(s). Measures are documented.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the issue of performance disparities in GAI systems across different subgroups or languages, requiring similar depth and breadth of analysis.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on identifying and quantifying unanticipated impacts of GAI systems, while the second question is about engaging AI actors to assess GAI impacts while maintaining AI content integrity. These questions have different constraints and requirements, and they explore different aspects of the topic.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Any use of sensitive data or decision processes based in part on sensitive data that might limit rights, opportunities, or access should go through a thorough ethical review and monitoring, both in advance and by periodic review. This may involve an independent ethics committee or a similarly robust process. The ethical review may determine that data should not be used or shared for specific uses even with consent.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI technology mapping', 'Legal risks', 'Data privacy', 'Intellectual property', 'Harmful biases', 'Training data use', 'Intellectual property', 'Data privacy risks', 'Content provenance', 'Generative AI (GAI) risks']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What methodologies are recommended for evaluating the presence of harmful bias in AI training data while ensuring the system's overall safety and reliability?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What factors contribute to the phenomenon of model collapse in AI systems, particularly in relation to the reliance on synthetic data and the potential for harmful biases?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What overarching goals does the NIST AI Risk Management Framework aim to achieve in promoting safe and equitable AI practices, particularly in relation to public transparency and ethical standards?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for automated systems in terms of safety and effectiveness. It is clear in its intent, seeking information on the standards or criteria that automated systems should meet regarding these two aspects. The question is independent and does not rely on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Panelists raised concerns about the validity of AI systems used in the criminal justice system, noting that adverse or irrelevant data can lead to a replication of unjust outcomes. They highlighted issues such as confirmation bias and the tendency to defer to potentially inaccurate automated systems. The impact of these systems on individuals and communities is seen as potentially severe, with concerns that they lack individualization, work against the belief that people can change for the better, and can lead to job loss and custody issues. Additionally, surveillance can create chilling effects for communities and send negative signals about how they are viewed.
Panelists emphasized that while transparency is important, it is not sufficient for achieving accountability, and they discussed the need for regulation that includes limits on the type and cost of such technologies.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Lisa Feldman Barrett', 'Microsoft Corporation', 'National Association for the Advancement of Colored People', 'University of Michigan Ann Arbor', 'OSTP listening sessions'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 3, 'score': 1.75} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Incident Database', 'Generative AI security flaws', 'Large Language Models', 'Ethical Tensions in Human-AI Companionship', 'Disinformation Business of Chinese Influence Operations'] +[ragas.testset.evolutions.INFO] seed question generated: "What risks are associated with harmful biases in AI systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Clear and accessible notice', 'Explanations for decisions', 'Algorithmic impact assessments', 'User experience research'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Automated systems should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring to ensure they are safe and effective. They should be developed with consultation from diverse communities, stakeholders, and domain experts, and should include protective measures to prevent endangering safety. Additionally, independent evaluation and reporting that confirms the system's safety and effectiveness should be performed, with results made public whenever possible.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The potential risks associated with the production and access to obscene and abusive content include eased production of and access to obscene, degrading, and/or abusive imagery, which can cause harm. This includes synthetic child sexual abuse material (CSAM) and nonconsensual intimate images (NCII) of adults.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear in its intent, asking for governance structures and ongoing monitoring practices to ensure the safety and effectiveness of automated systems while addressing public concerns. It is specific and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Trustworthy AI', 'Transparency policies', 'Risk management activities', 'Information integrity', 'GAI capabilities'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors that need to be evaluated when integrating human options within automated frameworks, specifically as outlined in the AI Bill of Rights. It is clear in its intent, specifying the context (AI Bill of Rights) and the subject of interest (factors for evaluation). 
The question is self-contained and does not rely on external references beyond the AI Bill of Rights, which is a well-known document in the domain. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What role do algorithmic impact assessments play in the expectations for automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What role does the National Association for the Advancement of Colored People play in advocacy and civil rights?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What governance and monitoring are needed for safe, effective automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What are some of the challenges associated with large language models as indicated in the references?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Strategies to engage AI Actors to assess GAI impacts while maintaining AI content integrity include determining context-based measures to identify new impacts, planning regular engagements with AI Actors responsible for inputs to GAI systems, employing methods to trace the origin and modifications of digital content, integrating tools to analyze content provenance, and using structured feedback mechanisms to capture user input about AI-generated content.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of establishing transparency policies for GAI applications?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What factors to consider for human options in automated systems per the AI Bill of Rights?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question is clear and specific, asking for recommended methodologies to evaluate harmful bias in AI training data while ensuring the system's overall safety and reliability. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on evaluation methodologies with a focus on bias and system safety.", 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology in social welfare', 'Fraud detection', 'Digital ID systems', 'Healthcare access and delivery', 'Health disparities'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the overarching goals of the NIST AI Risk Management Framework in promoting safe and equitable AI practices, with a particular focus on public transparency and ethical standards. 
It does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors contributing to model collapse in AI systems, with a specific focus on the reliance on synthetic data and the potential for harmful biases. It is clear in its intent, seeking an explanation of the causes of model collapse and the role of synthetic data and biases. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The expectations for automated systems regarding safety and effectiveness include the need for independent evaluation, where evaluators should have access to the system and associated data to perform evaluations. Additionally, entities responsible for automated systems should provide regularly-updated reports that cover an overview of the system, data used for training, risk management assessments, performance testing results, and ongoing monitoring procedures. These reports should be presented in plain language and a machine-readable format.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What methods help assess bias in AI training data while ensuring safety?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the risks associated with harmful biases in AI systems. It is clear in its intent, seeking information on the potential dangers or negative consequences of biases within AI. The question is independent and does not rely on external references or additional context to be understood. It is specific enough to be answerable by someone with domain knowledge in AI ethics or related fields.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What risks are associated with harmful biases in AI systems?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the requirements for monitoring and oversight of automated systems, focusing on governance and ongoing monitoring. They share the same depth and breadth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What goals does the NIST AI Risk Management Framework pursue for safe, equitable AI, especially in transparency and ethics?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about biases revealed by an automated sentiment analyzer in online expressions and seeks a comparison with biases in predictive policing systems, specifically regarding transparency and accountability. While the intent is clear, the question assumes familiarity with the specific biases in both systems without providing context or examples. 
To improve clarity and answerability, the question could specify the types of biases of interest (e.g., racial, gender), provide a brief description of the sentiment analyzer and predictive policing systems, or clarify the criteria for comparison in terms of transparency and accountability.', 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What biases does the automated sentiment analyzer reveal in online expressions, and how do these biases compare to those found in predictive policing systems regarding transparency and accountability?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI trustworthy characteristics', 'Human-AI Configuration', 'Information Integrity', 'Data Privacy', 'Confabulation'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What leads to model collapse in AI, especially with synthetic data and biases?" +[ragas.testset.evolutions.INFO] seed question generated: "What concerns were raised by panelists regarding healthcare access and delivery in relation to new technologies?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI incidents', 'AI Actors', 'Incident reporting', 'Documentation practices', 'AI risk management'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about considerations for human alternatives in automated systems, but the second question specifically references the AI Bill of Rights, adding a specific constraint not present in the first question.', 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Ethical use of AI systems', 'Department of Energy AI Advancement Council', 'Artificial Intelligence Ethical Principles', 'National Science Foundation research', 'Pretrial risk assessments transparency'] +[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account regarding data privacy when deploying a GAI system?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of algorithmic impact assessments in the expectations for automated systems. It is clear in specifying the topic of interest (algorithmic impact assessments) and the context (expectations for automated systems). The intent is to understand the significance or influence of these assessments on automated systems. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role do algorithmic impact assessments play in the expectations for automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What role does incident reporting play in improving GAI risk management across the AI ecosystem?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of establishing transparency policies for GAI (General Artificial Intelligence) applications. It is clear in its intent, seeking an explanation of the rationale behind such policies. 
The question is specific and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What requirements have been established regarding transparency for pretrial risk assessments in Idaho?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about challenges associated with large language models as indicated in 'the references', but it does not specify or include these references within the question itself. This makes the question unclear and dependent on external information that is not provided. To improve clarity and answerability, the question should either specify the references directly within the question or be framed in a way that does not rely on unspecified external documents. For example, it could ask about general challenges associated with large language models based on common knowledge or specific studies that are well-known in the field.", 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What are some of the challenges associated with large language models as indicated in the references?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek methods to assess bias in AI training data, with an emphasis on safety and harmful bias. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The second question specifies additional factors such as synthetic data and biases, which introduces a broader scope and more specific constraints compared to the first question.', 'verdict': 0} +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Safety Institute established by NIST?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions pertain to the NIST AI Risk Management Framework, the first question asks for the general purpose of the framework, whereas the second question specifically focuses on goals related to safety, equity, transparency, and ethics. This difference in focus leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about concerns raised by panelists regarding healthcare access and delivery in relation to new technologies. It is clear in specifying the topic of interest (concerns, healthcare access and delivery, new technologies) and seeks detailed information on the concerns raised. 
The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What concerns were raised by panelists regarding healthcare access and delivery in relation to new technologies?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Preventable harms', 'Ethics review', 'Sepsis prediction model', 'Algorithmic bias'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Ongoing monitoring', 'Clear organizational oversight', 'High-quality data', 'Governance procedures'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of establishing transparency policies for GAI applications is to document the origin and history of training data and generated data, which advances digital content transparency while balancing the proprietary nature of training approaches.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for data privacy when deploying a GAI (General Artificial Intelligence) system. It is clear in its intent, seeking specific information on data privacy aspects related to GAI deployment. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What considerations should be taken into account regarding data privacy when deploying a GAI system?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the requirements established for transparency in pretrial risk assessments in Idaho. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What requirements have been established regarding transparency for pretrial risk assessments in Idaho?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the connections between harmful biases in AI and risks such as data privacy or misinformation. It is clear in its intent, seeking to understand the relationship between these specific issues. The question is self-contained and does not rely on external references or prior knowledge beyond general understanding of AI, biases, data privacy, and misinformation. 
Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'White House Office of Science and Technology Policy', 'Automated systems', 'Civil rights and democratic values', 'National security and defense activities'] +[ragas.testset.evolutions.INFO] seed question generated: "What are the key components of ongoing monitoring for automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of harms caused by algorithmic bias in automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of incident reporting in improving GAI (General Artificial Intelligence) risk management across the AI ecosystem. It is clear in specifying the topic of interest (incident reporting) and the context (GAI risk management within the AI ecosystem). The intent is to understand the impact or contribution of incident reporting to risk management, which is straightforward and does not rely on external references or unspecified contexts. Therefore, the question is specific, independent, and has a clear intent.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role does incident reporting play in improving GAI risk management across the AI ecosystem?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about challenges associated with large language models as indicated in 'the references', but it does not specify or include these references within the question. This makes the question dependent on external documents that are not provided, leading to ambiguity and lack of independence. To improve clarity and answerability, the question should either specify the references directly within the question or be reframed to ask about general challenges associated with large language models without relying on unspecified references.", 'verdict': 0} +[ragas.testset.evolutions.INFO] retrying evolution: 2 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about biases revealed by an automated sentiment analyzer in online expressions and seeks a comparison with biases in predictive policing systems, specifically regarding transparency and accountability. While the intent is clear, the question assumes familiarity with the specific biases in both systems without providing context or examples. To improve clarity and answerability, the question could specify the types of biases of interest (e.g., racial, gender), provide a brief description of the sentiment analyzer and predictive policing systems, or clarify the aspects of transparency and accountability being compared.', 'verdict': 0} +[ragas.testset.evolutions.INFO] retrying evolution: 3 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Safety Institute established by NIST. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. 
The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific institute.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between harmful biases in AI and risks like data privacy or misinformation?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the relationship between algorithmic impact assessments and expectations for automated system transparency. It is clear in specifying the two concepts of interest (algorithmic impact assessments and automated system transparency) and seeks to understand their connection. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Model collapse in AI can occur when model training over-relies on synthetic data, resulting in data points disappearing from the distribution of the new model's outputs. This phenomenon threatens the robustness of the model overall and can lead to homogenized outputs, amplifying any homogenization from the model used to generate the synthetic training data.", 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Bill of Rights as outlined by the White House Office of Science and Technology Policy?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Ongoing monitoring', 'Clear organizational oversight', 'High-quality data', 'Governance procedures'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What issues did panelists identify regarding the intersection of new technologies and healthcare access, particularly in terms of equity and community involvement?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The NIST AI Risk Management Framework aims to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) to realize its full commercial and societal benefits without harm to people or the planet. It also supports the development of safe, secure, and trustworthy AI, emphasizing transparency and ethical considerations in its implementation.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of the National Association for the Advancement of Colored People (NAACP) in advocacy and civil rights. It is clear in specifying the organization of interest (NAACP) and the areas of focus (advocacy and civil rights). The intent is straightforward, seeking information on the organization's functions and contributions in these areas. The question is self-contained and does not rely on external references or prior knowledge beyond a basic understanding of what the NAACP is.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does the National Association for the Advancement of Colored People play in advocacy and civil rights?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the key components of ongoing monitoring for automated systems?" 
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What is the link between algorithmic impact assessments and expectations for automated system transparency?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What stipulations exist in Idaho's legislation regarding the transparency and validation of pretrial risk assessments, and how do these align with federal principles for ethical AI use?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Unethical behavior', 'Text-to-image models', 'Data privacy', 'Sensitive information', 'Harmful recommendations'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['OSTP', 'Artificial intelligence', 'Biometric technologies', 'Request For Information (RFI)', 'Public comments'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Race correction in clinical algorithms', 'Algorithmic impact assessment', 'Racial equity', 'Algorithmic bias detection', 'Property appraisal and valuation equity'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the general risks associated with harmful biases in AI systems, while the second question specifically links harmful AI biases to data privacy and misinformation risks. This difference in focus leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key components of ongoing monitoring for automated systems. It is clear in its intent, seeking specific information about the elements involved in monitoring such systems. The question is independent and does not rely on external references or additional context to be understood. Therefore, it is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the key components of ongoing monitoring for automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for examples of harms caused by algorithmic bias in automated systems. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge and clearly seeks information on the negative impacts of algorithmic bias.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are some examples of harms caused by algorithmic bias in automated systems?" 
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Predictive policing system', 'Gun violence risk assessment', 'Watch list transparency', 'System flaws in benefit allocation', 'Lack of explanation for decisions', 'Automated systems', 'Notice and explanation', 'Impact on lives', 'Opaque decision-making', 'Algorithmic risk assessment'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Bill of Rights as outlined by the White House Office of Science and Technology Policy. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge beyond what is stated, and it clearly seeks information about the purpose of a specific document.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the AI Bill of Rights as outlined by the White House Office of Science and Technology Policy?" +[ragas.testset.evolutions.INFO] seed question generated: "What was the purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the AI Safety Institute established by NIST is to continue efforts to build the science necessary for safe, secure, and trustworthy development and use of artificial intelligence (AI), in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the Action Plan to Advance Property Appraisal and Valuation Equity?" +[ragas.testset.evolutions.INFO] seed question generated: "What risks to privacy are associated with the use of GAI systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of the lack of explanation for decisions made by automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the benefits of documenting GAI (General Artificial Intelligence) incidents for AI actors in the context of risk management. It is clear in its intent, seeking specific information about the advantages of such documentation. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the role of algorithmic impact assessments in setting expectations for automated systems, while the second question is concerned with the relationship between algorithmic impact assessments and transparency in automated systems. These are related but distinct inquiries with different depths and requirements.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the factors related to human knowledge and security that must be documented for GAI (General Artificial Intelligence) deployment. It is clear in specifying the topic of interest (GAI deployment) and the aspects to be documented (human knowledge and security factors). The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. 
However, it could be improved by specifying what is meant by 'human knowledge' in this context, as it might be interpreted in various ways (e.g., expertise, ethical considerations).", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What influence does the National Association for the Advancement of Colored People exert on civil rights advocacy, particularly in relation to the governance of emerging technologies as highlighted by recent public engagements?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issues identified by panelists concerning the intersection of new technologies and healthcare access, with a focus on equity and community involvement. It is specific in its scope (new technologies and healthcare access) and clear in its intent (issues related to equity and community involvement). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key components of ongoing monitoring for automated systems. It is clear in its intent, seeking specific information about the elements involved in monitoring such systems. The question is independent and does not rely on external references or additional context to be understood. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the key components of ongoing monitoring for automated systems?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What benefits arise from documenting GAI incidents for AI Actors in risk mgmt?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What factors related to human knowledge and security must be documented for GAI deployment?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness', 'Automated systems', 'Algorithmic discrimination', 'Independent evaluation', 'Algorithmic impact assessment', 'Public accountability'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What challenges did panelists see at the tech-healthcare equity intersection?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What ongoing procedures and stakeholder engagements are essential for ensuring the safety and effectiveness of automated systems throughout their lifecycle?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What are notable instances where automated systems have caused harm due to algorithmic bias, particularly in relation to safety violations or discriminatory impacts?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not explicitly link harmful AI biases to data privacy or misinformation risks. 
However, it mentions risks such as harmful biases, data privacy, and misinformation in separate sections, indicating that these issues are recognized but not directly connected in the provided text.', 'verdict': -1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the stipulations in Idaho's legislation concerning the transparency and validation of pretrial risk assessments and how these align with federal principles for ethical AI use. It is clear in specifying the topic of interest (Idaho's legislation, pretrial risk assessments, federal principles for ethical AI use) and seeks detailed information on both the state-level stipulations and their alignment with federal principles. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What precautions should be taken when using derived data sources in automated systems?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What guiding principles were established by the White House OSTP to ensure the protection of civil rights in the deployment of automated systems, and how were these principles shaped by public input?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the Request For Information (RFI) issued by the Office of Science and Technology Policy (OSTP) regarding biometric technologies. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of the RFI.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What was the purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What are Idaho's rules on pretrial risk assessment transparency and their alignment with federal ethical AI standards?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the purpose of the 'Action Plan to Advance Property Appraisal and Valuation Equity'. It is specific and clear in its intent, seeking information about the objective of a particular action plan. The question is self-contained and does not rely on external references or prior knowledge beyond the name of the action plan itself, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the Action Plan to Advance Property Appraisal and Valuation Equity?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What ongoing procedures and stakeholder engagements are essential for ensuring the safety and effectiveness of automated systems throughout their lifecycle?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the risks to privacy associated with the use of GAI (General Artificial Intelligence) systems. 
It is specific in its focus on privacy risks and does not rely on external references or unspecified contexts. The intent is clear, seeking information on potential privacy issues related to GAI systems. Therefore, the question is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What risks to privacy are associated with the use of GAI systems?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses specifically on data privacy considerations, while the second question encompasses a broader range of knowledge and security factors, leading to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the role of incident reporting in improving GAI risk management across the AI ecosystem, implying a broader and more systemic inquiry. The second question asks about the perks of logging GAI incidents for AI risk management, which is narrower in scope and focuses on specific benefits.', 'verdict': 0} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address issues at the intersection of technology and healthcare, the first question focuses on concerns about access and delivery, whereas the second question is broader, addressing challenges at the tech-healthcare equity intersection. This difference in focus leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of the lack of explanation for decisions made by automated systems. It is clear in its intent, seeking information on the consequences or effects of this lack of explanation. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided. However, it could be improved by specifying the type of automated systems (e.g., AI, machine learning models) to narrow down the scope and provide a more focused answer.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI trustworthy characteristics', 'Human-AI Configuration', 'Information Integrity', 'Data Privacy', 'Confabulation', 'GAI systems', 'Digital content transparency', 'Harmful bias', 'Content provenance', 'AI system trustworthiness'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the influence of the National Association for the Advancement of Colored People (NAACP) on civil rights advocacy, with a specific focus on the governance of emerging technologies as highlighted by recent public engagements. It is clear in specifying the organization (NAACP) and the areas of interest (civil rights advocacy and governance of emerging technologies). However, the phrase 'recent public engagements' is somewhat vague and could benefit from more specificity. 
For improved clarity, the question could specify particular events, statements, or initiatives by the NAACP related to emerging technologies.", 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What are the suggested actions to address confabulation in GAI systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the ongoing procedures and stakeholder engagements necessary to ensure the safety and effectiveness of automated systems throughout their lifecycle. It is clear in its intent, specifying the focus on safety and effectiveness and the lifecycle of automated systems. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses solely on the requirements for transparency in pretrial risk assessments in Idaho, while the second question also includes an inquiry into how these rules align with federal ethical AI standards, adding an additional layer of complexity.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for notable instances where automated systems have caused harm due to algorithmic bias, specifically focusing on safety violations or discriminatory impacts. It is clear in its intent, specifying the type of harm (safety violations or discriminatory impacts) and the cause (algorithmic bias). The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does the NAACP impact civil rights in tech governance?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What insights did OSTP aim to gather through the RFI on biometric tech, and what types of stakeholders were involved in the feedback process?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What key processes and stakeholder interactions ensure automated systems' safety and effectiveness?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What objectives does the PAVE initiative aim to achieve in relation to racial equity and the valuation disparities affecting marginalized communities?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of bias and discrimination in automated systems on the rights of the American public?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for precautions to be taken when using derived data sources in automated systems. It is clear in specifying the topic of interest (precautions, derived data sources, automated systems) and seeks detailed information on safety or best practices. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What multifaceted privacy threats arise from GAI systems, particularly concerning data usage and potential misuse by individuals?" 
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the ongoing procedures and stakeholder engagements necessary to ensure the safety and effectiveness of automated systems throughout their lifecycle. It is clear in its intent, specifying the focus on safety and effectiveness and the lifecycle of automated systems. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the guiding principles established by the White House OSTP to ensure the protection of civil rights in the deployment of automated systems and how these principles were shaped by public input. It is clear in specifying the topic of interest (guiding principles by the White House OSTP) and seeks detailed information on both the principles and the influence of public input. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "Any examples of harm from algorithmic bias in automated systems, especially regarding safety or discrimination?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Logging GAI incidents can facilitate smoother sharing of information with relevant AI Actors, empower them in responding to and managing AI incidents, and improve GAI risk management across the AI ecosystem. It also aids in documenting and reviewing third-party inputs and plugins, which is crucial for incident disclosure.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions several knowledge and security factors for GAI deployment, including the need to document the extent of human domain knowledge employed to improve GAI system performance, verify sources and citations in GAI system outputs, track instances of anthropomorphization in GAI system interfaces, verify GAI system training data and TEVV data provenance, and regularly review security and safety guardrails, especially in novel circumstances.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What key processes and stakeholder interactions ensure automated systems' safety and effectiveness?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What principles did the White House OSTP set for civil rights in automated systems, and how was public input involved?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the components of ongoing monitoring for automated systems, while the second question addresses key processes and stakeholder interactions to ensure safety and effectiveness. These questions differ in both scope and depth.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question asks about the overall role of the NAACP in advocacy and civil rights, while the second question specifically focuses on the NAACP's impact on civil rights within the context of tech governance. These questions have different scopes and depths of inquiry.", 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Panelists identified several challenges at the tech-healthcare equity intersection, including access to and expense of broadband service, privacy concerns associated with telehealth systems, and the expense of health monitoring devices, which can exacerbate equity issues. Additionally, they discussed how racial biases and the use of race in medicine perpetuate harms and embed prior discrimination, emphasizing the need for accountability of the technologies used in medical care and the importance of hearing the voices of those subjected to these technologies.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The lack of explanation for decisions made by automated systems can lead to several implications, including the inability for individuals to understand or contest decisions that affect their lives. For instance, applicants may not know why their resumes were rejected, defendants may be unaware if their bail decisions are influenced by an automated system labeling them as 'high risk', and individuals may face difficulties in correcting errors or contesting decisions due to a lack of transparency. This opacity can result in unaccountable decision-making processes and can hinder the public's ability to trust the validity and reasonable use of automated systems.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to address confabulation in GAI (General Artificial Intelligence) systems. It is clear in specifying the issue of interest (confabulation) and the context (GAI systems), and it seeks actionable recommendations. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the insights that the OSTP aimed to gather through the RFI on biometric tech and the types of stakeholders involved in the feedback process. It is clear in specifying the topic of interest (OSTP, RFI on biometric tech) and seeks detailed information on both the insights and the stakeholders. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data broker exposes social media profiles', 'Facial recognition technology', 'Surveillance technology', 'Virtual testing and disabled students', 'New surveillance technologies and disability discrimination', 'Digital surveillance', 'Reproductive health clinics', 'Private equity firms', 'Facial recognition ban', 'User privacy protection']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the objectives of the PAVE initiative in relation to racial equity and valuation disparities affecting marginalized communities. It is clear in specifying the initiative (PAVE) and the areas of interest (racial equity and valuation disparities). The intent is clear, seeking information on the goals of the PAVE initiative. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated system', 'Plain language documentation', 'System functioning', 'Outcome explanations', 'User notification']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of bias and discrimination in automated systems on the rights of the American public. It is specific in its focus on bias and discrimination within automated systems and their impact on a particular group (the American public). The intent is clear, seeking an explanation of the consequences of these issues on rights. The question is independent and does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the components of ongoing monitoring, while the second question addresses processes and stakeholder interactions for ensuring safety and effectiveness. These questions differ in both scope and depth.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Idaho's rules on pretrial risk assessment transparency require that any pretrial risk assessment be shown to be free of bias against any class of individuals protected from discrimination by state or federal law. Additionally, any locality using a pretrial risk assessment must formally validate the claim of it being free of bias, and all documents, records, and information used to build or validate the risk assessment must be open to public inspection. However, the context does not provide specific information on how these rules align with federal ethical AI standards.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the multifaceted privacy threats arising from GAI (General Artificial Intelligence) systems, specifically focusing on data usage and potential misuse by individuals. It is clear in its intent to explore privacy threats and specifies the areas of concern (data usage and misuse). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of the AI Bill of Rights, while the second question asks about the principles set for civil rights in automated systems and the involvement of public input. These questions have different constraints and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek examples of harms caused by algorithmic bias in automated systems, with the second question specifying areas like safety or discrimination. However, the core inquiry and constraints are similar.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Opt out', 'Human alternatives', 'Automated systems', 'Human consideration', 'Sensitive domains']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What insights did OSTP seek from the biometric tech RFI, and who provided feedback?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Disinformation and misinformation', 'Generative AI models', 'Information security risks', 'Cybersecurity attacks']
+[ragas.testset.evolutions.INFO] seed question generated: "What are some concerns associated with the use of surveillance technology in various sectors?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What goals does PAVE have for racial equity and valuing marginalized communities?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.evolutions.INFO] seed question generated: "What should users be notified about regarding automated systems that impact them?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do GAI systems play in augmenting cybersecurity attacks?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be in place to ensure human alternatives and consideration in the use of automated systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Key processes and stakeholder interactions that ensure automated systems' safety and effectiveness include ongoing monitoring procedures, clear organizational oversight, consultation with the public during various phases of development, extensive testing before deployment, and proactive risk identification and mitigation. These processes involve continuous evaluation of performance metrics, involvement of organizational stakeholders, engagement with diverse impacted communities, and adherence to domain-specific best practices for testing.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What privacy risks come from GAI systems regarding data use and misuse?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Sensitive data', 'Sensitive domains', 'Surveillance technology', 'Underserved communities']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Biometric Information Privacy Act', 'Transparency for machine learning systems', 'Adverse action notices', 'Explainable AI systems', 'California warehouse employee quotas']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses solely on the purpose of the RFI issued by OSTP regarding biometric technologies, while the second question asks for both the insights sought and the sources of feedback, indicating a broader scope.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Risk assessment', 'Explanatory mechanisms', 'Transparency in decision-making', 'Summary reporting']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting and reproducing existing unwanted inequities. These outcomes can undermine civil rights and democratic values, which are foundational American principles.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the overall purpose of the Action Plan to Advance Property Appraisal and Valuation Equity, while the second question specifically targets the goals related to racial equity and valuing marginalized communities. These questions have different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What initiatives are being taken to promote transparency for machine learning systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the key considerations outlined in the AI Bill of Rights regarding sensitive data and underserved communities?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Precautions that should be taken when using derived data sources in automated systems include careful tracking and validation of derived data, as it is viewed as potentially high-risk and may lead to feedback loops, compounded harm, or inaccurate results. Such data should be validated against the risk of collateral consequences.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about concerns associated with the use of surveillance technology in various sectors. It is clear in its intent, seeking information on potential issues or drawbacks of surveillance technology across different fields. The question is independent and does not rely on external references or specific prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are some concerns associated with the use of surveillance technology in various sectors?"
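+
+The `context scoring` entries above are the critic model grading each merged document node on four rubric dimensions (clarity, depth, structure, relevance) before the node is used to seed a question. Judging from the logged values, the `score` field is simply the arithmetic mean of the four ratings, as in this plain-Python illustration (not ragas internals):
+
+```python
+# Rubric values copied from a "context scoring" entry above.
+clarity, depth, structure, relevance = 2, 3, 2, 3
+score = (clarity + depth + structure + relevance) / 4
+print(score)  # 2.5, matching the logged 'score' field
+```
+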
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.75}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Algorithmic discrimination protections', 'Data privacy', 'Human alternatives', 'Automated systems']
+[ragas.testset.evolutions.INFO] seed question generated: "What should be included in the design of explanatory mechanisms for automated systems in high-risk settings?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Key processes and stakeholder interactions that ensure automated systems' safety and effectiveness include ongoing monitoring procedures, clear organizational oversight, consultation with the public during various phases of development, extensive testing before deployment, and proactive risk identification and mitigation. These processes involve continuous evaluation of performance metrics, involvement of organizational stakeholders, engagement with diverse impacted communities, and adherence to domain-specific best practices for testing.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about privacy risks associated with GAI systems, focusing on data use and misuse. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 4 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about measures to ensure human alternatives and consideration in the use of automated systems. It is clear in its intent, seeking specific measures or guidelines. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of GAI (General Artificial Intelligence) systems in augmenting cybersecurity attacks. It is clear in specifying the topic of interest (GAI systems and cybersecurity attacks) and seeks information on the specific role these systems play. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what users should be notified about regarding automated systems that impact them. It is clear in its intent, seeking information on the types of notifications or disclosures that should be provided to users. The question is independent and does not rely on external references or unspecified contexts. However, it could be improved by specifying the types of automated systems (e.g., AI algorithms, automated decision-making systems) or the nature of the impact (e.g., privacy, decision outcomes) to provide more context and focus for the answer.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the key considerations regarding data privacy in the context of the AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The Blueprint for an AI Bill of Rights includes five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. It was developed through extensive consultation with the American public, which involved a year-long process of seeking and distilling input from impacted communities, industry stakeholders, technology developers, and policymakers. This public engagement included panel discussions, public listening sessions, and a formal request for information, allowing various voices to shape the principles aimed at preventing algorithmic and data-driven harms.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The suggested actions to address confabulation in GAI systems include: 1) Avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments (MS-2.5-001). 2) Review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities (MS-2.5-003). 3) Evaluate GAI system performance in real-world scenarios to observe its behavior in practical environments and reveal issues that might not surface in controlled and optimized testing environments (MS-4.2-002).', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Synthetic training data', 'Model collapse', 'Environmental impact', 'GAI systems', 'Carbon capture programs']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Transparency artifacts', 'Explainable AI (XAI)', 'Pre-trained models', 'Harmful bias', 'Content filters']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI lifecycle', 'AI technology risks', 'Organizational practices for AI', 'Impact documentation process', 'Content provenance methodologies']
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to assess the environmental impact of AI model training and management activities?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Identity theft', 'Facial recognition system', 'Surveillance software', 'Employee discussions about union activity']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about initiatives to promote transparency in machine learning systems. It is clear in its intent, seeking information on specific actions or programs aimed at enhancing transparency in this field. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What initiatives are being taken to promote transparency for machine learning systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of reviewing transparency artifacts in the context of third-party models?"
+[ragas.testset.evolutions.INFO] seed question generated: "What organizational practices are necessary for enabling AI testing and incident identification?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Public consultation', 'Testing and deployment', 'Risk identification and mitigation', 'Safety and effectiveness']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key considerations outlined in the AI Bill of Rights regarding sensitive data and underserved communities. It is clear in specifying the document of interest (AI Bill of Rights) and the specific aspects (sensitive data and underserved communities) it seeks information about. The intent is clear, and the question is independent as it does not rely on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the key considerations outlined in the AI Bill of Rights regarding sensitive data and underserved communities?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data broker exposes social media profiles', 'Facial recognition technology', 'Surveillance technology', 'Virtual testing and disabled students', 'New surveillance technologies and disability discrimination', 'Digital surveillance', 'Reproductive health clinics', 'Private equity firms', 'Facial recognition ban', 'User privacy protection']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'OSTP sought insights on the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. Feedback was provided by 130 organizations and individuals, including Accenture, ACLU, Google, Microsoft Corporation, and many others.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for elements that should be included in the design of explanatory mechanisms for automated systems in high-risk settings. It is clear in specifying the topic (explanatory mechanisms) and the context (automated systems in high-risk settings), making the intent clear and understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it independent and answerable based on domain knowledge.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What should be included in the design of explanatory mechanisms for automated systems in high-risk settings?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of how data privacy principles aim to protect against identity theft?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some concerns associated with digital surveillance as highlighted in recent articles?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the key considerations for testing and deployment of automated systems to ensure their safety and effectiveness?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key considerations regarding data privacy within the context of the AI Bill of Rights. It is clear in specifying the topic of interest (data privacy) and the context (AI Bill of Rights), making the intent straightforward. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Users should be notified about the use of automated systems, the individual or organization responsible for the system, significant use case or key functionality changes, and how and why an outcome impacting them was determined by the automated system.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'GAI systems may augment cybersecurity attacks by advancing offensive cyber capabilities such as hacking, malware, and phishing. Reports indicate that large language models (LLMs) can discover vulnerabilities in systems and write code to exploit them. Sophisticated threat actors might develop GAI-powered security co-pilots to inform attackers on how to evade threat detection and escalate privileges after gaining system access.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about issues arising from surveillance technology in sectors like education and healthcare. It is clear in specifying the sectors of interest (education and healthcare) and the general topic (issues from surveillance technology). The intent is to understand the problems or challenges associated with the use of surveillance tech in these specific sectors. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Technical protections', 'Rights of the American public', 'Implementation of principles']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Measures to ensure human alternatives and consideration in the use of automated systems include the ability to opt out from automated systems in favor of a human alternative where appropriate, access to timely human consideration and remedy through a fallback and escalation process if an automated system fails, and ensuring that human consideration and fallback are accessible, equitable, effective, and maintained. Additionally, automated systems in sensitive domains should be tailored to their purpose, provide meaningful access for oversight, include training for people interacting with the system, and incorporate human consideration for adverse or high-risk decisions.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What measures are being implemented to ensure the public is informed about the use of automated systems in decision-making processes, particularly regarding their rights and opportunities?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['OSTP', 'Artificial intelligence', 'Biometric technologies', 'Request For Information (RFI)', 'Public comments']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to assess the environmental impact of AI model training and management activities. It is specific in its focus on environmental impact and AI model training and management, making the intent clear. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What issues arise from surveillance tech in sectors like education and healthcare?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What critical factors regarding sensitive data protection and the rights of historically marginalized groups are emphasized in the AI Bill of Rights?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are outlined in the Blueprint for an AI Bill of Rights to protect the rights of the American public?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of reviewing transparency artifacts in the context of third-party models. It is clear in specifying the topic of interest (transparency artifacts, third-party models) and seeks information on the purpose of this review. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human subject protection', 'Content provenance', 'Data privacy', 'AI system performance', 'Anonymization techniques']
+[ragas.testset.evolutions.INFO] seed question generated: "What was the purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What elements must be integrated into the design of automated systems in high-risk environments to ensure clear, valid, and accessible explanations of decisions made by these systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for examples of how data privacy principles protect against identity theft. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on the application of data privacy principles in the context of identity theft protection.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are some examples of how data privacy principles aim to protect against identity theft?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Accessibility standards', 'Disparity assessment', 'Algorithmic discrimination', 'Ongoing monitoring and mitigation']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI models', 'Synthetic NCII and CSAM', 'Trustworthy AI Characteristics', 'Value Chain and Component Integration', 'GAI risks management']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the organizational practices necessary for enabling AI testing and incident identification. It is clear in its intent, seeking specific practices related to AI testing and incident identification within an organization. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What organizational practices are necessary for enabling AI testing and incident identification?"
+[ragas.testset.evolutions.INFO] seed question generated: "What criteria are used to measure AI system performance or assurance in deployment settings?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key considerations in testing and deploying automated systems to ensure their safety and effectiveness. It is clear in its intent, specifying the focus on safety and effectiveness, and does not rely on external references or unspecified contexts. The question is self-contained and understandable, making it specific and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken during disparity assessment of automated systems to ensure inclusivity and fairness?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about concerns associated with surveillance technology in various sectors, which is broader than the second question that focuses specifically on education and healthcare sectors.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the challenges associated with value chain and component integration in GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Clear and accessible notice', 'Explanations for decisions', 'Algorithmic impact assessments', 'User experience research']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the measures being implemented to inform the public about the use of automated systems in decision-making processes, with a focus on their rights and opportunities. It is clear in its intent, specifying the topic of interest (public information measures, automated systems, decision-making processes) and the specific aspects of interest (rights and opportunities). The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What instances illustrate how principles of data privacy mitigate risks associated with identity theft in the context of pervasive surveillance and data collection practices?"
+[ragas.testset.evolutions.INFO] seed question generated: "What role do algorithmic impact assessments play in the expectations for automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the measures outlined in the 'Blueprint for an AI Bill of Rights' to protect the rights of the American public. It is specific in its request for information about the measures and clearly identifies the document of interest ('Blueprint for an AI Bill of Rights'). The intent is clear, and the question is self-contained, not relying on external references or prior knowledge beyond the named document. Therefore, it meets the criteria for clarity and answerability.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What measures are outlined in the Blueprint for an AI Bill of Rights to protect the rights of the American public?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What organizational strategies must be implemented to facilitate effective AI testing and incident reporting while ensuring comprehensive risk communication and feedback integration?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of reviewing transparency artifacts in the context of third-party models is to ensure information integrity, security, and effective value chain and component integration.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps are taken to inform the public about automated decision-making and their rights?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for the elements that need to be integrated into the design of automated systems in high-risk environments to ensure clear, valid, and accessible explanations of decisions made by these systems. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is also clear, seeking detailed information on design elements for explanation purposes in high-risk environments.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the Request For Information (RFI) issued by the Office of Science and Technology Policy (OSTP) regarding biometric technologies. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What was the purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the critical factors related to sensitive data protection and the rights of historically marginalized groups as emphasized in the AI Bill of Rights. It is specific in its focus on two key areas (sensitive data protection and rights of marginalized groups) and clearly identifies the document of interest (AI Bill of Rights). The intent is clear, seeking information on particular aspects of the AI Bill of Rights. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the criteria used to measure AI system performance or assurance in deployment settings. It is clear in specifying the topic of interest (criteria for measuring AI system performance or assurance) and seeks detailed information on these criteria. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What elements ensure clear explanations in automated systems for high-risk environments?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The concerns with surveillance technology in education and healthcare include its invasive nature, potential for discrimination, and the disproportionate harm it may cause to disabled individuals. Specifically, new surveillance technologies can monitor students in ways that may violate their privacy and exacerbate existing inequalities, particularly for those with disabilities.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The suggested measures to assess the environmental impact of AI model training and management activities include: 1) Assessing safety to physical environments when deploying GAI systems, 2) Documenting anticipated environmental impacts of model development, maintenance, and deployment in product design decisions, 3) Measuring or estimating environmental impacts such as energy and water consumption for training, fine-tuning, and deploying models, and verifying trade-offs between resources used at inference time versus additional resources required at training time, and 4) Verifying the effectiveness of carbon capture or offset programs for GAI training and applications, while addressing green-washing concerns.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Key considerations for testing and deployment of automated systems to ensure their safety and effectiveness include extensive testing before deployment, following domain-specific best practices, considering the roles of human operators, mirroring real-world conditions during testing, comparing system performance with existing human-driven procedures, and identifying and mitigating potential risks proactively. Testing should include both automated and human-led testing, and decision possibilities should include the option of not deploying the system if performance does not meet standards.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to be taken during the disparity assessment of automated systems to ensure inclusivity and fairness. It is clear in specifying the topic of interest (disparity assessment, automated systems) and the desired outcome (inclusivity and fairness). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for concerns associated with digital surveillance as highlighted in recent articles. It is clear in specifying the topic of interest (concerns with digital surveillance) and the source of information (recent articles). However, it does not specify which articles or provide any context about the articles, making it somewhat vague. To improve clarity and answerability, the question could specify the type of concerns (e.g., privacy, security, ethical issues) or mention specific articles or sources if known.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are some concerns associated with digital surveillance as highlighted in recent articles?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the challenges associated with value chain and component integration in GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (value chain and component integration) and the context (GAI systems). The intent is to understand the challenges, which is straightforward and unambiguous. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What key points on data protection and marginalized rights are in the AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on promoting transparency in machine learning systems, while the second question is about informing the public about automated decision-making and their rights. These are related but distinct topics, leading to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What key principles and actionable practices are proposed in the Blueprint for an AI Bill of Rights to ensure the safeguarding of civil liberties and democratic values for the American populace?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Trustworthy AI', 'Transparency policies', 'Risk management activities', 'Information integrity', 'GAI capabilities']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What insights did OSTP aim to gather through the RFI on biometric tech, and what types of stakeholders were involved in the feedback process?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for instances that illustrate how principles of data privacy mitigate risks associated with identity theft in the context of pervasive surveillance and data collection practices. It is clear in specifying the topic of interest (data privacy principles, identity theft, pervasive surveillance, and data collection practices) and seeks specific examples or instances. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the components necessary for effective explanatory mechanisms in automated systems within high-risk settings, sharing the same constraints, requirements, depth, and breadth.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI model explanation', 'GAI risks', 'Privacy risk assessment', 'Data provenance', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about organizational strategies for effective AI testing, incident reporting, risk communication, and feedback integration. It is clear in its intent, specifying the areas of interest (AI testing, incident reporting, risk communication, feedback integration) and the desired outcome (effective facilitation). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Disinformation and misinformation', 'Generative AI models', 'Information security risks', 'Cybersecurity attacks']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of algorithmic impact assessments in the expectations for automated systems. It is clear in specifying the topic of interest (algorithmic impact assessments) and the context (expectations for automated systems). The intent is to understand the significance or function of these assessments within the given context. The question is self-contained and does not rely on external references or prior knowledge beyond general understanding of the terms used. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-enabled systems', 'Technological diffusion', 'Urban planning', 'Criminal justice system', 'Predictive policing', 'Artificial Intelligence and Democratic Values', 'Non-discriminatory technology', 'Explainable AI', 'Community participation', 'Social welfare systems']
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of establishing transparency policies for GAI applications?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What examples show how data privacy principles reduce identity theft risks amid widespread surveillance?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for conditions similar to deployment setting(s). Measures are documented.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What org strategies help with AI testing, incident reporting, and risk communication?"
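+
+Two verdict-producing filters are interleaved in these logs. The `filtered question` entries grade each candidate question for clarity and self-containment: verdict 1 keeps it, while verdict 0 (as with the "digital surveillance" question above) sends it back to be rewritten. The `evolution filter` entries then compare an evolved question against its seed: verdict 0 means the evolved question differs enough to keep, while verdict 1 means it was judged equivalent, which is what the `retrying evolution: N times` lines record. A hypothetical sketch of that control flow (illustrative names, not the ragas API):
+
+```python
+def evolve_with_retry(seed, evolve, evolution_filter, max_retries=5):
+    """Sketch of the retry loop implied by the log lines above."""
+    for _ in range(max_retries):
+        candidate = evolve(seed)
+        # Verdict 0: sufficiently different from the seed, so keep it.
+        if evolution_filter(seed, candidate) == 0:
+            return candidate
+        # Verdict 1: judged equivalent; logged as "retrying evolution".
+    return seed  # assumed fallback; the logs do not show what happens after max retries
+```
+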
+[ragas.testset.evolutions.INFO] seed question generated: "What are some suggested actions to address GAI risks in AI systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions focus on the AI Bill of Rights and its key points regarding data protection and the rights of underserved or marginalized communities. They share the same constraints and requirements, as well as a similar depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with generative AI models in the context of disinformation and cybersecurity?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of technological diffusion in the context of integrating AI technologies within communities?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI system evaluation', 'Safety risks', 'Harmful bias', 'Data privacy violations', 'GAI system outputs']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'During disparity assessment of automated systems, measures should include testing using a broad set of measures to assess whether the system components produce disparities. The demographics of the assessed groups should be as inclusive as possible, covering aspects such as race, color, ethnicity, sex, religion, age, national origin, disability, and other classifications protected by law. The assessment should include demographic performance measures, overall and subgroup parity assessment, and calibration. Additionally, demographic data collected for disparity assessment should be separated from data used for the automated system, and privacy protections should be instituted.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Reporting expectations', 'Transparency', 'Artificial Intelligence ethics', 'Traffic calming measures', 'AI Risk Management Framework']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek examples of how data privacy principles protect against identity theft, with a focus on the context of widespread surveillance in the second question. However, the core inquiry and requirements remain the same.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 5 times
+[ragas.testset.evolutions.INFO] seed question generated: "What steps are suggested to assess harmful bias in the AI system's training data?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Steps taken to inform the public about automated decision-making and their rights include written notice provided by private entities in Illinois regarding the use of biometric information, federal laws requiring lenders to notify consumers about adverse actions related to credit decisions, and California laws mandating that warehouse employees receive written descriptions of quotas. Additionally, major technology companies are developing frameworks for transparency in machine learning systems, and federal agencies are conducting research on explainable AI systems to ensure that the public understands how automated systems impact their rights and opportunities.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Homogenized outputs', 'Model collapse', 'Trustworthy AI Characteristics', 'Automation bias', 'Information integrity']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on organizational practices specifically for AI testing and incident identification, while the second question includes a broader scope by adding incident reporting and risk communication. This difference in scope affects the depth and breadth of the inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for key principles and actionable practices proposed in the 'Blueprint for an AI Bill of Rights' to safeguard civil liberties and democratic values for the American populace. It is specific in its request for principles and practices, and it clearly identifies the document of interest ('Blueprint for an AI Bill of Rights'). The intent is clear, seeking detailed information on how the document aims to protect civil liberties and democratic values. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the principles of artificial intelligence ethics as outlined for the intelligence community?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the insights that the OSTP aimed to gather through the RFI on biometric tech and the types of stakeholders involved in the feedback process. It is clear in specifying the topic of interest (OSTP, RFI on biometric tech) and seeks detailed information on both the insights and the stakeholders. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for concerns associated with digital surveillance as highlighted in recent articles. It is clear in specifying the topic of interest (concerns with digital surveillance) and the source of information (recent articles). However, it does not specify which articles or provide any context about the articles, making it somewhat dependent on external references. To improve clarity and answerability, the question could specify the articles or the key themes discussed in those articles, or alternatively, frame the question in a way that does not rely on unspecified external sources.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of establishing transparency policies for GAI (General Artificial Intelligence) applications. It is clear in specifying the topic of interest (transparency policies for GAI applications) and seeks information on the rationale behind these policies. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of establishing transparency policies for GAI applications?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Challenges associated with value chain and component integration in GAI systems include the improper acquisition or vetting of third-party components such as datasets, pre-trained models, and software libraries, which can lead to diminished transparency and accountability. The scale of training data may be too large for humans to vet, and the difficulty of training foundation models can result in extensive reuse of a limited number of models. Additionally, it may be difficult to attribute issues in a system's behavior to any one of these sources, and errors in third-party GAI components can have downstream impacts on accuracy and robustness.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What can lead to model collapse in AI systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What principles in the AI Bill of Rights protect U.S. civil liberties?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to address GAI (General Artificial Intelligence) risks in AI systems. It is clear in its intent, seeking specific actions or strategies to mitigate risks associated with GAI. The question is independent and does not rely on external references or prior knowledge beyond a basic understanding of AI and GAI risks. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Sensitive data', 'Ethical review', 'Data quality', 'Access limitations']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Artificial Intelligence Decisionmaking', 'Biometric Information Privacy Act', 'Model Cards for Model Reporting', 'Adverse Action Notice Requirements', 'Explainable Artificial Intelligence (XAI)']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the potential risks associated with generative AI models in the context of disinformation and cybersecurity. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on risks, which allows for a direct and relevant response.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What insights did OSTP seek from the biometric tech RFI, and who provided feedback?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of technological diffusion in the context of integrating AI technologies within communities. It is clear in specifying the topic of interest (technological diffusion) and the context (integrating AI technologies within communities). The intent is to understand the importance or impact of this diffusion process. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI lifecycle', 'AI technology risks', 'Organizational practices for AI', 'Impact documentation process', 'Content provenance methodologies']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.75}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Algorithmic discrimination protections', 'Data privacy', 'Human alternatives', 'Automated systems']
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of an ethical review in the context of using sensitive data?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of Explainable Artificial Intelligence (XAI) as referenced by DARPA?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Stakeholder meetings', 'Private sector and civil society', 'Positive use cases', 'Potential harms and oversight']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the steps suggested to assess harmful bias in an AI system's training data. It is clear in its intent, seeking specific steps or methods for bias assessment. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What steps are suggested to assess harmful bias in the AI system's training data?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the key oversight functions involved in the GAI lifecycle?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role do transparency policies play in mitigating risks associated with GAI applications while ensuring compliance with legal and ethical standards?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Organizational strategies that help with AI testing, incident reporting, and risk communication include establishing policies for measuring the effectiveness of content provenance methodologies, identifying the minimum set of criteria necessary for GAI system incident reporting, and verifying information sharing and feedback mechanisms regarding any negative impact from GAI systems.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the protective measures or principles in the AI Bill of Rights aimed at safeguarding the rights or civil liberties of the American public, sharing the same depth, breadth, and requirements for the answer.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Information integrity', 'Human-AI configuration', 'Digital content transparency', 'Harmful bias and homogenization']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses solely on the purpose of the RFI issued by OSTP regarding biometric technologies, while the second question asks for both the insights sought and the sources of feedback, indicating a broader scope.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the principles of artificial intelligence ethics as outlined for the intelligence community. It is clear in specifying the topic of interest (principles of AI ethics) and the context (intelligence community). The intent is straightforward, seeking specific information about ethical guidelines. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the principles of artificial intelligence ethics as outlined for the intelligence community?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of human-AI configuration in managing GAI risks and ensuring information integrity?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors that can lead to model collapse in AI systems. It is clear in its intent, seeking information on potential causes of model collapse. The question is independent and does not rely on external references or unspecified contexts. Therefore, it is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What can lead to model collapse in AI systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of automated systems mentioned in the technical companion to the AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Some suggested actions to address GAI risks in AI systems include: applying and documenting ML explanation results such as analysis of embeddings, counterfactual prompts, gradient-based attributions, model compression/surrogate models, and occlusion/term reduction. Additionally, documenting GAI model details including proposed use and organizational value, assumptions and limitations, data collection methodologies, data provenance, data quality, model architecture, optimization objectives, training algorithms, RLHF approaches, fine-tuning or retrieval-augmented generation approaches, evaluation data, ethical considerations, and legal and regulatory requirements.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-generated content', 'Real-time auditing tools', 'User feedback mechanisms', 'Synthetic data', 'Incident response and recovery plans']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Fairness in Artificial Intelligence', 'Automatic signature verification', 'Ballot curing', 'Digital divide in unemployment benefits', 'Racial equity and underserved communities']
+[ragas.testset.evolutions.INFO] seed question generated: "What topics were discussed regarding potential harms and oversight in the development of the AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What methodologies are recommended for evaluating the presence of harmful bias in AI training data while ensuring the system's overall safety and reliability?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of an ethical review specifically in the context of using sensitive data. It is clear in its intent, seeking an explanation of the role and importance of ethical reviews when handling sensitive data. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of an ethical review in the context of using sensitive data?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Technological diffusion is significant in the context of integrating AI technologies within communities as it emphasizes the importance of thoughtful and responsible development and integration of technology. Panelists noted that examining how technological diffusion has worked in urban planning can provide lessons on balancing ownership rights, use rights, and community health, safety, and welfare, ensuring better representation of all voices, especially those traditionally marginalized by technological advances.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of Explainable Artificial Intelligence (XAI) as referenced by DARPA. It is clear in specifying the topic of interest (XAI) and the source of reference (DARPA), making the intent clear and specific. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of Explainable Artificial Intelligence (XAI) as referenced by DARPA?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What ethical guidelines for AI usage are established for the intelligence sector, and how do they align with NIST's standards for safe and transparent AI development?"
+[ragas.testset.evolutions.INFO] seed question generated: "What issue does the digital divide in unemployment benefits highlight in relation to access for individuals?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of using real-time auditing tools in the context of AI-generated data?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key oversight functions involved in the GAI (General Artificial Intelligence) lifecycle. It is clear in specifying the topic of interest (oversight functions in the GAI lifecycle) and seeks detailed information on these functions. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the key oversight functions involved in the GAI lifecycle?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The potential risks associated with generative AI models in the context of disinformation include the ease of producing or disseminating false, inaccurate, or misleading content at scale, both unintentionally (misinformation) and deliberately (disinformation). GAI systems can enable malicious actors to create targeted disinformation campaigns, generate realistic deepfakes, and produce compelling imagery and propaganda. In terms of cybersecurity, GAI models may lower barriers for offensive capabilities, expand the attack surface, and assist in discovering vulnerabilities and writing exploit code, thereby augmenting cybersecurity attacks such as hacking, malware, and phishing.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of transparency policies in mitigating risks associated with GAI (General Artificial Intelligence) applications while ensuring compliance with legal and ethical standards. It is clear in specifying the topic of interest (transparency policies, GAI applications) and seeks detailed information on both risk mitigation and compliance with standards. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Preventable harms', 'Ethics review', 'Sepsis prediction model', 'Algorithmic bias']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What factors contribute to the phenomenon of model collapse in AI systems, particularly in relation to the reliance on synthetic data and the potential for harmful biases?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do transparency policies help manage GAI risks and ensure compliance?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'OSTP sought insights on the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. Feedback was provided by 130 organizations and individuals, including Accenture, ACLU, Google, Microsoft Corporation, and many others.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What actions can be taken to prevent the harms associated with automated systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['TEVV metrics', 'Measurement error models', 'GAI system risks', 'Feedback processes', 'Harmful bias and homogenization']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of human-AI configuration in managing GAI (General Artificial Intelligence) risks and ensuring information integrity. It is clear in specifying the topic of interest (human-AI configuration) and the areas of concern (GAI risks and information integrity). The intent is clear, seeking an explanation of the importance or impact of this configuration. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does an ethical review play in ensuring that the use of sensitive data aligns with established privacy protections and minimizes potential risks?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the topics discussed concerning potential harms and oversight in the development of the AI Bill of Rights. It is specific and clear in its intent, seeking information on particular aspects (potential harms and oversight) related to a defined subject (AI Bill of Rights). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question is clear and specific, asking for recommended methodologies to evaluate harmful bias in AI training data while ensuring the system's overall safety and reliability. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on evaluation methodologies with a focus on safety and reliability.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does DARPA's XAI play in addressing the challenges posed by opaque AI decision-making in various sectors?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes?"
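The interleaved `seed question generated`, `[MultiContextEvolution]`, and `[ReasoningEvolution]` records reflect a distribution over ragas question evolutions: each evolution starts from a seed question, then rewrites, compresses, and filters it. The generation script is not part of this excerpt; a minimal sketch of a call that yields this kind of run, assuming the ragas 0.1.x testset API (the `ragas.testset.evolutions` / `ragas.testset.filters` module paths above belong to that generation) with illustrative file paths, test size, and weights:

```python
from langchain_community.document_loaders import PyMuPDFLoader
from ragas.testset.generator import TestsetGenerator
from ragas.testset.evolutions import simple, reasoning, multi_context

# Illustrative paths for the two Task 1 source PDFs.
docs = PyMuPDFLoader("data/Blueprint-for-an-AI-Bill-of-Rights.pdf").load()
docs += PyMuPDFLoader("data/NIST.AI.600-1.pdf").load()

# with_openai() wires up default OpenAI generator and critic models.
generator = TestsetGenerator.with_openai()

# The weights are assumptions, not values read from this run; they set
# how often a seed question is kept simple vs. evolved into the
# multi-context and reasoning variants logged above.
testset = generator.generate_with_langchain_docs(
    docs,
    test_size=20,
    distributions={simple: 0.5, multi_context: 0.4, reasoning: 0.1},
)
```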
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for examples of automated systems mentioned in the technical companion to the AI Bill of Rights. While it specifies the source (technical companion to the AI Bill of Rights) and the type of information sought (examples of automated systems), it assumes access to this specific document without providing its content or context. This reliance on an external reference makes the question unclear for those who do not have access to or are unfamiliar with the document. To improve clarity and answerability, the question could include a brief description or context of the technical companion or frame the question in a way that does not rely on specific, unpublished documents.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are some examples of automated systems mentioned in the technical companion to the AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What methods help assess bias in AI training data while ensuring safety?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Safe and effective systems', 'Automated systems', 'Pre-deployment testing', 'Risk identification and mitigation', 'Independent evaluation']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issue highlighted by the digital divide in the context of access to unemployment benefits for individuals. It is clear in its intent, seeking to understand the specific problem related to access caused by the digital divide. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What issue does the digital divide in unemployment benefits highlight in relation to access for individuals?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the ethical guidelines for AI usage in the intelligence sector and their alignment with NIST's standards for safe and transparent AI development. It is clear in specifying the topic of interest (ethical guidelines for AI in the intelligence sector) and the comparison criteria (alignment with NIST's standards). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for the purpose of transparency policies, while the second question focuses on how these policies help manage risks and ensure compliance. These questions have different requirements and depths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of using real-time auditing tools specifically in the context of AI-generated data. It is clear in specifying the topic of interest (real-time auditing tools) and the context (AI-generated data), making the intent straightforward and understandable. The question is self-contained and does not rely on external references or additional context to be answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of using real-time auditing tools in the context of AI-generated data?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to ensure that automated systems are safe and effective?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Digital content transparency', 'Harmful bias', 'Content provenance', 'AI system trustworthiness']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the oversight roles that span from GAI (General Artificial Intelligence) problem formulation to system decommission. It is clear in its intent, seeking information on the various oversight roles involved throughout the lifecycle of a GAI system. The question is specific and does not rely on external references or context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors contributing to model collapse in AI systems, with a specific focus on the reliance on synthetic data and the potential for harmful biases. It is clear in its intent, seeking an explanation of the causes of model collapse and the role of synthetic data and biases. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What are the AI ethics for intel and their alignment with NIST standards?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for actions to prevent harms associated with automated systems. It is clear in its intent, seeking specific preventive measures. The question is independent and does not rely on external references or unspecified contexts. It is broad but understandable and answerable with sufficient domain knowledge.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What actions can be taken to prevent the harms associated with automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the role of an ethical review in the context of using sensitive data. It seeks to understand how ethical reviews ensure alignment with privacy protections and risk minimization. The intent is unambiguous, and the question is self-contained, not relying on external references or prior knowledge beyond general understanding of ethical reviews and privacy protections.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of harmful bias in the context of GAI systems and how can it be addressed?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What causes model collapse in AI, especially with synthetic data and biases?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek methods to assess bias in AI training data, with an emphasis on safety and harmful bias. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions that some meetings focused on providing ideas related to the development of the Blueprint for an AI Bill of Rights, and others provided useful general context on the positive use cases, potential harms, and/or oversight possibilities for these technologies. However, specific topics discussed regarding potential harms and oversight are not detailed in the provided context.', 'verdict': -1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What oversight roles span from GAI problem formulation to system decommission?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The significance of human-AI configuration in managing GAI risks and ensuring information integrity lies in its role in evaluating content lineage and origin, adapting training programs for digital content transparency, and delineating human proficiency tests from GAI capabilities. It also involves continual monitoring of human-GAI configurations and engaging end-users in prototyping and testing activities to address various scenarios, including crisis situations and ethically sensitive contexts.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What challenges does the reliance on digital platforms for unemployment benefits reveal about equitable access for marginalized individuals?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does an ethical review help protect sensitive data?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of DARPA's XAI (Explainable Artificial Intelligence) in addressing challenges related to opaque AI decision-making across various sectors. It is clear in specifying the topic (DARPA's XAI) and the context (challenges of opaque AI decision-making), and it seeks information on the impact or role of XAI in different sectors. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role do real-time auditing tools play in ensuring the authenticity and tracking of AI-generated content while also facilitating effective monitoring and response to system performance issues?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of creating measurement error models for pre-deployment metrics within the context of TEVV (Test, Evaluation, Verification, and Validation) processes. It is clear in specifying the topic of interest (measurement error models, pre-deployment metrics, TEVV processes) and seeks an explanation of the purpose behind this practice. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Transparency policies help manage GAI risks by establishing processes for documenting the origin and history of training data and generated data for GAI applications. This promotes digital content transparency while balancing the proprietary nature of training approaches, thereby ensuring compliance with data privacy, information integrity, and intellectual property standards.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the principles of AI ethics specifically for the intelligence community, while the second question asks about AI ethics for intelligence and their alignment with NIST standards, introducing an additional requirement.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does DARPA's XAI tackle opaque AI decision-making challenges?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The second question specifies additional factors such as synthetic data and biases, which introduces a narrower and more detailed scope compared to the first question.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What proactive measures can be implemented to ensure automated systems are both safe and effective while preventing potential harms, including those arising from unintended uses or algorithmic discrimination?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Preventable harms', 'Ethics review', 'Sepsis prediction model', 'Algorithmic bias']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Notice and explanation', 'Impact on lives', 'Opaque decision-making', 'Algorithmic risk assessment']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for examples of automated systems mentioned in the technical companion to the AI Bill of Rights. It is clear in specifying the source (technical companion to the AI Bill of Rights) and the type of information sought (examples of automated systems). However, it assumes access to the technical companion document, which is not provided within the question. To improve clarity and answerability, the question could include a brief description or context of the technical companion or specify the type of automated systems of interest (e.g., healthcare, finance).', 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Sensitive domains', 'Predictive analytics', 'Student data collection', 'Employee data transfer']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure the safety and effectiveness of automated systems. It is clear in its intent, seeking specific actions or strategies. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided. However, it could be improved by specifying the type of automated systems (e.g., industrial robots, AI software) to narrow down the scope and provide more targeted answers.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the oversight functions or roles throughout the GAI lifecycle, requiring similar depth and breadth of information.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the role of an ethical review in the context of sensitive data, focusing on its purpose and protective measures. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of automated systems on individuals' rights and opportunities?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with the transfer of employee data to third-party job verification services?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some of the potential harms associated with the use of automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of harmful bias in the context of GAI (General Artificial Intelligence) systems and how it can be addressed. It is clear in specifying the topic of interest (harmful bias in GAI systems) and seeks detailed information on both the implications and potential solutions. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the implications of harmful bias in the context of GAI systems and how can it be addressed?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the challenges that reliance on digital platforms for unemployment benefits reveals regarding equitable access for marginalized individuals. It is clear in its intent, specifying the focus on challenges and equitable access for a particular group (marginalized individuals). The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Equitable design', 'Automated systems', 'Legal protections', 'Proactive equity assessments']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI risks management', 'Risk response options', 'Model release approaches', 'Information security', 'Harmful bias mitigation']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated system', 'Plain language documentation', 'System functioning', 'Outcome explanations', 'User notification']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question asks for the purpose of XAI as referenced by DARPA, while the second question focuses on how DARPA's XAI addresses opaque AI decision-making challenges. These questions have different requirements and depths of inquiry.", 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Unemployment benefits system', 'Fraud detection system', 'Access to pain medication', 'Automated performance evaluation', 'Human alternatives']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What issues arise for marginalized groups with digital unemployment benefits?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to protect individuals from algorithmic discrimination?"
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when determining model release approaches?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of real-time auditing tools in ensuring the authenticity and tracking of AI-generated content, as well as their role in monitoring and responding to system performance issues. It is clear in specifying the tools of interest (real-time auditing tools) and the aspects it wants to explore (authenticity, tracking, monitoring, and response). The intent is clear and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What should designers and developers provide to ensure transparency about the functioning of an automated system?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for proactive measures to ensure the safety and effectiveness of automated systems while preventing potential harms, including unintended uses and algorithmic discrimination. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking strategies or actions that can be implemented to address the mentioned concerns.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Model collapse in AI can occur when model training over-relies on synthetic data, leading to data points disappearing from the distribution of the new model's outputs. This threatens the robustness of the model overall and can result in homogenized outputs, amplifying any homogenization from the model used to generate the synthetic training data.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential issues associated with automated performance evaluation in the workplace?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the biases that need to be documented when creating measurement error models for pre-deployment metrics. It is clear in its intent, seeking specific information about biases in the context of measurement error models and pre-deployment metrics. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To ensure that automated systems are safe and effective, measures should include consultation with diverse communities, stakeholders, and domain experts to identify concerns and risks. Systems should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring. These measures should demonstrate safety and effectiveness based on intended use, mitigate unsafe outcomes, and adhere to domain-specific standards. Additionally, independent evaluation and reporting should confirm safety and effectiveness, with results made public whenever possible.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What strategies can be employed to mitigate harmful bias in GAI systems while ensuring the accuracy and integrity of the data used throughout their lifecycle?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do real-time auditing tools help with AI content authenticity and system monitoring?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the potential risks associated with transferring employee data to third-party job verification services. It does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. The intent is clear, seeking information on the risks involved in this data transfer process.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the potential risks associated with the transfer of employee data to third-party job verification services?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps can ensure automated systems are safe and effective, avoiding harms like misuse or bias?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Fairness and bias', 'Systemic bias assessment', 'GAI system outputs', 'Harmful bias and homogenization', 'Training data bias']
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What biases must be documented when creating measurement error models for pre-deployment metrics?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about potential harms associated with the use of automated systems. It is clear in its intent, seeking information on the negative impacts or risks of automated systems. The question is independent and does not rely on external references or additional context to be understood. It is specific enough to be answerable by someone with domain knowledge in automated systems or related fields.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are some of the potential harms associated with the use of automated systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address issues related to digital unemployment benefits and access, particularly for marginalized groups. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the implications of automated systems on individuals' rights and opportunities. It is clear in its intent, seeking information on the effects of automation on specific aspects of human life (rights and opportunities). The question is broad but understandable and does not rely on external references or context. However, it could be improved by specifying the type of automated systems (e.g., AI, robotics) or the context (e.g., workplace, legal systems) to narrow down the scope and provide a more focused answer.", 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the implications of automated systems on individuals' rights and opportunities?"
+[ragas.testset.evolutions.INFO] seed question generated: "What methods are suggested for conducting a systemic bias assessment in GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Automated systems', 'Timely human consideration', 'Fallback and escalation process', 'Sensitive domains']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated sentiment analyzer', 'Bias against Jews and gay people', 'Search engine results for minority groups', 'Advertisement delivery systems and stereotypes', 'Algorithmic discrimination in healthcare']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for determining model release approaches. It is clear in its intent, seeking information on factors to consider when deciding how to release a model. The question is independent and does not rely on external references or unspecified contexts. It is specific enough to be understood and answered by someone with domain knowledge in model release strategies.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What considerations should be taken into account when determining model release approaches?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek measures to prevent harms associated with automated systems, including misuse or bias, and require similar depth and breadth in the response.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what designers and developers should provide to ensure transparency about the functioning of an automated system. It is clear in its intent, seeking specific information on the measures or elements necessary for transparency in automated systems. The question is independent and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What should designers and developers provide to ensure transparency about the functioning of an automated system?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the purpose of using real-time auditing tools specifically for AI-generated data, while the second question asks about how these tools help with AI content authenticity and system monitoring. The scope and requirements differ.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to protect individuals from algorithmic discrimination. It is clear in its intent, seeking specific actions or strategies to address a well-defined issue (algorithmic discrimination). The question is independent and does not rely on external references or prior knowledge beyond a general understanding of algorithmic discrimination. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What measures should be taken to protect individuals from algorithmic discrimination?"
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when using automated systems in sensitive domains?"
+[ragas.testset.evolutions.INFO] seed question generated: "What issues does the automated sentiment analyzer address regarding bias in online statements?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Information integrity', 'Human-AI configuration', 'Digital content transparency', 'Harmful bias and homogenization']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about potential issues associated with automated performance evaluation in the workplace. It is clear in its intent, seeking information on the drawbacks or challenges of using automated systems for performance evaluation. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the potential issues associated with automated performance evaluation in the workplace?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for the purpose of creating measurement error models in the context of TEVV processes, which is a broader inquiry. The second question specifically asks about biases to note in these models, which is a narrower and more specific aspect.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What are the risks and unintended consequences associated with automated systems that may arise from their design, data reliance, and deployment practices?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of digital content transparency in the context of GAI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for strategies to mitigate harmful bias in GAI (General Artificial Intelligence) systems while ensuring the accuracy and integrity of the data used throughout their lifecycle. It is clear in specifying the topic of interest (mitigating bias in GAI systems) and the dual focus on both bias mitigation and data integrity. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What effects do automated systems have on individual rights and opportunities, and how do current laws and practices address the need for transparency in their decision-making processes?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['TEVV metrics', 'Measurement error models', 'GAI system risks', 'Feedback processes', 'Harmful bias and homogenization']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for methods suggested for conducting a systemic bias assessment in GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (systemic bias assessment in GAI systems) and seeks detailed information on the methods used. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What methods are suggested for conducting a systemic bias assessment in GAI systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the risks associated with third-party job verification using employee data, specifically considering potential misuse. It is clear in its intent, seeking information on the risks involved. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What factors should be evaluated when selecting model release strategies, considering both risk management approaches and feedback mechanisms from organizational oversight?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What challenges arise from the reliance on automated performance evaluations in workplaces, particularly regarding the absence of human oversight and the potential for biased outcomes?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the suggested actions to evaluate potential biases and stereotypes related to harmful bias and homogenization in AI-generated content?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions documenting biases or statistical variance in applied metrics or structured human feedback processes, particularly when modeling complex societal constructs such as hateful content. However, it does not specify particular biases to note for pre-deployment measurement error models.', 'verdict': -1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What risks arise from third-party job verification using employee data, considering potential misuse?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Real-time auditing tools aid in the tracking and validation of the lineage and authenticity of AI-generated data, which is essential for ensuring the integrity and reliability of the content produced by AI systems.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for proactive steps to ensure equitable design and mitigate algorithmic discrimination effects. It is clear in its intent, seeking specific actions or strategies to address these issues. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key elements that should be included in documentation to clarify the impact of an automated system. It is specific in its intent, seeking information on documentation elements, and does not rely on external references or context. The question is clear and self-contained, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for using automated systems in sensitive domains. It is clear in its intent, seeking information on factors to consider, and does not rely on external references or unspecified contexts. The question is specific and independent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies can reduce bias in GAI while maintaining data accuracy?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the risks and unintended consequences associated with automated systems, specifically focusing on aspects of their design, data reliance, and deployment practices. It is clear in its intent to understand potential negative outcomes related to these systems. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided. The specificity regarding design, data reliance, and deployment practices helps in narrowing down the scope of the answer.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What proactive steps ensure equitable design and mitigate algorithmic discrimination effects?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issues that an automated sentiment analyzer addresses concerning bias in online statements. It is clear in specifying the topic of interest (automated sentiment analyzer) and the specific aspect (bias in online statements). The intent is clear, seeking information on the problems related to bias that the sentiment analyzer aims to solve. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What issues does the automated sentiment analyzer address regarding bias in online statements?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key elements must be included in docs for clarity on an automated system's impact?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of digital content transparency in the context of GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (digital content transparency) and the context (GAI systems), making the intent clear and understandable. The question does not rely on external references or unspecified contexts, making it self-contained and independent. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the significance of digital content transparency in the context of GAI systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What combined strategies are recommended for assessing systemic bias in GAI systems while ensuring equitable outputs across diverse demographic groups?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What risks come with automated systems due to design and data use?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Technical protections', 'Rights of the American public', 'Implementation of principles']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the risks associated with transferring employee data to third-party job verification services, sharing the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the effects of automated systems on individual rights and opportunities and how current laws and practices address the need for transparency in their decision-making processes. It is clear in specifying the topic of interest (automated systems, individual rights, opportunities, transparency) and seeks detailed information on both the effects and the legal/practical responses. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'AI Bill of Rights', 'Civil rights and liberties', 'Equal opportunities', 'Access to critical resources']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 2, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the challenges associated with relying on automated performance evaluations in workplaces. It highlights two particular concerns: the absence of human oversight and the potential for biased outcomes. The intent is clear, and the question is self-contained, not requiring additional context or external references to be understood and answered. It effectively conveys its purpose and allows for a direct and relevant response.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for factors to be evaluated when selecting model release strategies, with a focus on risk management approaches and feedback mechanisms from organizational oversight. It is clear in specifying the topic of interest (model release strategies) and the aspects to consider (risk management and feedback mechanisms). The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What impact do automated systems have on rights, and how are transparency needs met by current laws?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are proposed in the Blueprint for an AI Bill of Rights to protect the rights of the American public?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to evaluate potential biases and stereotypes related to harmful bias and homogenization in AI-generated content. It is clear in specifying the topic of interest (evaluation of biases and stereotypes in AI-generated content) and seeks actionable information. The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the suggested actions to evaluate potential biases and stereotypes related to harmful bias and homogenization in AI-generated content?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'When using automated systems in sensitive domains, considerations should include tailoring the systems to their intended purpose, providing meaningful access for oversight, ensuring training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the issue of algorithmic discrimination and bias, focusing on measures and steps to mitigate these problems. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What issues come from using automated performance reviews without human oversight?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the implications of harmful bias in GAI systems and how it can be addressed, which involves understanding the consequences and potential solutions. The second question focuses specifically on strategies to reduce bias while maintaining data accuracy, which is narrower in scope. Therefore, they do not share the same depth and breadth of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the potential negative impacts of automated systems, focusing on risks and harms. They share similar constraints and requirements, as well as depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 3 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What biases does the automated sentiment analyzer reveal in online expressions, and how do these biases compare to issues found in predictive policing systems regarding transparency and fairness?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Confabulation', 'Generative AI systems', 'False content', 'Statistical prediction', 'Risks of confabulated content']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Preventable harms', 'Ethics review', 'Sepsis prediction model', 'Algorithmic bias']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question focuses on what designers and developers should provide to ensure transparency about the functioning of an automated system, while the second question asks about key elements that ensure clarity in documentation about an automated system's impact. The focus and requirements differ, leading to different depths and breadths of inquiry.", 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What roles do training and feedback mechanisms play in enhancing the understanding of digital content transparency within GAI systems, particularly regarding societal impacts and content provenance?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What factors to consider for model release strategies regarding risk and oversight?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is confabulation in the context of generative AI systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of harms caused by algorithmic bias in automated systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Executive Order 13960', 'Trustworthy Artificial Intelligence', 'AI Bill of Rights', 'NIST AI Risk Management Framework', 'Stakeholder engagement']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Sensitive domains', 'Human oversight', 'Algorithmic discrimination', 'Meaningful access']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for combined strategies to assess systemic bias in GAI (General Artificial Intelligence) systems and ensure equitable outputs across diverse demographic groups. It is clear in its intent, specifying the need for strategies that address both assessment of bias and equity in outputs. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the potential issues of automated performance evaluation in the workplace, specifically focusing on the lack of human oversight. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 2 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 3 times
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What methodologies are recommended for assessing biases and stereotypes in AI-generated content while ensuring effective feedback mechanisms from diverse user communities?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question focuses on the implications of automated systems on individuals' rights and opportunities, while the second question addresses the impact on rights and the transparency needs met by current laws. The second question introduces an additional dimension of legal transparency, leading to different depths and breadths of inquiry.", 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies help assess bias in GAI for fair outputs across demographics?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Bill of Rights in relation to the Executive Order on trustworthy artificial intelligence?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to avoid algorithmic discrimination in automated systems used within sensitive domains?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the measures proposed in the 'Blueprint for an AI Bill of Rights' to protect the rights of the American public. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or unspecified contexts, and it clearly seeks information about the proposed measures in the specified document.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for examples of automated systems that should be included in the Blueprint for an AI Bill of Rights. It is clear in its intent, seeking specific examples of automated systems, and does not rely on external references or prior knowledge beyond understanding what automated systems and the AI Bill of Rights are. The question is self-contained and understandable.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are some examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions focus on the considerations for model release strategies, including aspects like risk and oversight, sharing the same depth and breadth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Key elements that ensure clarity in documentation about an automated system's impact include providing generally accessible plain language documentation, clear descriptions of the overall system functioning and the role of automation, timely updates about significant use case or key functionality changes, and explanations of outcomes that are clear, timely, and accessible.", 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Risk assessment', 'Explanatory mechanisms', 'Transparency in decision-making', 'Summary reporting'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI risks', 'Human-AI interactions', 'Disinformation impact', 'Risk management resources', 'Trustworthy AI Characteristics'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Policies and procedures for human-AI configurations', 'Oversight of GAI systems', 'Risk measurement processes', 'Human-AI configuration', 'Threat modeling for GAI systems'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for examples of harms caused by algorithmic bias in automated systems. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or prior knowledge and clearly seeks information on the negative impacts of algorithmic bias.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for an explanation of 'confabulation' specifically within the context of generative AI systems. It is clear in specifying the term of interest (confabulation) and the context (generative AI systems), making the intent straightforward and understandable. The question is self-contained and does not rely on external references or additional context to be answered. Therefore, it meets the criteria for clarity and answerability.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is confabulation in the context of generative AI systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the roles of training and feedback mechanisms in enhancing the understanding of digital content transparency within GAI (General Artificial Intelligence) systems, with a focus on societal impacts and content provenance. It is specific in its scope (training and feedback mechanisms, digital content transparency, societal impacts, content provenance) and clear in its intent, seeking an explanation of how these mechanisms contribute to the understanding of transparency. 
The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about biases revealed by an automated sentiment analyzer in online expressions and seeks a comparison of these biases with issues found in predictive policing systems, specifically regarding transparency and fairness. The intent is clear, as it specifies the areas of interest (biases, transparency, fairness) and the systems to be compared (sentiment analyzer, predictive policing). However, it assumes familiarity with the specific biases and issues in both systems without providing context or examples. To improve clarity and answerability, the question could benefit from briefly describing the types of biases or issues typically associated with these systems or specifying particular aspects of transparency and fairness to be compared.', 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What biases does the automated sentiment analyzer reveal in online expressions, and how do these biases compare to issues found in predictive policing systems regarding transparency and fairness?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek methods or strategies for assessing bias in General Artificial Intelligence (GAI) systems to ensure fairness across demographics. They share the same constraints, requirements, and depth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.INFO] seed question generated: "What role does transparency in decision-making play in the design of automated systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Information sharing and feedback mechanisms', 'AI impact assessment', 'Organizational policies', 'Third-party rights'] +[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with human-AI interactions?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of engaging in threat modeling for GAI systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear in its intent, asking for recommended methodologies to assess biases and stereotypes in AI-generated content and how to ensure effective feedback mechanisms from diverse user communities. It is specific and does not rely on external references or prior knowledge not included in the question itself. The question is self-contained and understandable, making it answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do training and feedback improve understanding of digital content transparency in GAI systems?" 
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI trustworthy characteristics', 'Human-AI Configuration', 'Information Integrity', 'Data Privacy', 'Confabulation'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What types of automated systems, impacting civil rights, equal opportunities, and access to essential services, should be included in the AI Bill of Rights framework?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The Blueprint for an AI Bill of Rights proposes a set of five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. It includes expectations for automated systems, practical steps for implementation, and emphasizes transparency through reporting to ensure that rights, opportunities, and access are respected.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What role do organizational policies play in addressing AI risks associated with third-party entities?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the purpose of the AI Bill of Rights in relation to the Executive Order on trustworthy artificial intelligence. It is clear in specifying the two documents of interest (AI Bill of Rights and Executive Order on trustworthy AI) and seeks information on their relationship. The intent is clear, and the question is self-contained, not relying on external references or additional context. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated systems significantly impact rights by determining opportunities in various areas such as employment and credit, often without individuals being aware of the algorithms influencing decisions. Current laws, such as the Biometric Information Privacy Act in Illinois, require written notice when biometric information is used, and federal laws like the Fair Credit Reporting Act mandate that consumers receive adverse action notices when credit is denied. These laws aim to ensure transparency and provide individuals with the knowledge necessary to contest decisions made by automated systems.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to avoid algorithmic discrimination in automated systems used within sensitive domains. It is clear in its intent, seeking specific actions or strategies to address a well-defined issue (algorithmic discrimination) within a specified context (sensitive domains). The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of algorithmic discrimination and automated systems. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What measures should be taken to avoid algorithmic discrimination in automated systems used within sensitive domains?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What methods work for evaluating biases in AI content with diverse user feedback?" 
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'White House Office of Science and Technology Policy', 'Automated systems', 'Civil rights and democratic values', 'National security and defense activities'] +[ragas.testset.evolutions.INFO] seed question generated: "What is the role of the White House Office of Science and Technology Policy in relation to the AI Bill of Rights?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of transparency in decision-making within the context of designing automated systems. It is clear in specifying the topic of interest (transparency in decision-making) and the context (design of automated systems). The intent is to understand the impact or importance of transparency in this specific area, making it understandable and answerable without needing additional context or external references.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does transparency in decision-making play in the design of automated systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential risks associated with human-AI interactions. It is clear in its intent, seeking information on the risks involved in interactions between humans and AI. The question is independent and does not rely on external references or additional context to be understood. It is specific enough to be answerable by someone with domain knowledge in AI and human-computer interaction.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the potential risks associated with human-AI interactions?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about biases revealed by an automated sentiment analyzer in online expressions and seeks a comparison of these biases with issues found in predictive policing systems, specifically regarding transparency and fairness. The intent is clear, aiming to explore both the nature of biases in sentiment analysis and their parallels with predictive policing. However, the question assumes familiarity with the specific biases in both domains without providing context or examples. To improve clarity and answerability, the question could benefit from specifying the types of biases or providing a brief description of the known issues in both sentiment analysis and predictive policing systems.', 'verdict': 0} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account regarding data privacy when deploying a GAI system?" 
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of engaging in threat modeling for GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (threat modeling for GAI systems) and seeks an explanation of the purpose behind this activity. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of engaging in threat modeling for GAI systems?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on evaluating potential biases and stereotypes specifically related to harmful bias and homogenization in AI-generated content, while the second question is broader, asking about methods for evaluating biases in AI content with diverse user feedback. The depth and breadth of the inquiries differ.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for types of automated systems that impact civil rights, equal opportunities, and access to essential services, which should be included in the AI Bill of Rights framework. It is clear in its intent, specifying the areas of impact and the context of the AI Bill of Rights framework. The question is self-contained and does not rely on external references or prior knowledge not provided within the question itself. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Examples of harms caused by algorithmic bias in automated systems include: 1) A proprietary model predicting sepsis in hospitalized patients that underperformed and caused alert fatigue by falsely alerting likelihood of sepsis. 2) An automated moderation system on social media that silenced Black people who quoted and criticized racist messages, failing to distinguish their counter speech from the original hateful messages. 3) A device meant to help track lost items being misused by stalkers to track victims' locations, despite manufacturer attempts to implement safety measures. 4) An algorithm used for police deployment that sent officers to neighborhoods they regularly visited, rather than those with the highest crime rates, due to a feedback loop from previous data and predictions.", 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of organizational policies in addressing AI risks associated with third-party entities. It is clear in specifying the topic of interest (organizational policies) and the context (AI risks with third-party entities). The intent is straightforward, seeking an explanation of how these policies mitigate or manage such risks. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role do organizational policies play in addressing AI risks associated with third-party entities?" 
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for a specific term that describes the generation of false content by GAI (General Artificial Intelligence) that misleads users into trusting it. The intent is clear, seeking a specific term related to a well-defined concept. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The Blueprint for an AI Bill of Rights is consistent with the Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the Federal Government, which requires federal agencies to adhere to nine principles when using AI.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Fairness in Artificial Intelligence', 'Automatic signature verification', 'Ballot curing', 'Digital divide in unemployment benefits', 'Racial equity and underserved communities'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the significance of digital content transparency in GAI systems, while the second question is about how training and feedback improve understanding of this transparency. They have different constraints and requirements, leading to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What automated systems affecting civil rights and access to services belong in the AI Bill of Rights?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Automated systems', 'Bias testing', 'Equitable design', 'Systemic biases'] +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What term describes GAI's generation of false content that misleads users into trusting it?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What significance does the clarity and accessibility of decision-making explanations hold in the context of automated systems, particularly regarding risk assessment and user understanding?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key human oversight measures that prevent algorithmic discrimination in sensitive automated systems. It is clear in specifying the topic of interest (human oversight measures) and the context (preventing algorithmic discrimination in sensitive automated systems). The intent is clear, and the question is independent, as it does not rely on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of the White House Office of Science and Technology Policy (OSTP) in relation to the AI Bill of Rights. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. 
The question does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the role of the White House Office of Science and Technology Policy in relation to the AI Bill of Rights?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What multifaceted dangers arise from human interactions with generative AI, considering both immediate emotional impacts and broader societal implications?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of the NSF Program on Fairness in Artificial Intelligence in collaboration with Amazon?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does threat modeling play in identifying and mitigating risks associated with GAI systems, particularly in relation to organizational policies on transparency and risk management?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Artificial Intelligence and Democratic Values', 'Non-discriminatory technology', 'Explainable AI', 'Community participation', 'Social welfare systems'] +[ragas.testset.evolutions.INFO] seed question generated: "What role does bias testing play in preventing algorithmic discrimination in automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to address risks associated with intellectual property infringement in organizational GAI (General Artificial Intelligence) systems. It is specific in its focus on intellectual property infringement and organizational GAI systems, and it clearly seeks actionable recommendations. The intent is clear, and the question is self-contained, not relying on external references or additional context. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key human oversight measures prevent algorithmic discrimination in sensitive automated systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for considerations regarding data privacy when deploying a GAI (General Artificial Intelligence) system. It is clear in specifying the topic of interest (data privacy) and the context (deploying a GAI system). The intent is straightforward, seeking information on the factors to consider for data privacy in this specific scenario. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What considerations should be taken into account regarding data privacy when deploying a GAI system?" 
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about automated systems relevant to the AI Bill of Rights, focusing on civil rights and access to services, thus sharing the same depth, breadth, and requirements.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.INFO] seed question generated: "What role does technology play in implementing or improving social welfare systems?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What significance do organizational policies hold in mitigating AI-related risks posed by third-party entities, particularly in the context of oversight and impact documentation throughout the AI lifecycle?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions evaluating potential biases and stereotypes that could emerge from AI-generated content using appropriate methodologies, including computational testing methods as well as evaluating structured feedback input. Additionally, it suggests recording and integrating structured feedback about content provenance from operators, users, and potentially impacted communities through methods such as user research studies, focus groups, or community forums.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to demonstrate the safety and effectiveness of automated systems?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question asks for an explanation of 'confabulation' specifically in the context of generative AI systems, while the second question is looking for a term that refers to misleading false content generated by GAI. The depth and breadth of the inquiries differ as the first is more specific and the second is more general.", 'verdict': 0} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What functions does the OSTP serve in shaping the AI Bill of Rights, particularly in relation to public input and the protection of civil liberties?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Training and feedback improve understanding of digital content transparency in GAI systems by providing input for training materials about the capabilities and limitations of GAI systems related to digital content transparency. This includes actively seeking feedback on generated content quality and potential biases, as well as assessing the general awareness among end users and impacted communities about the availability of feedback channels.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the issue of preventing algorithmic discrimination or bias in sensitive systems, requiring similar measures and safeguards. They share the same depth and breadth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 2 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of clarity and accessibility of decision-making explanations in automated systems, specifically in the context of risk assessment and user understanding. It is clear in its intent, seeking to understand the importance of these factors. 
The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear in its intent, asking about the dangers of human interactions with generative AI, specifically focusing on both immediate emotional impacts and broader societal implications. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The question is specific enough to allow for a detailed and relevant response, covering both emotional and societal dimensions.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of the NSF Program on Fairness in Artificial Intelligence in collaboration with Amazon. It is clear in specifying the program of interest and seeks information on its importance or impact. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The suggested action to address risks associated with intellectual property infringement in organizational GAI systems is to compile statistics on actual policy violations, take-down requests, and intellectual property infringement, and analyze transparency reports across demographic and language groups.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Civil rights and liberties', 'Public consultation', 'Algorithmic harms'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What multifaceted factors regarding data privacy and content integrity must be evaluated when implementing a GAI system, particularly in relation to user feedback and the system's operational transparency?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How important are clear decision-making explanations in automated systems for risk assessment and user understanding?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of threat modeling in identifying and mitigating risks associated with GAI (General Artificial Intelligence) systems, specifically in the context of organizational policies on transparency and risk management. It is clear in specifying the topic of interest (threat modeling, GAI systems) and the specific aspects of organizational policies it relates to (transparency, risk management). 
The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What role does the 2023 Executive Order on Safe AI play in NIST's efforts to develop trustworthy artificial intelligence?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What risks come from human use of generative AI, both emotionally and socially?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of technology in implementing or improving social welfare systems. It is clear in its intent, seeking information on the impact or contribution of technology in this specific context. The question is independent and does not rely on external references or prior knowledge not included within the question itself. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The term that refers to GAI\'s misleading false content is "confabulation."', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0} +[ragas.testset.evolutions.INFO] retrying evolution: 0 times +[ragas.testset.evolutions.INFO] seed question generated: "What role did public input play in addressing algorithmic harms in the Blueprint for an AI Bill of Rights?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does threat modeling help with GAI risk and org policies on transparency?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI risks management', 'Risk response options', 'Model release approaches', 'Information security', 'Harmful bias mitigation'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of organizational policies in mitigating AI-related risks posed by third-party entities, with a focus on oversight and impact documentation throughout the AI lifecycle. It is specific in its scope (organizational policies, AI-related risks, third-party entities) and clear in its intent (understanding the role of policies in risk mitigation). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of bias testing in preventing algorithmic discrimination in automated systems. It is clear in specifying the topic of interest (bias testing) and the context (preventing algorithmic discrimination in automated systems). The intent is straightforward, seeking an explanation of the importance and impact of bias testing. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does bias testing play in preventing algorithmic discrimination in automated systems?" 
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to demonstrate the safety and effectiveness of automated systems. It is clear in its intent, seeking specific actions or protocols that can be implemented to ensure these systems are safe and effective. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What measures should be taken to demonstrate the safety and effectiveness of automated systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data protection', 'Privacy by design', 'User consent', 'Sensitive domains', 'Surveillance technologies'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address the importance of decision-making transparency in automated systems, the second question specifically focuses on risk assessment and user understanding, which adds additional constraints and depth.', 'verdict': 0} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.evolutions.INFO] seed question generated: "What techniques can be employed to mitigate harmful bias in AI-generated content?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do org policies help manage AI risks from third parties during the AI lifecycle?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the functions of the OSTP (Office of Science and Technology Policy) in shaping the AI Bill of Rights, with a focus on public input and the protection of civil liberties. It is clear in specifying the topic (OSTP's role in the AI Bill of Rights) and the aspects of interest (public input and civil liberties). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Supplier risk assessment framework', 'Third-party entities', 'Content provenance standards', 'GAI technology and service provider lists', 'Intellectual property and data privacy'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question broadly asks about potential risks associated with human-AI interactions, while the second question specifically focuses on emotional and social risks from the use of generative AI. 
This difference in scope and specificity leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What role does the OSTP play in the AI Bill of Rights regarding public input and civil liberties?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the general purpose of engaging in threat modeling for GAI systems, while the second question specifically asks about how threat modeling helps with GAI risk and organizational policies on transparency. These questions have different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of content provenance standards in evaluating third-party entities' performance?" +[ragas.testset.evolutions.INFO] seed question generated: "What role does user consent play in the collection and use of personal data?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Generative AI Public Working Group', 'GAI risk management', 'Governance', 'Content Provenance', 'AI lifecycle risks'] +[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to demonstrate the safety and effectiveness of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of the 2023 Executive Order on Safe AI in NIST's efforts to develop trustworthy artificial intelligence. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or unspecified contexts, and it clearly seeks information on the impact of a specific executive order on NIST's AI development efforts.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does the 2023 Executive Order on Safe AI play in NIST's efforts to develop trustworthy artificial intelligence?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the multifaceted factors related to data privacy and content integrity that need to be evaluated when implementing a GAI (Generative AI) system, with a particular focus on user feedback and the system's operational transparency. It is clear in specifying the areas of interest (data privacy, content integrity, user feedback, operational transparency) and seeks detailed information on these aspects. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What proactive measures, including bias testing, are essential for ensuring that automated systems are designed and deployed equitably to prevent algorithmic discrimination across various demographics?" 
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the role of organizational policies in managing AI risks associated with third-party entities, covering similar constraints and requirements, and having the same depth and breadth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 2 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of public input in addressing algorithmic harms within the context of the Blueprint for an AI Bill of Rights. It is clear in specifying the topic of interest (public input, algorithmic harms, Blueprint for an AI Bill of Rights) and seeks detailed information on the impact of public input. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role did public input play in addressing algorithmic harms in the Blueprint for an AI Bill of Rights?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the steps that ensure independent evaluation and reporting for automated system safety. It is clear in its intent, seeking specific steps or procedures related to the evaluation and reporting of safety in automated systems. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What factors on data privacy and content integrity should be considered for a GAI system, especially regarding user feedback and transparency?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated system', 'Plain language documentation', 'System functioning', 'Outcome explanations', 'User notification'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context emphasizes the importance of clear decision-making explanations in automated systems, particularly for risk assessment and user understanding. It states that explanations should accurately reflect the factors influencing decisions and be tailored to the purpose and audience, ensuring they are understandable and accessible. This is crucial in settings with high consequences, where transparency and clarity are necessary for accountability and user trust.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What steps ensure independent evaluation and reporting for automated system safety?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the primary considerations addressed in GAI risk management as outlined by NIST's Generative AI Public Working Group?" 
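For reproducibility: the records above are emitted by ragas' internal `ragas.testset.filters` and `ragas.testset.evolutions` loggers during test-set generation. A minimal sketch of how to surface them, using only the Python standard library; the format string is an assumption chosen to match the `[logger.LEVEL] message` shape of these records:

```python
import logging

# Print ragas' internal records (context scoring, question filters,
# evolution retries) in the "[ragas.testset.filters.DEBUG] ..." shape seen above.
logging.basicConfig(format="[%(name)s.%(levelname)s] %(message)s")
for name in ("ragas.testset.filters", "ragas.testset.evolutions"):
    logging.getLogger(name).setLevel(logging.DEBUG)
```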
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "While both questions inquire about the OSTP's role in the AI Bill of Rights, the second question specifically focuses on public input and civil liberties, adding additional constraints and depth.", 'verdict': 0} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for techniques to mitigate harmful bias in AI-generated content. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or unspecified contexts, and it clearly seeks information on methods to address bias in AI content generation.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What techniques can be employed to mitigate harmful bias in AI-generated content?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The risks that come from human use of generative AI (GAI) include immediate and prolonged emotional harm, potential risks to physical safety due to the distribution of harmful deepfake images, and the long-term effect of disinformation on societal trust in public institutions.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What should users be notified about regarding automated systems that impact them?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Algorithmic discrimination', 'Automated systems', 'Community impacts', 'Equity and access'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What significance does the 2023 Executive Order on Safe AI hold in shaping NIST's framework for ensuring the reliability and security of artificial intelligence systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of user consent in the collection and use of personal data. It is clear in its intent, seeking information on the importance and implications of user consent in this context. The question is independent and does not rely on external references or additional context to be understood. It is specific and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to demonstrate the safety and effectiveness of automated systems. It is clear in its intent, seeking specific actions or protocols that can be implemented to ensure these systems are safe and effective. The question is independent and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the significance of content provenance standards in evaluating third-party entities' performance. It is clear in specifying the topic of interest (content provenance standards) and the context (evaluating third-party entities' performance). The intent is also clear, seeking an explanation of the importance or impact of these standards. 
The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the significance of content provenance standards in evaluating third-party entities' performance?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The second question introduces additional factors such as content integrity, user feedback, and transparency, which are not explicitly mentioned in the first question. This leads to a broader and deeper inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Public consultation', 'Testing and deployment', 'Risk identification and mitigation', 'Safety and effectiveness'] +[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of evaluating the harms of automated systems at the community level according to the Blueprint for an AI Bill of Rights?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for measures to demonstrate safety and effectiveness of automated systems, while the second question focuses on independent evaluation and reporting for system safety. These questions have different constraints and requirements.', 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Safe and effective systems', 'Automated systems', 'Pre-deployment testing', 'Risk identification and mitigation', 'Independent evaluation'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear in its intent, asking for proactive measures, including bias testing, that are essential for ensuring equitable design and deployment of automated systems to prevent algorithmic discrimination across various demographics. It is specific and does not rely on external references or context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Safe and effective systems', 'Automated systems', 'Pre-deployment testing', 'Risk identification and mitigation', 'Independent evaluation'] +[ragas.testset.evolutions.INFO] seed question generated: "What are the expectations for ensuring that automated systems are safe and effective?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What steps, like bias testing, ensure fair automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connections between public input and the shaping of the AI Bill of Rights' principles. It is clear in its intent, seeking to understand the influence of public input on the development of these principles. The question is specific and does not rely on external references or prior knowledge beyond a general understanding of the AI Bill of Rights. 
Therefore, it meets the criteria for independence and clear intent.", 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of pre-deployment testing in the development of automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of pre-deployment testing in the development of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what users should be notified about regarding automated systems that impact them. It is clear in its intent, seeking information on the types of notifications or disclosures that should be provided to users. The question is independent and does not rely on external references or unspecified contexts. It is specific enough to be understood and answered based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What should users be notified about regarding automated systems that impact them?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The Office of Science and Technology Policy (OSTP) plays a crucial role in the AI Bill of Rights by leading a year-long process to seek and distill input from various stakeholders, including impacted communities, industry stakeholders, technology developers, and policymakers. This engagement informs the development of policies and practices that protect civil rights and promote democratic values in the governance of automated systems.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between public input and shaping the AI Bill of Rights' principles?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'User consent plays a crucial role in the collection and use of personal data, as it should only be used to justify data collection in cases where it can be appropriately and meaningfully given. Consent requests should be brief, understandable in plain language, and provide individuals with agency over data collection and its specific context of use.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for methods to reduce harmful bias in AI outputs while ensuring content integrity. It is clear in its intent, specifying the dual goals of bias reduction and content integrity. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role do content provenance standards play in assessing the performance and risks associated with third-party GAI systems, particularly in relation to information integrity and intellectual property?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for the primary considerations in GAI risk management as outlined by NIST's Generative AI Public Working Group. It is clear in specifying the topic (GAI risk management) and the source (NIST's Generative AI Public Working Group), making the intent clear. However, it assumes familiarity with the specific document or findings of the NIST's Generative AI Public Working Group without providing any context or summary of their work. 
To improve clarity and answerability, the question could include a brief description or key points from the NIST's Generative AI Public Working Group's findings, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What are the primary considerations addressed in GAI risk management as outlined by NIST's Generative AI Public Working Group?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What methods can reduce harmful bias in AI outputs while ensuring content integrity?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the significance of the 2023 Executive Order on Safe AI in shaping NIST's framework for ensuring the reliability and security of artificial intelligence systems. It is clear in specifying the topic of interest (2023 Executive Order on Safe AI) and the specific aspect of NIST's framework it impacts (reliability and security of AI systems). The intent is clear, seeking an explanation of the influence or role of the Executive Order. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To demonstrate the safety and effectiveness of automated systems, the following measures should be taken: 1. Independent evaluation should be allowed, enabling access for independent evaluators such as researchers and auditors to the system and associated data. 2. Reporting should be regularly updated, including an overview of the system, data used for training, risk assessments, performance testing results, and ongoing monitoring procedures. Reports should be provided in plain language and machine-readable formats.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated system', 'Plain language documentation', 'System functioning', 'Outcome explanations', 'User notification'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of evaluating the harms of automated systems at the community level according to the Blueprint for an AI Bill of Rights. It is clear in specifying the topic of interest (harms of automated systems, community level, Blueprint for an AI Bill of Rights) and seeks detailed information on the significance of this evaluation. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the significance of evaluating the harms of automated systems at the community level according to the Blueprint for an AI Bill of Rights?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address the prevention of algorithmic discrimination in automated systems, the first question specifically focuses on the role of bias testing, whereas the second question is broader and asks about various steps, including but not limited to bias testing. 
This difference in specificity leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Factors on data privacy and content integrity for a GAI system include documenting the extent to which human domain knowledge is employed to improve GAI system performance, reviewing and verifying sources and citations in GAI system outputs, tracking instances of anthropomorphization in GAI system interfaces, verifying GAI system training data and TEVV data provenance, and regularly reviewing security and safety guardrails. Additionally, structured feedback about content provenance should be recorded and integrated from operators, users, and impacted communities, and there should be an emphasis on digital content transparency regarding the societal impacts of AI and the role of diverse and inclusive content generation.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of pre-deployment testing in the development of automated systems. It is clear and specific, seeking information about the role and importance of this testing phase. The question is self-contained and does not rely on external references or additional context to be understood or answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the expectations for ensuring that automated systems are safe and effective. It is clear in its intent, seeking information on safety and effectiveness standards or guidelines for automated systems. The question is independent and does not rely on external references or unspecified contexts. However, it could be improved by specifying the type of automated systems (e.g., industrial robots, autonomous vehicles) or the context (e.g., regulatory, operational) to narrow down the scope and provide a more targeted answer.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Independent evaluation for system safety is ensured by designing automated systems to allow for independent evaluation through mechanisms such as application programming interfaces. Independent evaluators, including researchers, journalists, ethics review boards, inspectors general, and third-party auditors, should have access to the system and samples of associated data, consistent with privacy, security, law, or regulation. Additionally, entities responsible for automated systems should provide regularly-updated reports that include an overview of the system, data used, risk assessments, performance testing results, and independent evaluation outcomes, all presented in plain language and a machine-readable format.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of pre-deployment testing in the development of automated systems. It is clear and specific, seeking information about the role and importance of this testing phase. 
The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of pre-deployment testing in the development of automated systems?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about the role of public input in addressing algorithmic harms within the Blueprint for an AI Bill of Rights, while the second question is broader, asking generally about the influence of public input on the AI Bill of Rights. The depth and breadth of the inquiries differ.', 'verdict': 0} +[ragas.testset.evolutions.INFO] seed question generated: "What should designers and developers provide to ensure clear understanding of system functioning in automated systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What role does human-AI integration play in enhancing customer service?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 1, 'relevance': 2, 'score': 1.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Race correction in clinical algorithms', 'Algorithmic impact assessment', 'Racial equity', 'Algorithmic bias detection', 'Property appraisal and valuation equity', 'Executive Order on Advancing Racial Equity', 'Supreme Court Decision Roe v. Wade', 'Bill of Rights for an Automated Society', 'Sepsis prediction model', 'Apple AirTags stalking concerns'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key information that users must receive about automated systems affecting their outcomes. It is clear in its intent, seeking specific details about the necessary information users should be provided with. The question is independent and does not rely on external references or additional context to be understood. It is specific and straightforward, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of content provenance standards in evaluating the performance and risks of third-party GAI (Generative AI) systems, with a focus on information integrity and intellectual property. It is specific and clear in its intent, seeking to understand the impact of these standards on particular aspects of GAI systems. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Identity theft', 'Facial recognition system', 'Surveillance software', 'Employee discussions about union activity'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Transparency artifacts', 'Explainable AI (XAI)', 'Pre-trained models', 'Harmful bias', 'Content filters'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does community-level assessment play in mitigating algorithmic discrimination as outlined in the AI Bill of Rights?" 
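The stream above is ragas' synthetic test-set generation at work: each merged document node yields a seed question, a critic model scores it for clarity and answerability (`'verdict': 1` keeps it, `'verdict': 0` forces a rewrite), and surviving questions are evolved into the reasoning and multi-context variants logged here. As a minimal sketch of how such a run is typically launched, assuming ragas 0.1.x (whose `ragas.testset.*` logger names match these lines), OpenAI models, and hypothetical local paths for the two source PDFs:

```python
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from ragas.testset.generator import TestsetGenerator
from ragas.testset.evolutions import simple, reasoning, multi_context

# Hypothetical local copies of the Blueprint and NIST AI 600-1 PDFs.
docs = []
for path in ["data/ai-bill-of-rights.pdf", "data/nist-ai-600-1.pdf"]:
    docs.extend(PyMuPDFLoader(path).load())

generator = TestsetGenerator.from_langchain(
    generator_llm=ChatOpenAI(model="gpt-3.5-turbo"),  # writes questions/answers
    critic_llm=ChatOpenAI(model="gpt-4"),             # emits the 'verdict' feedback above
    embeddings=OpenAIEmbeddings(),
)

testset = generator.generate_with_langchain_docs(
    docs,
    test_size=20,
    # Mirrors the three evolution types visible in the logs.
    distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25},
    with_debugging_logs=True,  # surfaces the DEBUG filter/evolution entries shown here
)
```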
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the primary considerations in GAI risk management as outlined by NIST's Generative AI Public Working Group. It is specific in its focus on GAI risk management and the NIST Generative AI Public Working Group, making the intent clear. However, it assumes familiarity with the specific document or findings of the NIST Generative AI Public Working Group without providing any context or summary of these considerations. To improve clarity and answerability, the question could include a brief description or key points from the NIST document, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of algorithmic impact assessment as discussed in the context?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key info must users receive about automated systems affecting their outcomes?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do content provenance standards impact the performance and risks of third-party GAI systems regarding info integrity and IP?" +[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of how data privacy principles aim to protect against identity theft?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does extensive testing play in ensuring the safety and effectiveness of automated systems prior to their deployment, particularly in relation to community consultation and risk mitigation?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of pre-deployment testing in the development of automated systems is to identify risks and potential impacts of the system, ensuring that it is safe and effective based on its intended use, and to mitigate unsafe outcomes, including those beyond the intended use.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of reviewing transparency artifacts in the context of third-party models?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does the 2023 Exec Order on Safe AI impact NIST's AI reliability and security framework?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what designers and developers should provide to ensure a clear understanding of system functioning in automated systems. It is specific and independent, as it does not rely on external references or prior knowledge. The intent is clear, seeking information on best practices or necessary elements for clarity in automated systems. No improvements are necessary.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What should designers and developers provide to ensure clear understanding of system functioning in automated systems?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Public input influences the AI Bill of Rights by providing insights and feedback from impacted communities, industry stakeholders, technology developers, and experts. 
The White House Office of Science and Technology Policy conducted a year-long process to gather this input through various means, including panel discussions and public listening sessions, which helped shape the principles and practices outlined in the Blueprint for an AI Bill of Rights.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of human-AI integration in enhancing customer service. It is clear in its intent, seeking information on the impact or benefits of combining human and AI efforts in the context of customer service. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What role does human-AI integration play in enhancing customer service?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to products being changed or not launched to prevent harm. Federal government agencies are developing standards and guidance for the use of automated systems to help prevent bias. Non-profits and companies have developed best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek methods to reduce bias in AI-generated content, requiring similar constraints and depth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 3 times +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Pre-deployment testing', 'GAI system validity', 'Measurement gaps', 'Structured public feedback', 'AI Red-teaming'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions ask for the key information or notifications that users should receive about automated systems impacting them, sharing the same depth, breadth, and requirements for the answer.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The expectations for ensuring that automated systems are safe and effective include: 1) Safeguards to protect the public from harm in a proactive and ongoing manner; 2) Avoiding the use of data that is inappropriate or irrelevant to the task at hand; 3) Demonstrating the safety and effectiveness of the system. 
Additionally, there should be consultation with the public during the design and implementation phases, extensive testing before deployment, and identification and mitigation of potential risks.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Training data use', 'Intellectual property', 'Data privacy risks', 'Content provenance', 'Generative AI (GAI) risks'] +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of structured public feedback in evaluating GAI systems?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question focuses on the significance of content provenance standards in evaluating third-party entities' performance, while the second question addresses the impact of these standards on the performance and risks of third-party GAI systems specifically regarding information integrity and intellectual property. The second question has a broader scope and different depth of inquiry.", 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI lifecycle', 'AI technology risks', 'Organizational practices for AI', 'Impact documentation process', 'Content provenance methodologies'] +[ragas.testset.filters.DEBUG] evolution filter: {'reason': "Both questions inquire about the influence of the 2023 Executive Order on Safe AI on NIST's initiatives related to AI trustworthiness, reliability, and security, sharing the same depth, breadth, and requirements for the answer.", 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 2 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for examples of how data privacy principles protect against identity theft. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on the application of data privacy principles in the context of identity theft protection.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI technology mapping', 'Legal risks', 'Data privacy', 'Intellectual property', 'Harmful biases'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of community-level assessment in mitigating algorithmic discrimination as outlined in the AI Bill of Rights. It is specific in its focus on community-level assessment and its relation to algorithmic discrimination, and it references the AI Bill of Rights, which provides a clear context. The intent is clear, seeking an explanation of the role and impact of community-level assessment within the specified framework. The question is self-contained and does not rely on external references beyond the AI Bill of Rights, which is sufficiently well-known to provide necessary context.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account regarding intellectual property when conducting diligence on training data use?" 
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What essential elements must be included in the documentation and notifications provided by designers and developers to ensure users comprehend the functioning and decision-making processes of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of reviewing transparency artifacts in the context of third-party models. It is clear in specifying the topic of interest (transparency artifacts, third-party models) and seeks information on the purpose of this review. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of reviewing transparency artifacts in the context of third-party models?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of extensive testing in ensuring the safety and effectiveness of automated systems before deployment, with a particular focus on community consultation and risk mitigation. It is clear in its intent, specifying the aspects of safety, effectiveness, community consultation, and risk mitigation. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the purpose of algorithmic impact assessment but refers to 'the context' without providing or describing this context within the query. This makes the question unclear for those who do not have access to the unspecified context. For the question to be clear and answerable, it needs to either include the relevant context directly within the question or be framed in a way that does not require external information. Detailing the specific aspects or scenarios of algorithmic impact assessment being referred to could also help clarify the query.", 'verdict': 0} +[ragas.testset.evolutions.INFO] rewritten question: "What is the purpose of algorithmic impact assessment as discussed in the context?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the key oversight functions involved in the GAI lifecycle?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Predictive policing system', 'Gun violence risk assessment', 'Watch list transparency', 'System flaws in benefit allocation', 'Lack of explanation for decisions'] +[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to address data privacy risks in AI-generated content?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does community assessment help reduce algorithmic bias in the AI Bill of Rights?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does testing ensure the safety of automated systems before deployment, especially regarding community input and risk?" 
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.evolutions.INFO] seed question generated: "What issues arise from system flaws in benefit allocation?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['CBRN Information', 'Confabulation', 'Dangerous content', 'Data Privacy', 'Harmful Bias'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the benefits of combining AI tools with human agents in customer service. It is clear in its intent, seeking information on the advantages of this combination. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human subject protection', 'Content provenance', 'Data privacy', 'AI system performance', 'Anonymization techniques'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'User consent', 'Automated systems', 'Surveillance technologies', 'Sensitive domains'] +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What benefits arise from combining AI tools with human agents in customer service?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of eased access to dangerous content in relation to violent or hateful material?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Examples of how data privacy principles aim to protect against identity theft include: a data broker harvesting large amounts of personal data and suffering a breach that exposes individuals to potential identity theft, and an insurer collecting data from a person's social media presence to determine life insurance rates, which could lead to misuse of personal information.", 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What requirements must evaluations involving human subjects meet to ensure human subject protection?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations related to intellectual property when conducting diligence on training data use. It is clear in its intent, seeking specific information on intellectual property considerations in the context of training data diligence. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role do transparency artifacts play in ensuring the integrity and security of third-party AI models during their deployment and monitoring?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the significance of evaluating harms at the community level according to the Blueprint for an AI Bill of Rights, while the second question is more specific to how community assessment helps reduce algorithmic bias. 
They have different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to ensure data privacy and protect individuals from abusive data practices?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Fairness in Artificial Intelligence', 'Automatic signature verification', 'Ballot curing', 'Digital divide in unemployment benefits', 'Racial equity and underserved communities'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for the essential elements that should be included in documentation and notifications to help users understand the functioning and decision-making processes of automated systems. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking detailed information on documentation and notification practices for user comprehension in the context of automated systems.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to address data privacy risks in AI-generated content. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or unspecified contexts, and it clearly seeks information on suggested measures for a particular issue.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What measures are suggested to address data privacy risks in AI-generated content?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the general purpose of pre-deployment testing in automated systems, while the second question specifically addresses how testing ensures safety, with an emphasis on community input and risk. These questions have different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of structured public feedback in evaluating GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (structured public feedback) and the context (evaluating GAI systems). The intent is straightforward, seeking an explanation of the role or benefits of structured public feedback in this specific context. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of structured public feedback in evaluating GAI systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the purpose of algorithmic impact assessment but refers to 'the context' without providing or describing this context within the query. This makes the question unclear for those who do not have access to the unspecified context. For the question to be clear and answerable, it needs to either include the relevant context directly within the question or be framed in a way that does not require external information. 
Detailing the specific aspects or scenarios of algorithmic impact assessment being referred to could also help clarify the query.", 'verdict': 0} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.INFO] seed question generated: "What threat does automatic signature verification software pose to U.S. voters?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the benefits and roles of integrating AI with human agents in customer service, requiring similar depth and breadth of explanation.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key oversight functions involved in the GAI (General Artificial Intelligence) lifecycle. It is clear in its intent, seeking specific information about oversight functions within a defined context (GAI lifecycle). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What key info should designers and devs include for user understanding of automated systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about issues arising from system flaws in benefit allocation. It is clear in its intent, seeking information on the problems caused by flaws in the benefit allocation system. The question is independent and does not rely on external references or additional context to be understood. It is specific enough to be answerable by someone with knowledge in the domain of benefit allocation systems.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What issues arise from system flaws in benefit allocation?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 1, 'relevance': 3, 'score': 2.0} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Bill of Rights', 'Automated systems', 'Algorithmic discrimination protections', 'Equitable design', 'Independent evaluation and reporting'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of eased access to dangerous content, specifically violent or hateful material. It is clear in its intent to understand the consequences of such access. The question is self-contained and does not rely on external references or unspecified contexts. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are the implications of eased access to dangerous content in relation to violent or hateful material?" 
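Note the `{'answer': 'The answer to given question is not present in context', 'verdict': -1}` entries: the answer filter could not ground that question in its retrieved context. Depending on the ragas version, a few such rows can still survive into the finished test set with that literal string as their ground truth, so it is worth dropping them before evaluation. A hedged sketch, assuming the 0.1.x `to_pandas()` schema (`question`, `contexts`, `ground_truth`, `evolution_type`):

```python
df = testset.to_pandas()

# Drop rows whose ground truth is the "not present in context" marker seen
# in the logs above; they would only add noise to downstream RAGAS metrics.
not_grounded = "The answer to given question is not present in context"
df = df[df["ground_truth"].astype(str).str.strip() != not_grounded]
df = df.reset_index(drop=True)

print(f"{len(df)} grounded QA pairs kept")
print(df["evolution_type"].value_counts())  # simple / reasoning / multi_context mix
```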
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Considerations regarding intellectual property when conducting diligence on training data use include assessing risks related to intellectual property and privacy, and examining whether the use of proprietary or sensitive training data is consistent with applicable laws.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to ensure that automated systems are safe and effective?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the requirements for evaluations involving human subjects to ensure their protection. It is clear in its intent, seeking specific information about the criteria or standards that must be met for human subject protection. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What requirements must evaluations involving human subjects meet to ensure human subject protection?" +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What combined strategies are recommended for mitigating privacy and intellectual property risks associated with AI-generated content?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI risks management', 'Risk response options', 'Model release approaches', 'Information security', 'Harmful bias mitigation'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Prompt injection', 'Indirect prompt injection attacks', 'Data poisoning', 'Intellectual property risks', 'Obscene and degrading content'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of transparency artifacts in ensuring the integrity and security of third-party AI models during deployment and monitoring. It is clear in specifying the topic of interest (transparency artifacts, integrity, security, third-party AI models) and seeks detailed information on their role. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions ask for the essential information that designers and developers should provide to ensure user understanding of automated systems, sharing the same depth, breadth, and requirements for the answer.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The key oversight functions involved in the GAI lifecycle include senior leadership, legal, compliance, and internal evaluation.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when determining model release approaches?" 
+[ragas.testset.evolutions.INFO] seed question generated: "What are the intellectual property risks associated with GAI systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure data privacy and protect individuals from abusive data practices. It is clear in its intent, seeking specific actions or strategies to address data privacy and protection issues. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What measures should be taken to ensure data privacy and protect individuals from abusive data practices?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Safe and effective systems', 'Automated systems', 'Pre-deployment testing', 'Risk identification and mitigation', 'Independent evaluation'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "Whatā€™s the role of transparency artifacts in securing third-party AI models?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['TEVV metrics', 'Measurement error models', 'GAI system risks', 'Feedback processes', 'Harmful bias and homogenization'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the threat posed by automatic signature verification software to U.S. voters. It is specific in its focus on a particular technology (automatic signature verification software) and its potential impact on a defined group (U.S. voters). The intent is clear, seeking information on the negative implications or risks associated with this technology. The question is self-contained and does not rely on external references or prior knowledge beyond general understanding of the terms used.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Testing ensures the safety of automated systems before deployment by requiring extensive testing that follows domain-specific best practices, taking into account the specific technology used and the roles of human operators. This testing should mirror real-world conditions and include both automated and human-led testing. Additionally, community input is gathered through consultation during the design and implementation phases, allowing for the identification and mitigation of potential risks that may impact rights and access, particularly for affected communities. Concerns raised during this consultation should be documented and considered in the development process, ensuring that the system is safe and effective based on community feedback.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What potential risks arise from the increased accessibility of violent or hateful content, particularly in relation to the facilitation of CBRN weapon knowledge and the spread of misinformation?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the key components of risk identification and mitigation in the development of automated systems?" 
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Organizational responsibilities', 'Incident monitoring', 'Document retention policy', 'AI system inventory'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What criteria must be fulfilled in evaluations involving human subjects to ensure their protection while also considering the implications of AI system performance and content transparency?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the problems that arise from hidden criteria changes in benefit allocation systems. It is clear in its intent, seeking information on the negative consequences of such hidden changes. The question is specific and does not rely on external references or additional context to be understood. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the roles of structured public feedback and participatory methods in the evaluation of GAI (General Artificial Intelligence). It is clear in specifying the two elements of interest (structured public feedback and participatory methods) and the context (GAI evaluation). The intent is to understand the impact or contribution of these elements in the evaluation process. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI Risk Management Framework', 'Generative AI', 'Cross-sectoral profile', 'Risk management priorities', 'Large language models'] +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure the safety and effectiveness of automated systems. It is clear in its intent, seeking specific actions or strategies. The question is independent and does not rely on external references or context. However, it could be improved by specifying the type of automated systems (e.g., industrial robots, AI software) to provide more targeted and relevant answers.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What measures should be taken to ensure that automated systems are safe and effective?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for combined strategies to mitigate privacy and intellectual property risks associated with AI-generated content. It is clear in specifying the type of risks (privacy and intellectual property) and the context (AI-generated content), making the intent clear and understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it independent and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What problems stem from hidden criteria changes in benefit allocation systems?" 
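The recurring `retrying evolution: N times` lines are expected behavior rather than errors: an evolution is rejected and re-attempted whenever the evolution filter judges the new question equivalent to its parent (`'verdict': 1` on the `evolution filter` entries above), or a merged node's `context scoring` falls below the quality threshold (about 1.5, judging by which scores proceed to keyphrase extraction here). If a run spends too long in these loops, the retry and timeout knobs live on the run config. A sketch under the assumption that this ragas version accepts a `RunConfig` on the generate call:

```python
from ragas.run_config import RunConfig

testset = generator.generate_with_langchain_docs(
    docs,
    test_size=20,
    distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25},
    # Assumed knobs: cap per-call retries and waits so a flaky run fails fast
    # instead of cycling through evolution retries indefinitely.
    run_config=RunConfig(timeout=180, max_retries=15, max_wait=90),
)
```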
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the role of transparency artifacts in the context of third-party models, focusing on their purpose and role in security, thus sharing the same depth, breadth, and requirements.', 'verdict': 1} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What roles do structured public feedback and participatory methods play in GAI evaluation?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of maintaining a document retention policy in relation to GAI systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Risk Management Framework for Generative AI?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for determining model release approaches. It is clear in its intent, seeking information on factors to consider when deciding how to release a model. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI system incidents', 'Organizational risk management', 'Incident response processes', 'Third-party GAI resources', 'Data privacy and localization compliance'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies help with privacy and IP risks in AI content?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the intellectual property risks associated with GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (intellectual property risks) and the context (GAI systems). The question is self-contained and does not rely on external references or additional context to be understood. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the intellectual property risks associated with GAI systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What organizational risk tolerances should be applied to the utilization of third-party GAI resources?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automatic signature verification software threatens to disenfranchise U.S. voters.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for key actions to ensure ethical data collection and prioritization of privacy. It is clear in its intent, seeking specific actions or practices related to data ethics and privacy. 
The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What proactive strategies should be implemented during the design and deployment of automated systems to ensure they are both safe and free from algorithmic discrimination, particularly for underserved communities?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the key components of risk identification and mitigation in the development of automated systems. It is clear in specifying the topic of interest (risk identification and mitigation) and the context (development of automated systems). The intent is straightforward, seeking a list or description of key components, making it understandable and answerable based on the details provided. No additional context or external references are needed to comprehend or respond to the question.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question addresses issues from system flaws in benefit allocation, while the second question focuses on issues from hidden criteria changes in benefit allocation. These are different sources of issues, leading to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear in its intent, asking about the potential risks associated with the increased accessibility of violent or hateful content, specifically in relation to the facilitation of CBRN (Chemical, Biological, Radiological, and Nuclear) weapon knowledge and the spread of misinformation. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The question is specific and seeks a detailed analysis of the risks involved, which can be provided with sufficient domain knowledge.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What key actions ensure your data is collected ethically and your privacy is prioritized?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address risks associated with AI-generated content, the first question specifically focuses on data privacy risks, whereas the second question encompasses both privacy and intellectual property risks, leading to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.evolutions.INFO] seed question generated: "What role does automated customer service play in enhancing customer care?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the criteria necessary for evaluations involving human subjects to ensure their protection, while also considering the implications of AI system performance and content transparency. It is clear in its intent, specifying the need for criteria related to human subject protection, AI performance, and content transparency. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of establishing feedback processes for end users and impacted communities in the context of AI system evaluation metrics. 
It is clear in specifying the topic of interest (feedback processes, end users, impacted communities, AI system evaluation metrics) and seeks an explanation of the rationale behind this practice. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What risks come from easier access to violent content, especially regarding CBRN knowledge and misinformation?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of maintaining a document retention policy in relation to GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (document retention policy) and its relation to GAI systems, making the intent clear and specific. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of maintaining a document retention policy in relation to GAI systems?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI trustworthy characteristics', 'Human-AI Configuration', 'Information Integrity', 'Data Privacy', 'Confabulation'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What criteria ensure human subject protection in AI evaluations?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'When determining model release approaches, considerations should include documenting trade-offs, decision processes, and relevant measurement and feedback results for risks that do not surpass organizational risk tolerance. Additionally, different approaches for model release should be considered, such as leveraging a staged release approach and evaluating release approaches in the context of the model and its projected use cases.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions inquire about the role of public feedback in evaluating GAI systems, sharing the same constraints, requirements, and depth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Risk Management Framework for Generative AI. It is clear in specifying the topic of interest (AI Risk Management Framework for Generative AI) and seeks specific information about its purpose. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What measures should be taken to address confabulation in GAI system outputs?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the organizational risk tolerances that should be applied to the utilization of third-party GAI (General Artificial Intelligence) resources. 
It is clear in its intent, seeking specific information on risk tolerances related to a particular context (third-party GAI resources). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks for measures to ensure data privacy and protect against abusive data practices, while the second question is broader, asking about ethical data collection and privacy in general. They differ in depth and breadth.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on general evaluations involving human subjects, while the second specifically addresses AI evaluations. This difference in context leads to different constraints and requirements.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'While both questions address the risks of easier access to harmful content, the first question focuses on violent or hateful material in general, whereas the second question specifically mentions CBRN knowledge and misinformation, leading to different depths and specificities.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 1.75}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Security measures assessment', 'Transparency and accountability risks', 'Intellectual property infringement', 'Digital content transparency solutions', 'Human-AI configuration']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of automated customer service in enhancing customer care. It is clear in its intent, seeking information on the impact or contribution of automated customer service to customer care. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What role does automated customer service play in enhancing customer care?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI risks management', 'Risk response options', 'Model release approaches', 'Information security', 'Harmful bias mitigation']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for proactive strategies to ensure the safety and fairness of automated systems, with a particular focus on avoiding algorithmic discrimination against underserved communities. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking actionable strategies for design and deployment phases.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does a document retention policy play in ensuring the integrity and oversight of GAI systems throughout their lifecycle?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the risks to intellectual property (IP) arising from the use of copyrighted works and data poisoning by generative AI (GAI). It is clear in specifying the topic of interest (risks to IP) and the specific concerns (use of copyrighted works and data poisoning). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Issues arising from hidden criteria changes in benefit allocation include individuals being denied benefits due to data entry errors and other system flaws, which were only revealed when an explanation of the system was demanded. The lack of transparency made it harder for errors to be corrected in a timely manner.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Strategies to help with privacy and intellectual property (IP) risks in AI content include conducting periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, documenting training data curation policies, establishing policies for collection and retention of data, and conducting appropriate diligence on training data use to assess intellectual property and privacy risks.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The key components of risk identification and mitigation in the development of automated systems include pre-deployment testing, risk identification and mitigation processes, ongoing monitoring, and adherence to domain-specific standards. These components aim to ensure that systems are safe and effective based on their intended use and to mitigate unsafe outcomes, including those beyond the intended use.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What strategies ensure safe, fair automated systems for underserved communities?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the risks associated with transparency and accountability as identified in the MAP function?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the different risk response options identified for high-priority AI risks?"
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What risks to IP arise from GAI's use of copyrighted works and data poisoning?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics is to allow these groups to report problems and appeal system outcomes, ensuring that the impact of AI-generated content on different social, economic, and cultural groups is assessed and understood.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to address confabulation in GAI (General Artificial Intelligence) system outputs. It is clear in specifying the issue (confabulation) and the context (GAI system outputs), making the intent straightforward. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the AI Risk Management Framework (AI RMF) for Generative AI is to improve the ability of organizations to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems. It assists organizations in deciding how to best manage AI risks in alignment with their goals, legal/regulatory requirements, and best practices.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI model explanation', 'GAI risks', 'Privacy risk assessment', 'Data provenance', 'Human-AI configuration']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What impact do integrated automated customer service systems have on addressing complex customer needs while ensuring human oversight?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 1, 'score': 1.25}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Equitable design', 'Automated systems', 'Legal protections', 'Proactive equity assessments']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on general measures for ensuring the safety and effectiveness of automated systems, while the second question specifically addresses strategies for ensuring that automated systems are safe and fair for underserved communities. The scope and requirements differ.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To ensure ethical data collection and privacy, designers, developers, and deployers of automated systems should seek user permission and respect their decisions regarding data collection, use, access, transfer, and deletion. They should implement built-in protections, ensure data collection conforms to reasonable expectations, and only collect data that is strictly necessary. Consent should be meaningful and understandable, and enhanced protections should be in place for sensitive domains. Additionally, there should be oversight of surveillance technologies to protect privacy and civil liberties.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What actions are suggested for conducting a privacy risk assessment of the AI system?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question broadly asks about intellectual property risks associated with GAI systems, while the second question specifically focuses on IP risks from using copyrighted works and data poisoning. This difference in specificity leads to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Human alternatives', 'Healthcare navigators', 'Automated customer service', 'Ballot curing laws', 'Human-AI integration']
+[ragas.testset.evolutions.INFO] seed question generated: "What role do legal protections play in addressing algorithmic discrimination?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data privacy', 'Identity theft', 'Facial recognition system', 'Surveillance software', 'Employee discussions about union activity']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of a document retention policy in ensuring the integrity and oversight of GAI (General Artificial Intelligence) systems throughout their lifecycle. It is clear in specifying the topic of interest (document retention policy) and the context (integrity and oversight of GAI systems). The intent is also clear, seeking an explanation of the policy's role. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What role does human-AI integration play in enhancing customer service?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the different risk response options identified for high-priority AI risks. It is clear in specifying the topic of interest (risk response options) and the context (high-priority AI risks). The intent is straightforward, seeking a list or description of these options. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the different risk response options identified for high-priority AI risks?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Organizational risk tolerances that should be applied to the utilization of third-party GAI resources include applying risk tolerances to the utilization of third-party datasets and other GAI resources, fine-tuned third-party models, and existing third-party models adapted to a new domain. Additionally, it involves reassessing risk measurements after fine-tuning third-party GAI models.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Eased access to violent content can lead to the production of and access to violent, inciting, radicalizing, or threatening content, as well as recommendations to carry out self-harm or conduct illegal activities. This includes difficulty controlling public exposure to hateful and disparaging or stereotyping content. Additionally, the lowered barrier to generate and support the exchange of content may not distinguish fact from opinion or acknowledge uncertainties, which could be leveraged for large-scale dis- and mis-information campaigns, potentially impacting the operational likelihood of attacks involving CBRN knowledge.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Continuous monitoring of GAI system impacts', 'Harmful bias and homogenization', 'Structured human feedback exercises', 'GAI red-teaming', 'Information integrity']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does a document retention policy support GAI system integrity?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are some examples of how data privacy principles aim to protect against identity theft?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Incident response plans', 'Third-party GAI technologies', 'Data privacy', 'Continuous monitoring', 'Vendor contracts']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the impact of integrated automated customer service systems on addressing complex customer needs while ensuring human oversight. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on both the effectiveness of these systems in handling complex needs and the role of human oversight.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of structured human feedback exercises in the context of GAI risk measurement and management?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI system context', 'Harmful bias and homogenization', 'Interdisciplinary AI actors', 'Risk measurement plans', 'Human-AI configuration']
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account regarding data privacy when establishing incident response plans for third-party GAI technologies?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To address confabulation in GAI system outputs, the following measures should be taken: review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities (MS-2.5-003), and avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments (MS-2.5-001).', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for suggested actions to conduct a privacy risk assessment of an AI system. It is clear in its intent, seeking specific steps or actions related to privacy risk assessment. The question is independent and does not rely on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What actions are suggested for conducting a privacy risk assessment of the AI system?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do automated customer service systems meet complex needs with human oversight?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI technologies', 'Content provenance', 'Synthetic content detection', 'Digital transparency mechanisms', 'Provenance data tracking']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Strategies to ensure safe and fair automated systems for underserved communities include conducting proactive equity assessments during the design phase, using representative and robust data, guarding against proxies that may lead to algorithmic discrimination, and implementing ongoing monitoring and evaluation to confirm protections against algorithmic discrimination. These strategies aim to identify potential discrimination and effects on equity, ensuring that the systems are designed and deployed in an equitable manner.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of legal protections in addressing algorithmic discrimination. It is clear in specifying the topic of interest (legal protections) and the issue it addresses (algorithmic discrimination). The intent is straightforward, seeking an explanation of how legal measures can mitigate or address biases in algorithms. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of legal protections and algorithmic discrimination.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for the purpose of maintaining a document retention policy in relation to GAI systems, while the second question focuses on how such a policy supports GAI system integrity. Although related, the questions have different focuses and thus different depths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of human-AI configuration in the context of risk measurement and management for GAI systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Human subject protection in AI evaluations is ensured through several criteria, including: 1) evaluations involving human subjects must meet applicable requirements and be representative of the relevant population; 2) options must be provided for human subjects to withdraw participation or revoke consent for the use of their data; 3) techniques such as anonymization and differential privacy should be used to minimize risks associated with linking AI-generated content back to individual human subjects; 4) documentation of how content provenance data is tracked and how it interacts with privacy and security is necessary, including the removal of personally identifiable information (PII).', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of content provenance in managing risks associated with AI-generated synthetic content?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of human-AI integration in enhancing customer service. It is clear in its intent, seeking information on how the combination of human and AI efforts can improve customer service. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Intellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair use under the fair use doctrine. If a GAI system's training data included copyrighted material, GAI outputs displaying instances of training data memorization could infringe on copyright. Additionally, data poisoning poses a risk where an adversary compromises a training dataset used by a model to manipulate its outputs or operation, potentially leading to malicious tampering with data or parts of the model.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking for examples of how data privacy principles protect against identity theft. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking information on the application of data privacy principles in the context of identity theft protection.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What are some examples of how data privacy principles aim to protect against identity theft?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the role of automated customer service in enhancing customer care, while the second question addresses how automated customer service systems meet complex needs with human oversight. These questions have different constraints and requirements, as well as different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about options for addressing high-priority AI risks and their relation to organizational tolerance. It is clear in its intent, seeking information on both the strategies for mitigating AI risks and how these strategies align with an organization's risk tolerance. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Human alternatives', 'Opt-out mechanism', 'Timely human consideration', 'Fallback and escalation system', 'Automated systems', 'Equitable access', 'Timely consideration', 'Human decision-maker', 'Automation bias']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['TEVV metrics', 'Measurement error models', 'GAI system risks', 'Feedback processes', 'Harmful bias and homogenization']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context does not provide specific information on how a document retention policy supports GAI system integrity.', 'verdict': -1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the AI Safety Institute established by NIST?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The context mentions that algorithmic discrimination may violate legal protections depending on specific circumstances, indicating that legal protections play a role in addressing algorithmic discrimination.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What options exist for addressing high-priority AI risks, and how do they relate to organizational tolerance?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the risks associated with transparency and accountability as identified in the MAP function. It is clear in specifying the topic of interest (risks, transparency, accountability, MAP function) and seeks detailed information on these risks. However, the question assumes familiarity with the 'MAP function' without providing any context or explanation of what it entails. To improve clarity and answerability, the question could benefit from a brief description or definition of the MAP function, or alternatively, frame the question in a way that does not rely on specific, potentially unfamiliar terminology.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What are the risks associated with transparency and accountability as identified in the MAP function?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-generated content', 'Real-time auditing tools', 'User feedback mechanisms', 'Synthetic data', 'Incident response and recovery plans']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of structured human feedback exercises specifically in the context of GAI (General Artificial Intelligence) risk measurement and management. It is clear in specifying the topic of interest (structured human feedback exercises) and the context (GAI risk measurement and management), making the intent clear and the question self-contained. No additional context or external references are needed to understand or answer the question.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of timely human consideration in the context of automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What instances illustrate how principles of data privacy mitigate risks associated with unauthorized data collection and potential identity theft?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the steps that connect AI red-teaming and stakeholder engagement in the context of assessing privacy risks. It is clear in its intent, specifying the two activities (AI red-teaming and stakeholder engagement) and the context (assessing privacy risks). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of using structured feedback mechanisms in relation to AI-generated content?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Human-AI integration plays a key role in enhancing customer service by allowing companies to provide faster customer care through partially automated customer service platforms. These systems help answer customer questions and compile common problems for human agents to review, while maintaining human agents to respond to complicated requests. This integration is viewed as essential for successful customer service.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of content provenance in managing risks associated with AI-generated synthetic content. It is clear in specifying the topic of interest (content provenance) and the context (managing risks associated with AI-generated synthetic content). The intent is clear, seeking an explanation of the importance or role of content provenance in this specific context. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What steps link AI red-teaming and stakeholder engagement in assessing privacy risks?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Information integrity', 'Human-AI configuration', 'Digital content transparency', 'Harmful bias and homogenization']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Opt out', 'Human alternatives', 'Automated systems', 'Human consideration', 'Sensitive domains']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Automated customer service systems meet complex needs with human oversight by integrating automated services such as chat-bots and AI-driven call response systems, which can escalate issues to a human support team when necessary. This allows companies to provide faster customer care while ensuring that human agents are available to handle complicated requests.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of human-AI configuration in the context of risk measurement and management for GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (human-AI configuration) and the context (risk measurement and management for GAI systems). The intent is to understand the importance or impact of this configuration within the given context. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the significance of human-AI configuration in the context of risk measurement and management for GAI systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What considerations should be taken into account when using automated systems in sensitive domains?"
+[ragas.testset.evolutions.INFO] seed question generated: "What is the significance of human-AI configuration in managing GAI risks and ensuring information integrity?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on identifying risk response options for high-priority AI risks, while the second question asks for options and their link to organizational tolerance, adding an additional layer of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the AI Safety Institute established by NIST. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about the purpose of a specific institute.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of the AI Safety Institute established by NIST?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 2, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Reporting expectations', 'Transparency', 'Artificial Intelligence ethics', 'Traffic calming measures', 'AI Risk Management Framework', 'National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for data privacy when establishing incident response plans for third-party GAI (General Artificial Intelligence) technologies. It is specific in its focus on data privacy and incident response plans, and it clearly identifies the context of third-party GAI technologies. The intent is clear, seeking information on what factors need to be considered. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What considerations should be taken into account regarding data privacy when establishing incident response plans for third-party GAI technologies?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of structured human feedback exercises in the context of GAI risk measurement and management is to define use cases, contexts of use, capabilities, and negative impacts where these exercises would be most beneficial. They are aimed at monitoring and improving outputs, evaluating the quality and integrity of data used in training, and tracking risks or opportunities related to GAI that cannot be measured quantitatively.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 2, 'relevance': 2, 'score': 1.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Artificial Intelligence Decisionmaking', 'Biometric Information Privacy Act', 'Model Cards for Model Reporting', 'Adverse Action Notice Requirements', 'Explainable Artificial Intelligence (XAI)']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the risks associated with transparency and accountability as identified in the MAP function. While it specifies the topic of interest (risks, transparency, accountability, MAP function), it assumes familiarity with what the 'MAP function' refers to without providing any context or explanation. This makes the question unclear for those who do not know what the MAP function is or its relevance to transparency and accountability. To improve clarity and answerability, the question could include a brief description of the MAP function or specify the context in which it is used.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for instances that illustrate how principles of data privacy mitigate risks associated with unauthorized data collection and potential identity theft. It is clear in specifying the topic of interest (principles of data privacy) and the risks it aims to address (unauthorized data collection and identity theft). The intent is clear, seeking specific examples or instances. The question is self-contained and does not rely on external references or prior knowledge not included within the question itself.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the role of the National Institute of Standards and Technology in advancing artificial intelligence?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for specific actions to conduct a privacy risk assessment of an AI system, while the second question focuses on the relationship between AI red-teaming and stakeholder engagement in the context of privacy risk assessment. These questions have different constraints and requirements, as well as different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of creating measurement error models for pre-deployment metrics within the context of TEVV (Test, Evaluation, Verification, and Validation) processes. It is clear in specifying the topic of interest (measurement error models, pre-deployment metrics, TEVV processes) and seeks an explanation of the purpose behind this practice. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of using structured feedback mechanisms in relation to AI-generated content. It is clear in specifying the topic of interest (structured feedback mechanisms) and the context (AI-generated content). The intent is straightforward, seeking an explanation of the purpose or benefits of these mechanisms. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of Model Cards for Model Reporting in the context of artificial intelligence?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of timely human consideration in the context of automated systems. It is clear in specifying the topic of interest (timely human consideration) and the context (automated systems). The intent is to understand the significance of human involvement in automated processes, which is a specific and answerable query. The question is self-contained and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the importance of timely human consideration in the context of automated systems?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What objectives does the U.S. AI Safety Institute aim to achieve in relation to the standards and frameworks for managing AI risks as outlined by NIST?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about considerations for using automated systems in sensitive domains. It is clear in its intent, seeking information on factors to consider, and does not rely on external references or unspecified contexts. The question is specific and independent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What considerations should be taken into account when using automated systems in sensitive domains?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Content provenance is significant in managing risks associated with AI-generated synthetic content as it involves digital transparency mechanisms like provenance data tracking, which can trace the origin and history of content. This helps in distinguishing human-generated content from AI-generated synthetic content, facilitating greater information access about both authentic and synthetic content. Provenance data tracking can assist in assessing authenticity, integrity, intellectual property rights, and potential manipulations in digital content, thereby improving information integrity and upholding public trust.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Participatory engagement methods', 'Field testing', 'AI red-teaming', 'User feedback', 'Risk management', 'Pre-deployment testing', 'GAI system validity', 'Measurement gaps', 'Structured public feedback', 'AI Red-teaming']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the significance of human-AI configuration in managing GAI (General Artificial Intelligence) risks and ensuring information integrity. It is clear in specifying the topic of interest (human-AI configuration) and the aspects it is concerned with (managing risks and ensuring information integrity). The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The options for high-priority AI risks include mitigating, transferring, avoiding, or accepting these risks. Specifically, for risks that do not surpass organizational risk tolerance, it is suggested to document trade-offs, decision processes, and relevant measurement and feedback results. For risks that surpass organizational risk tolerances, the recommended actions are to mitigate, transfer, or avoid those risks.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-enabled systems', 'Technological diffusion', 'Urban planning', 'Criminal justice system', 'Predictive policing']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What examples show how data privacy principles reduce risks of unauthorized data collection and identity theft?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of the National Institute of Standards and Technology (NIST) in advancing artificial intelligence. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks information about NIST's contributions to AI.", 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the roles of interdisciplinary teams and human-AI configuration in General Artificial Intelligence (GAI) risk management. It is clear in specifying the two elements of interest (interdisciplinary teams and human-AI configuration) and the context (GAI risk management). However, the use of abbreviations like 'GAI' and 'mgmt' might not be immediately clear to all readers. Expanding these abbreviations to 'General Artificial Intelligence' and 'management' would improve clarity. Additionally, specifying what aspects of risk management are of interest (e.g., risk identification, mitigation strategies) could further enhance the question's clarity and answerability.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of field testing in the evaluation of GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.INFO] seed question generated: "What concerns were raised by panelists regarding the use of predictive policing in the criminal justice system?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of using structured feedback mechanisms in relation to AI-generated content is to solicit and capture user input about the content to detect subtle shifts in quality or alignment with community and societal values.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'AI red-teaming and stakeholder engagement connect in privacy risk assessment by engaging directly with end-users and other stakeholders to understand their expectations and concerns regarding content provenance. This feedback is then used to guide the design of provenance data-tracking techniques, which is essential for addressing privacy risks identified during AI red-teaming assessments.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of Model Cards for Model Reporting within the context of artificial intelligence. It is specific, independent, and has a clear intent, making it understandable and answerable based on the details provided. The question does not rely on external references or additional context, and it clearly seeks an explanation of the purpose of Model Cards in AI.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the purpose of Model Cards for Model Reporting in the context of artificial intelligence?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['National Institute of Standards and Technology', 'Artificial intelligence', 'AI Safety Institute', 'Safe and trustworthy AI', '2023 Executive Order on Safe AI', 'AI Risk Management Framework', 'Trustworthy AI', 'Bias in Artificial Intelligence', 'GPT-4 Technical Report', 'Unsafe Diffusion']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes is to demonstrate construct validity for each metric, ensuring that the metric effectively operationalizes the desired concept. This involves measuring or estimating and documenting biases or statistical variance in applied metrics or structured human feedback processes, while leveraging domain expertise when modeling complex societal constructs such as hateful content.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about factors related to data privacy that must be aligned with GAI (General Artificial Intelligence) incident response plans. It is clear in specifying the topic of interest (data privacy factors) and the context (GAI incident response plans). The intent is to understand the alignment between data privacy considerations and incident response plans for GAI, which is specific and understandable without needing additional context. Therefore, the question meets the criteria of independence and clear intent.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What roles do interdisciplinary teams and human-AI config play in GAI risk mgmt?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the objectives of the U.S. AI Safety Institute in relation to the standards and frameworks for managing AI risks as outlined by NIST. It is specific in mentioning the U.S. AI Safety Institute and NIST, and it clearly seeks information about the objectives related to AI risk management standards and frameworks. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions seek examples of how data privacy principles protect against identity theft, with the second question also mentioning unauthorized data collection. However, the primary focus on identity theft remains consistent, sharing the same depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.INFO] seed question generated: "What are the concerns associated with unsafe diffusion in the context of AI-generated content?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'Disinformation and misinformation', 'Generative AI models', 'Information security risks', 'Cybersecurity attacks']
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What factors related to data privacy must be aligned with GAI incident response plans?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['AI-enabled nudification technology', 'Image-based abuse', 'Non-consensual intimate images', 'AI-powered cameras', 'Road safety habits']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the factors that influence the need for human alternatives in automated systems for sensitive areas. It is clear in its intent, seeking information on the influencing factors, and does not rely on external references or unspecified contexts. The question is specific and independent, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What goals does the U.S. AI Safety Institute have for NIST's AI risk standards?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connections between timely human fallback and the effectiveness of automated systems. While it is clear in its intent to explore the relationship between human intervention and system performance, it is somewhat vague in specifying the context or type of automated systems being referred to. To improve clarity and answerability, the question could specify the domain or type of automated systems (e.g., industrial automation, AI-driven customer service) and what is meant by 'timely human fallback' (e.g., specific scenarios or conditions under which human intervention is considered timely).", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What connections exist between timely human fallback and the effectiveness of automated systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 0 times
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What factors influence the need for human alternatives in automated systems for sensitive areas?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential impacts of disinformation and misinformation facilitated by GAI systems on public trust?"
+[ragas.testset.evolutions.INFO] seed question generated: "What problems does AI-enabled nudification technology seek to address and protect against?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of field testing in the evaluation of GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (field testing) and the context (evaluation of GAI systems), making the intent straightforward. The question is self-contained and does not rely on external references or additional context to be understood and answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What is the purpose of field testing in the evaluation of GAI systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Algorithmic discrimination', 'Equity assessments', 'Representative data', 'Proactive testing']
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role do Model Cards play in ensuring transparency and accountability in AI systems, particularly in light of privacy concerns highlighted by recent data breaches and surveillance practices?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses specifically on the significance of human-AI configuration in risk measurement and management for GAI systems, while the second question broadens the scope to include the role of interdisciplinary teams along with human-AI configuration in GAI risk management. This difference in scope leads to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Unemployment benefits system', 'Fraud detection system', 'Access to pain medication', 'Automated performance evaluation', 'Human alternatives']
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions focus on data privacy considerations in the context of incident response plans for GAI technologies, requiring similar depth and breadth of inquiry.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The National Institute of Standards and Technology (NIST) develops measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) to realize its full commercial and societal benefits without harm to people or the planet. NIST has conducted both fundamental and applied work on AI for more than a decade and is helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The significance of human-AI configuration in managing GAI risks and ensuring information integrity lies in its role in evaluating content lineage and origin, adapting training programs for digital content transparency, developing certification programs for managing GAI risks, delineating human proficiency tests from GAI capabilities, and implementing systems to monitor and track outcomes of human-GAI configurations for future improvements. Involving end-users, practitioners, and operators in prototyping and testing activities is also crucial, especially in various scenarios including crisis situations or ethically sensitive contexts.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of using representative data in the development of automated systems?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': "The first question asks about the purpose of the AI Safety Institute established by NIST, while the second question focuses on the goals of the U.S. AI Safety Institute specifically related to NIST's AI risk standards. These questions have different focuses and requirements.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What issues arise from the lack of human alternatives in automated systems?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the concerns associated with unsafe diffusion in the context of AI-generated content. It is clear in specifying the topic of interest (unsafe diffusion) and the context (AI-generated content), making the intent clear and understandable. The question is self-contained and does not rely on external references or prior knowledge not shared within the question itself. Therefore, it meets the criteria for independence and clear intent.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the considerations for using automated systems in sensitive domains, while the second question focuses on the need for human input in such systems. These are related but distinct inquiries with different depths and requirements.', 'verdict': 0}
+[ragas.testset.evolutions.INFO] seed question generated: "What role do civil rights play in the context of automated systems and technology according to the foreword?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 1, 'structure': 1, 'relevance': 1, 'score': 1.0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Automated systems', 'Derived data sources', 'Data reuse limits', 'Independent evaluation', 'Safety and effectiveness']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.0}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Lisa Feldman Barrett', 'Microsoft Corporation', 'National Association for the Advancement of Colored People', 'University of Michigan Ann Arbor', 'OSTP listening sessions', 'OSTP', 'Artificial intelligence', 'Biometric technologies', 'Request For Information (RFI)', 'Public comments']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the problems that AI-enabled nudification technology aims to address and protect against. It is clear in its intent, seeking specific information about the objectives and protective measures of this technology. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential impacts of disinformation and misinformation facilitated by GAI (Generative AI) systems on public trust. It is clear in specifying the topic of interest (disinformation and misinformation by GAI systems) and the specific aspect it seeks to explore (impact on public trust). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the potential impacts of disinformation and misinformation facilitated by GAI systems on public trust?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connections between timely human fallback and the effectiveness of automated systems. While it is clear in its intent to explore the relationship between human intervention and system performance, it is somewhat vague in specifying the context or type of automated systems being referred to. To improve clarity and answerability, the question could specify the domain or type of automated systems (e.g., industrial automation, AI-driven customer service) and what is meant by 'timely human fallback' (e.g., specific scenarios or conditions under which human intervention is considered timely).", 'verdict': 0}
+[ragas.testset.evolutions.INFO] retrying evolution: 1 times
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.evolutions.INFO] seed question generated: "What precautions should be taken when using derived data sources in automated systems?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the role of Model Cards in ensuring transparency and accountability in AI systems, with a specific focus on privacy concerns related to recent data breaches and surveillance practices. It is clear in its intent, specifying the aspect of AI systems (Model Cards) and the context (privacy concerns from data breaches and surveillance). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of using representative data in the development of automated systems. It is clear in its intent, seeking an explanation of why representative data is crucial in this context. The question is independent and does not rely on external references or additional context to be understood. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What is the importance of using representative data in the development of automated systems?"
+[ragas.testset.evolutions.INFO] seed question generated: "What actions did the OSTP take to engage with stakeholders regarding the use of artificial intelligence and biometric technologies?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Interdisciplinary teams play a crucial role in GAI risk management by reflecting a wide range of capabilities, competencies, demographic groups, domain expertise, educational backgrounds, lived experiences, professions, and skills. Their participation is documented, and opportunities for interdisciplinary collaboration are prioritized. Additionally, human-AI configuration is important as it addresses harmful bias and homogenization, ensuring that data or benchmarks used in risk measurement are representative of diverse in-context user populations.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the connections between field testing, user feedback, and GAI (General Artificial Intelligence) system evaluation. It is clear in its intent to understand the relationships among these three aspects. The question is independent and does not rely on external references or unspecified contexts, making it self-contained. However, it could benefit from specifying what kind of connections are of interest (e.g., methodological, impact on performance, user satisfaction) to further refine the scope of the answer.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How do Model Cards enhance AI transparency and accountability amid privacy issues?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Cyberattacks', 'Intellectual Property', 'Obscene and abusive content', 'CBRN weapons', 'Chemical and biological design tools']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the issues that arise from the lack of human alternatives in automated systems. It is clear in its intent, seeking information on the potential problems or challenges associated with relying solely on automated systems without human intervention or alternatives. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What issues arise from the lack of human alternatives in automated systems?"
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the concerns raised by panelists regarding the use of predictive policing in the criminal justice system. It is clear in specifying the topic of interest (concerns about predictive policing) and the context (criminal justice system). The intent is straightforward, seeking information on the specific concerns discussed by panelists. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] simple question generated: "What concerns were raised by panelists regarding the use of predictive policing in the criminal justice system?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data Privacy', 'Privacy Act of 1974', 'NIST Privacy Framework', 'Biometric identifying technology', 'Workplace surveillance']
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Algorithmic discrimination', 'Automated systems', 'Bias testing', 'Equitable design', 'Systemic biases']
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The need for human input in sensitive automated systems is driven by the requirement for timely human consideration and remedy when automated systems fail, produce errors, or when individuals wish to appeal or contest the impacts of these systems. Additionally, human input is necessary to ensure that automated systems are tailored to their intended purpose, provide meaningful access for oversight, and incorporate human consideration for adverse or high-risk decisions.', 'verdict': 1}
+[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between field testing, user feedback, and GAI system evaluation?"
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential impacts of increased attack surfaces for cyberattacks on system availability and data integrity?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Decommissioning AI systems', 'GAI risks', 'Roles and responsibilities', 'Incident response procedures', 'Data security and retention']
+[ragas.testset.evolutions.INFO] seed question generated: "What are some real-life examples of how data privacy principles can be implemented through laws and policies?"
+[ragas.testset.evolutions.INFO] seed question generated: "What measures are being taken to ensure equitable design in automated systems to protect against algorithmic discrimination?"
+[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What role does the inclusion of diverse and representative data play in ensuring that automated systems are free from algorithmic bias and meet the expectations for equitable outcomes?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 2, 'structure': 2, 'relevance': 3, 'score': 2.25}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Unemployment benefits system', 'Fraud detection system', 'Access to pain medication', 'Automated performance evaluation', 'Human alternatives']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for precautions to be taken when using derived data sources in automated systems. It is clear in specifying the topic of interest (precautions, derived data sources, automated systems) and seeks detailed information on safety or best practices. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the purpose of Model Cards in AI, while the second question focuses on how Model Cards enhance AI transparency and accountability, specifically amid privacy issues. These questions have different constraints and requirements, leading to different depths and breadths of inquiry.', 'verdict': 0}
+[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'AI-enabled nudification technology seeks to address and protect against image-based abuse, particularly the creation of non-consensual intimate images that disproportionately impact women. It aims to combat the proliferation of apps that allow users to create or alter images of individuals without their consent, which can lead to devastating harm to victims.', 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What is the importance of documenting roles and responsibilities related to managing AI risks within an organization?"
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5}
+[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI lifecycle', 'AI technology risks', 'Organizational practices for AI', 'Impact documentation process', 'Content provenance methodologies']
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of civil rights in the context of automated systems and technology, specifically according to 'the foreword'. While it is clear in specifying the topic of interest (civil rights, automated systems, technology) and the source of information (the foreword), it assumes access to and understanding of 'the foreword' without providing its content or context. This makes the question unclear for those without direct access to the foreword. To improve clarity and answerability, the question could include a brief description or key points from the foreword, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0}
+[ragas.testset.evolutions.INFO] rewritten question: "What role do civil rights play in the context of automated systems and technology according to the foreword?"
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the actions taken by the OSTP to engage with stakeholders concerning the use of artificial intelligence and biometric technologies. It is specific in identifying the organization (OSTP) and the topics of interest (artificial intelligence and biometric technologies). The intent is clear, seeking information on stakeholder engagement activities. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1}
+[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the connections between GAI-facilitated misinformation and the erosion of public trust. It is clear in specifying the two concepts of interest (GAI-facilitated misinformation and public trust erosion) and seeks to understand the relationship between them. The intent is clear, and the question is independent as it does not rely on external references or unspecified contexts. However, it could be improved by specifying what 'GAI' stands for to ensure clarity for all readers.", 'verdict': 1}
+[ragas.testset.evolutions.INFO] seed question generated: "What are the potential issues associated with automated performance evaluation in the workplace?"
+[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about the purpose of field testing in the evaluation of GAI systems, while the second question asks about the relationship between field testing, user feedback, and GAI evaluation. The second question has a broader scope and different requirements.', 'verdict': 0} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question generated: "What ethical and operational concerns did panelists express regarding the implications of AI-driven predictive policing on community safety and democratic values?" +[ragas.testset.evolutions.INFO] seed question generated: "What is the purpose of the impact documentation process in the context of GAI systems?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What connections exist between GAI-facilitated misinformation and public trust erosion?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for real-life examples of how data privacy principles can be implemented through laws and policies. It is clear in its intent, specifying that it seeks examples and focusing on the implementation of data privacy principles through legal and policy measures. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are some real-life examples of how data privacy principles can be implemented through laws and policies?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential impacts of increased attack surfaces for cyberattacks on system availability and data integrity. It is clear in specifying the topic of interest (increased attack surfaces, cyberattacks) and the aspects to be addressed (system availability, data integrity). The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the potential impacts of increased attack surfaces for cyberattacks on system availability and data integrity?" +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Trustworthy AI', 'Transparency policies', 'Risk management activities', 'Information integrity', 'GAI capabilities'] +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the problems that arise when automated systems do not have options for human review. It is clear in its intent, seeking information on the potential issues or drawbacks of such systems. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The answer to given question is not present in context', 'verdict': -1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about measures being taken to ensure equitable design in automated systems to protect against algorithmic discrimination. 
It is clear in its intent, specifying the focus on 'equitable design' and 'algorithmic discrimination'. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.", 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question is clear and specific, asking about the role of diverse and representative data in preventing algorithmic bias and achieving equitable outcomes in automated systems. It does not rely on external references or unspecified contexts, making it self-contained and understandable. The intent is clear, seeking an explanation of the impact of data diversity on algorithmic fairness.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What problems emerge when automated systems lack human review options?" +[ragas.testset.evolutions.INFO] seed question generated: "What factors should be considered when evaluating the risk-relevant capabilities of GAI?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the importance of documenting roles and responsibilities related to managing AI risks within an organization. It is clear in its intent, seeking an explanation of the significance of this documentation. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the impact of misinformation facilitated by GAI systems on public trust. They share the same constraints and requirements, and the depth and breadth of the inquiry are similar.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about potential issues associated with automated performance evaluation in the workplace. It is clear in its intent, seeking information on the drawbacks or challenges of using automated systems for performance evaluation. The question is specific and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "How does diverse data help prevent algorithmic bias in automated systems?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Precautions that should be taken when using derived data sources in automated systems include careful tracking and validation of derived data, as it may be high-risk and could lead to feedback loops, compounded harm, or inaccurate results. Such data should be validated against the risk of collateral consequences.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Technology and democracy', 'Automated systems', 'Civil rights', 'AI Bill of Rights', 'Bias and discrimination'] +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Field testing, user feedback, and GAI evaluation are linked through structured public feedback mechanisms that assess how GAI systems perform in real-world conditions. 
Field testing evaluates risks and impacts in controlled settings, while user feedback, gathered through participatory engagement methods, helps organizations understand user interactions and experiences with AI-generated information. Together, these approaches inform the design, implementation, and governance of GAI systems.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks about the role of civil rights in the context of automated systems and technology, specifically according to 'the foreword'. While it is clear in specifying the topic of interest (civil rights, automated systems, technology) and the source of information (the foreword), it assumes access to and understanding of 'the foreword' without providing its content or context. This makes the question unclear for those without direct access to the foreword. To improve clarity and answerability, the question could include a brief description or key points from the foreword, or alternatively, frame the question in a way that does not rely on specific, unpublished documents.", 'verdict': 0} +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'Both questions address the problems that occur when human involvement is missing in automated systems, sharing the same constraints, requirements, and depth of inquiry.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] evolution_filter failed, retrying with 1 +[ragas.testset.evolutions.INFO] retrying evolution: 1 times +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the ethical and operational concerns expressed by panelists regarding AI-driven predictive policing and its implications on community safety and democratic values. It is clear in specifying the topic of interest (ethical and operational concerns, AI-driven predictive policing) and the context (community safety and democratic values). The intent is clear, and the question is self-contained, not relying on external references or unspecified contexts. Therefore, it is understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the purpose of the impact documentation process specifically in the context of GAI (General Artificial Intelligence) systems. It is clear in specifying the topic of interest (impact documentation process) and the context (GAI systems), making the intent of the question straightforward. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Environmental impacts of GAI', 'Harmful bias in AI systems', 'Generative AI energy consumption', 'Disparities in model performance', 'Trustworthy AI characteristics'] +[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of bias and discrimination in automated systems on the rights of the American public?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'OSTP engaged with stakeholders regarding the use of artificial intelligence and biometric technologies by conducting two listening sessions for members of the public, which drew upwards of 300 participants. 
Additionally, OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and issued a Request For Information (RFI) on the use and governance of biometric technologies to understand their extent, variety, and the stakeholders impacted by their use or regulation.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses on the general importance of using representative data, while the second question specifically addresses how diverse data helps prevent algorithmic bias. These questions have different depths and specific focuses.', 'verdict': 0} +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Cyberattacks', 'Intellectual Property', 'Obscene and abusive content', 'CBRN weapons', 'Chemical and biological design tools'] +[ragas.testset.evolutions.DEBUG] [MultiContextEvolution] multicontext question compressed: "What concerns did panelists raise about AI in policing and its impact on safety and democracy?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for the connection between increased attack surfaces and the risks to system availability and data integrity. It is clear in its intent, seeking an explanation of the relationship between these concepts. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The importance of documenting roles and responsibilities related to managing AI risks within an organization is to ensure that these roles and lines of communication are clear to individuals and teams throughout the organization. This clarity helps in mapping, measuring, and managing AI risks effectively.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for laws and frameworks that illustrate data privacy principles in practice and their implications. It is clear in its intent, seeking specific examples of legal and regulatory frameworks related to data privacy and their practical implications. The question is independent and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': "The question asks for factors to consider when evaluating the risk-relevant capabilities of GAI (General Artificial Intelligence). It is clear in its intent, seeking specific factors related to risk evaluation. The question is independent and does not rely on external references or unspecified contexts. However, it could benefit from a brief clarification of what is meant by 'risk-relevant capabilities' to ensure a comprehensive understanding for all readers.", 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What are the environmental impacts associated with the energy consumption of generative AI systems?" 
+[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['GAI systems', 'AI Actors', 'Unanticipated impacts', 'Information integrity', 'Content provenance'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 2, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.5} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Training data use', 'Intellectual property', 'Data privacy risks', 'Content provenance', 'Generative AI (GAI) risks', 'GAI systems', 'Information integrity', 'Human-AI configuration', 'Digital content transparency', 'Harmful bias and homogenization'] +[ragas.testset.filters.DEBUG] context scoring: {'clarity': 1, 'depth': 3, 'structure': 2, 'relevance': 3, 'score': 2.25} +[ragas.testset.evolutions.DEBUG] keyphrases in merged node: ['Data protection', 'Third-party considerations', 'Risk management', 'Pre-deployment testing', 'GAI systems'] +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What links increased attack surfaces to risks for system availability and data integrity?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the potential risks associated with the production and access to obscene and abusive content?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What laws and frameworks illustrate data privacy principles in practice and their implications?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The potential issues associated with automated performance evaluation in the workplace include workers being fired by an automated system without the possibility of human review, appeal, or other forms of recourse.', 'verdict': 1} +[ragas.testset.evolutions.INFO] seed question generated: "What measures are suggested to ensure information integrity in the context of AI systems?" +[ragas.testset.evolutions.INFO] seed question generated: "What are the implications of using GAI systems for organizations in terms of risk management and compliance?" +[ragas.testset.evolutions.INFO] seed question generated: "What should be assessed to understand data privacy risks in the use of training data?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to changes or the prevention of product launches to avoid public harm. Federal government agencies are developing standards and guidance for the use of automated systems to help prevent bias. Non-profits and companies have created best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question focuses specifically on predictive policing within the criminal justice system, while the second question addresses broader concerns about AI in policing, including its impact on safety and democracy. 
These differences lead to varying depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of bias and discrimination in automated systems on the rights of the American public. It is specific in its focus on bias and discrimination within automated systems and their impact on a particular group (the American public). The intent is clear, seeking an explanation of the consequences of these issues on public rights. The question is self-contained and does not rely on external references or prior knowledge beyond a general understanding of automated systems and public rights.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the potential risks associated with the production and access to obscene and abusive content. It is clear in its intent, seeking information on the risks involved. The question is self-contained and does not rely on external references or additional context to be understood. Therefore, it meets the criteria of independence and clear intent.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the potential risks associated with the production and access to obscene and abusive content?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks for real-life examples of data privacy principles implemented through laws and policies, requiring a broader and more detailed response. The second question is more specific, asking only about laws, which narrows the scope and depth of the inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question specifically asks about the impacts of increased attack surfaces on system availability and data integrity, requiring detailed analysis. The second question is broader, asking about the connection between attack surfaces and system/data risks without specifying the impacts or particular aspects like availability and integrity.', 'verdict': 0} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the environmental impacts associated with the energy consumption of generative AI systems. It is specific in its focus on environmental impacts and energy consumption, and it is clear in its intent to understand the consequences of using generative AI systems from an environmental perspective. The question is self-contained and does not rely on external references or additional context to be understood or answered. Therefore, it meets the criteria for clarity and answerability.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the environmental impacts associated with the energy consumption of generative AI systems?" +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks for measures to ensure information integrity in the context of AI systems. It is clear in specifying the topic of interest (information integrity) and the context (AI systems), making the intent clear and understandable. The question is self-contained and does not rely on external references or unspecified contexts, making it independent and answerable based on domain knowledge.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks what should be assessed to understand data privacy risks in the use of training data. 
It is clear in its intent, seeking specific information on the factors or criteria that need to be evaluated to understand data privacy risks. The question is independent and does not rely on external references or additional context, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the implications of using GAI (General Artificial Intelligence) systems for organizations, specifically focusing on risk management and compliance. It is clear in its intent, seeking information on the potential risks and compliance issues associated with GAI systems in an organizational context. The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] simple question generated: "What are the implications of using GAI systems for organizations in terms of risk management and compliance?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting or reproducing existing unwanted inequities. These outcomes can threaten people's opportunities, undermine their privacy, and lead to pervasive tracking of their activity, often without their knowledge or consent.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "The context discusses increased attack surfaces for targeted cyberattacks, which may compromise a system's availability or the confidentiality or integrity of training data, code, or model weights. This connection indicates that as attack surfaces increase, the risks to systems and data also escalate.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The purpose of the impact documentation process in the context of GAI systems is to document the risks and potential impacts of the AI technology designed, developed, deployed, evaluated, and used, and to communicate about these impacts more broadly.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the harms arising from easy access to obscene content and its production methods. It is clear in its intent, seeking information on the negative consequences associated with both the consumption and production of obscene content. The question is self-contained and does not rely on external references or prior knowledge, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the energy activities of GAI (General Artificial Intelligence) systems that lead to significant carbon emissions. It is clear in specifying the subject (GAI systems) and the aspect of interest (energy activities leading to carbon emissions). The intent is to understand which specific activities within GAI systems contribute to carbon emissions, making it specific and independent. 
No additional context or external references are needed to understand or answer the question.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': "Panelists raised concerns about the validity of AI systems used in policing, noting that adverse or irrelevant data can lead to a replication of unjust outcomes. They highlighted issues such as confirmation bias and the tendency to defer to potentially inaccurate automated systems. The impact of these systems on individuals and communities is seen as potentially severe, with concerns that they lack individualization, undermine the belief in people's ability to change for the better, and can lead to job loss and custody issues. Additionally, surveillance technologies can create chilling effects in communities and send negative signals about how community members are viewed. Panelists emphasized that while transparency is important, it is not sufficient for achieving accountability.", 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What harms arise from easy access to obscene content and its production methods?" +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What energy activities of GAI systems lead to significant carbon emissions?" +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'To understand data privacy risks in the use of training data, it is important to conduct appropriate diligence on training data use to assess intellectual property and privacy risks, including examining whether the use of proprietary or sensitive training data is consistent with applicable laws.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Suggested measures to ensure information integrity in the context of AI systems include employing methods to trace the origin and modifications of digital content, integrating tools designed to analyze content provenance and detect data anomalies, verifying the authenticity of digital signatures, and identifying patterns associated with misinformation or manipulation. Additionally, it is recommended to disaggregate evaluation metrics by demographic factors to identify discrepancies in how content provenance mechanisms work across diverse populations.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Diverse data helps prevent algorithmic bias in automated systems by ensuring that any data used in system development or assessment is representative of local communities based on the planned deployment setting. This data should be reviewed for bias considering the historical and societal context, and it should be sufficiently robust to identify and mitigate biases and potential harms.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The Privacy Act of 1974 exemplifies data privacy principles in action by requiring privacy protections for personal information in federal records systems, including limits on data retention and providing individuals a general right to access and correct their data. Additionally, federal law mandates employers to report the costs of surveilling employees during labor disputes, which serves as a transparency mechanism to protect worker organizing.', 'verdict': 1} +[ragas.testset.filters.DEBUG] filtered question: {'feedback': 'The question asks about the risk controls organizations should consider when using third-party Generative AI (GAI) systems for compliance. 
It is clear in its intent, specifying the context (third-party GAI systems) and the focus (risk controls for compliance). The question is self-contained and does not rely on external references or unspecified contexts, making it understandable and answerable based on the details provided.', 'verdict': 1} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question addresses both production and access to obscene and abusive content, while the second question focuses solely on the issues arising from easy access to obscene content. This difference in scope leads to different depths and breadths of inquiry.', 'verdict': 0} +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the overall environmental impacts of energy consumption in generative AI systems, which is broader. The second question specifically focuses on which activities within generative AI contribute most to carbon emissions, which is narrower in scope.', 'verdict': 0} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Factors to consider when evaluating the risk-relevant capabilities of GAI include abuses and impacts to information integrity, dependencies between GAI and other IT or data systems, harm to fundamental rights or public safety, presentation of obscene, objectionable, offensive, discriminatory, invalid or untruthful output, psychological impacts to humans (e.g., anthropomorphization, algorithmic aversion, emotional entanglement), possibility for malicious use, whether the system introduces significant new security vulnerabilities, anticipated system impact on some groups compared to others, and unreliable decision-making capabilities, validity, adaptability, and variability of GAI system performance over time.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] [ReasoningEvolution] question compressed: "What risk controls should orgs consider when using third-party GAI systems for compliance?" +[ragas.testset.filters.DEBUG] evolution filter: {'reason': 'The first question asks about the implications of using GAI systems for organizations in terms of risk management and compliance, which is broader and requires a more detailed analysis. The second question is more specific, focusing only on risk controls for third-party GAI in compliance.', 'verdict': 0} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Easy access to obscene content can lead to the production of and access to obscene, degrading, and/or abusive imagery, which can cause harm, including synthetic child sexual abuse material (CSAM) and nonconsensual intimate images (NCII) of adults.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'The GAI activities that contribute most to carbon emissions include training, maintaining, and operating GAI systems, particularly during the pre-training, fine-tuning, and inference stages. 
Current estimates suggest that training a single transformer LLM can emit as much carbon as 300 round-trip flights between San Francisco and New York.', 'verdict': 1} +[ragas.testset.evolutions.DEBUG] answer generated: {'answer': 'Organizations can apply standard or existing risk controls and processes to proprietary or open-source GAI technologies, data, and third-party service providers, including acquisition and procurement due diligence, requests for software bills of materials (SBOMs), application of service level agreements (SLAs), and statement on standards for attestation engagement (SSAE) reports to help with third-party transparency and risk management for GAI systems.', 'verdict': 1} \ No newline at end of file diff --git a/Tasks/Task 3/Task3.md b/Tasks/Task 3/Task3.md new file mode 100644 index 0000000000000000000000000000000000000000..ad4f78cc9b3d28b15a150c79cac39af9c8235060 --- /dev/null +++ b/Tasks/Task 3/Task3.md @@ -0,0 +1,38 @@ + +# Deliverable 1 + +Assess your pipeline using the RAGAS framework, including the key metrics faithfulness, answer relevancy, context precision, and context recall. Provide a table of your output results. + +## Pipeline configuration + 1. Qdrant - Cloud-hosted vector database + 2. PyMuPDFLoader - PDF loader from LangChain + 3. Snowflake/snowflake-arctic-embed-l - Open-source embedding model + 4. SemanticChunker & RecursiveCharacterTextSplitter with contextual compression - Chunking strategy [Note that SemanticChunker seems to be unreliable and produces duplicate chunks] + 5. gpt-4o-mini - Generator LLM + 6. gpt-4o - Critic LLM + 7. Distribution - simple 0.5, multi_context 0.3, and reasoning 0.2 + 8. Ragas metrics - faithfulness, answer_relevancy, context_recall, context_precision, answer_correctness + 9. Synthetic questions generated - 269 (see the appendix below for a code sketch of this configuration) + +![task3-del1](/task3-del1.png) + +![task3-del1](/task3-del11.png) + +# Deliverable 2 +What conclusions can you draw about the performance and effectiveness of your pipeline from this information? + + +## Observations: +Some observations from the results: + +- **Faithfulness**: Mostly high faithfulness scores, indicating that the generated answers are generally true to the source material. However, there are some low scores (e.g., 0.233333), which show that the model may occasionally produce unfaithful or incomplete answers. + +- **Answer Relevancy**: The model performs well on answer relevancy, with most scores near 1. This suggests that even when faithfulness is low, the answers are still on-topic and relevant to the user's question. + +- **Context Recall & Precision**: There are several instances where **context recall** is 0.0, indicating that the retrieved context was not helpful in answering the question. However, when context recall is high, **context precision** is often perfect (1.0), showing that when the context is relevant, it is also precise and accurate. + +- **Answer Correctness**: This metric shows a range of results. Although many answers are correct, a few are only partially correct, suggesting room for improvement in the correctness of generated answers. + +The pipeline performs well at generating relevant answers, but the faithfulness and correctness of those answers can still be improved. + +The **context recall** metric has clear room for improvement. There are several cases where relevant context is missing or inadequate, which reduces the overall effectiveness of the pipeline.
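+
+## Appendix: code sketch of the test set generation
+
+For reference, a minimal sketch of the synthetic question generation described in the pipeline configuration above. It assumes the ragas 0.1.x testset API (the same `simple`/`multi_context`/`reasoning` evolutions that appear in the generation logs) and the LangChain OpenAI wrappers; the PDF paths are placeholders, and `OpenAIEmbeddings` for generation is an assumption (the retriever itself uses snowflake-arctic-embed-l). It illustrates the configuration rather than reproducing the exact project code.
+
+```python
+from langchain_community.document_loaders import PyMuPDFLoader
+from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+from ragas.testset.generator import TestsetGenerator
+from ragas.testset.evolutions import simple, multi_context, reasoning
+
+# Load the two source PDFs (paths are placeholders).
+documents = []
+for path in ["data/ai-bill-of-rights.pdf", "data/nist-ai-600-1.pdf"]:
+    documents.extend(PyMuPDFLoader(path).load())
+
+# gpt-4o-mini drafts candidate questions; gpt-4o acts as the critic that
+# scores contexts and filters/evolves the questions (as seen in the logs).
+generator = TestsetGenerator.from_langchain(
+    generator_llm=ChatOpenAI(model="gpt-4o-mini"),
+    critic_llm=ChatOpenAI(model="gpt-4o"),
+    embeddings=OpenAIEmbeddings(),  # assumption: embeddings used for generation only
+)
+
+# Distribution from the configuration: 50% simple, 30% multi-context, 20% reasoning.
+testset = generator.generate_with_langchain_docs(
+    documents,
+    test_size=269,
+    distributions={simple: 0.5, multi_context: 0.3, reasoning: 0.2},
+)
+testset.to_pandas().to_csv("synthetic-testset.csv", index=False)
+```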
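+
+Once the RAG chain has answered each synthetic question, the metric table from Deliverable 1 can be reproduced with `ragas.evaluate`. This is another sketch under the same ragas 0.1.x assumption; the single example row is a placeholder (in practice the lists are built by running the chain over every test set question), and the column names follow the convention used in the exported CSV.
+
+```python
+from datasets import Dataset
+from ragas import evaluate
+from ragas.metrics import (
+    faithfulness,
+    answer_relevancy,
+    context_recall,
+    context_precision,
+    answer_correctness,
+)
+
+# One placeholder row; in practice these lists come from running the RAG
+# chain over every question in the synthetic test set.
+eval_dataset = Dataset.from_dict({
+    "question": ["What is the purpose of the AI Bill of Rights?"],
+    "contexts": [["The Blueprint for an AI Bill of Rights lays out five principles..."]],
+    "answer": ["It sets out five principles to guide the design of automated systems."],
+    "ground_truth": ["A set of five principles and associated practices..."],
+})
+
+results = evaluate(
+    eval_dataset,
+    metrics=[faithfulness, answer_relevancy, context_recall,
+             context_precision, answer_correctness],
+)
+results.to_pandas().to_csv("ai-safety-ragas-evaluation-result.csv", index=False)
+```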
\ No newline at end of file diff --git a/Tasks/Task 3/ai-safety-ragas-evaluation-result.csv b/Tasks/Task 3/ai-safety-ragas-evaluation-result.csv new file mode 100644 index 0000000000000000000000000000000000000000..81241bd0c0e844c55cffdb8d8975569ead14a9ec --- /dev/null +++ b/Tasks/Task 3/ai-safety-ragas-evaluation-result.csv @@ -0,0 +1,3849 @@ +question,contexts,answer,ground_truth,faithfulness,answer_relevancy,context_recall,context_precision,answer_correctness +What actions did the OSTP take to engage with stakeholders regarding the use of artificial intelligence and biometric technologies?,"['ā€¢ OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and other data-driven technologies in their lives.\nā€¢ OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The purpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below listed organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union']","The Office of Science and Technology Policy (OSTP) took the following actions to engage with stakeholders regarding the use of artificial intelligence and biometric technologies: + +1. **Soliciting Public Comments**: OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and other data-driven technologies in their lives. + +2. **Request for Information (RFI)**: OSTP issued a Request for Information (RFI) on the use and governance of biometric technologies. The purpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. + +The 130 responses to this RFI are available online and were submitted by a diverse range of organizations and individuals, including Accenture, Access Now, ACT | The App Association, AHIP, AIethicist.org, Airlines for America, Alliance for Automotive Innovation, Amelia Winger-Bearskin, and the American Civil Liberties Union, among others.","OSTP engaged with stakeholders regarding the use of artificial intelligence and biometric technologies by conducting two listening sessions for members of the public, which drew upwards of 300 participants. 
Additionally, OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and issued a Request For Information (RFI) on the use and governance of biometric technologies to understand their extent, variety, and the stakeholders impacted by their use or regulation.",1.0,0.971420787021264,0.6666666666666666,0.9999999999,0.832264507901183 +What are the potential issues associated with automated performance evaluation in the workplace?,[],"Automated performance evaluation systems in the workplace can introduce several potential issues, including: + +1. **Bias and Fairness**: Algorithms can perpetuate existing biases present in the data they were trained on. This can lead to unfair evaluations, particularly for minority groups or those underrepresented in the training data. + +2. **Transparency**: Automated systems often lack transparency, making it difficult for employees to understand how their performance is being assessed. This can lead to a lack of trust in the evaluation process. + +3. **Accuracy**: The metrics used by automated systems may not fully capture the nuances of an employee's performance, potentially overlooking important qualitative aspects of their work. + +4. **Privacy Concerns**: The data collected for automated performance evaluations can raise privacy issues, especially if sensitive or personal information is used without proper consent. + +5. **Over-reliance on Data**: Solely relying on quantitative data can ignore the context and complexities of certain job roles, leading to an incomplete or skewed assessment of an employee's performance. + +6. **Employee Morale**: Knowing that an impersonal system is evaluating their work can affect employee morale and motivation, particularly if they feel the system is unfair or opaque. + +7. **Adaptability**: Automated systems may struggle to adapt to unique or changing job requirements, potentially leading to outdated or irrelevant evaluations. + +8. **Feedback Quality**: Automated systems may provide less personalized and actionable feedback compared to human evaluators, which can limit an employee's ability to improve and develop. + +Addressing these issues requires careful design, ongoing monitoring, and a combination of automated and human evaluation processes to ensure fairness, accuracy, and transparency.","The potential issues associated with automated performance evaluation in the workplace include workers being fired by an automated system without the possibility of human review, appeal, or other forms of recourse.",1.0,0.9884787639433723,0.0,0.0,0.2891072538378265 +What role does synthetic content detection play in managing risks associated with AI-generated outputs?,[],"Synthetic content detection plays a crucial role in managing risks associated with AI-generated outputs by identifying and mitigating the potential misuse of AI technologies. Here are several ways in which it contributes to AI safety: + +1. **Misinformation Control**: Synthetic content detection helps to identify AI-generated fake news, deepfakes, and other forms of misinformation, thereby preventing the spread of false information that can influence public opinion or cause harm. + +2. **Intellectual Property Protection**: It can identify AI-generated content that mimics the style or substance of protected works, helping to prevent copyright infringement and protect the intellectual property rights of creators. + +3. 
**Fraud Prevention**: By detecting AI-generated content, businesses and financial institutions can prevent fraud and identity theft, which often use synthetic identities or documents. + +4. **Content Moderation**: Social media platforms and other content-hosting services can use synthetic content detection to identify and remove harmful AI-generated content, such as hate speech, violent imagery, or explicit material, thus maintaining a safer user environment. + +5. **Authenticity Verification**: For industries like news media and academia, synthetic content detection ensures the authenticity and credibility of published information, maintaining the integrity of these institutions. + +6. **Regulatory Compliance**: Synthetic content detection helps organizations comply with regulations that require the identification and labeling of AI-generated content, ensuring transparency and accountability. + +Overall, synthetic content detection is a key tool in the arsenal for managing the risks and ensuring the responsible use of AI technologies.","Synthetic content detection plays a crucial role in managing risks associated with AI-generated outputs by helping to distinguish human-generated content from AI-generated synthetic content. It facilitates greater information access about both authentic and synthetic content, enabling users to better understand the trustworthiness of AI systems. Additionally, it can assist in tracing negative outcomes back to their source, improving information integrity, and upholding public trust.",0.9444444444444444,0.9999999999999994,0.0,0.0,0.30881134564079704 +What role does risk management play in the implementation of feedback activities for AI systems?,['risk identification and management assessments and any steps taken to mitigate potential harms'],"Risk management plays a crucial role in the implementation of feedback activities for AI systems. It involves identifying, assessing, and mitigating potential risks to ensure that the feedback process enhances the system's performance without introducing new problems. Here are some specific ways risk management is applied: + +1. **Risk Identification**: This involves recognizing potential hazards that could arise from feedback activities. For instance, feedback loops could introduce biases, privacy concerns, or security vulnerabilities if not properly managed. + +2. **Risk Assessment**: Once risks are identified, they need to be evaluated in terms of their likelihood and potential impact. This helps in prioritizing which risks need more immediate attention and resources. + +3. **Mitigation Strategies**: Developing and implementing steps to reduce the identified risks is essential. For example: + - Ensuring data privacy by anonymizing feedback data. + - Regularly updating security protocols to protect against data breaches. + - Implementing bias detection and correction mechanisms to maintain fairness. + +4. **Monitoring and Review**: Continuous monitoring of the feedback process is necessary to ensure that the mitigation strategies are effective and to detect any new risks that may arise. This involves regular audits and updates to the risk management plan. 
+ +Overall, effective risk management ensures that feedback activities contribute positively to the AI system's development and deployment, fostering trust and reliability in the system.",The answer to given question is not present in context,0.23333333333333334,0.9999999999999987,0.0,0.0,0.9206845746262214 +What concerns arise from companies using surveillance software to track employee discussions about union activity?,['Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67'],"The use of surveillance software by companies to track employee discussions about union activity raises several significant concerns: + +1. **Privacy Invasion**: Employees have a reasonable expectation of privacy in their communications, and monitoring their discussions can be seen as a significant invasion of that privacy. + +2. **Chilling Effect**: Knowing that their discussions are being monitored can discourage employees from freely expressing their thoughts and concerns, particularly regarding union activities, which can stifle open communication and the free exchange of ideas. + +3. **Intimidation and Retaliation**: Surveillance can lead to intimidation or retaliation against employees who are involved in or support union activities, potentially creating a hostile work environment and discouraging union participation. + +4. **Trust Erosion**: Continuous monitoring can erode trust between employees and management, leading to a more adversarial relationship and potentially decreasing overall morale and productivity. + +5. **Legal and Ethical Issues**: There are potential legal ramifications, as such surveillance might violate labor laws and employees' rights to organize and discuss union activities without interference. Ethically, it raises questions about the fairness and integrity of the company's practices. + +6. **Data Misuse**: The data collected from such surveillance can be misused or mishandled, leading to further breaches of privacy and potential legal consequences. + +Overall, the deployment of surveillance software to monitor union-related discussions can have profound negative implications for employee rights, workplace culture, and legal compliance.","Concerns arise from companies using surveillance software to track employee discussions about union activity, as it leads to the surveillance of individual employees and allows companies to surreptitiously intervene in discussions.",0.5652173913043478,0.9898117916148421,1.0,0.9999999999,0.6129296679690839 +What is the purpose of the cross-sectoral profile in the context of the AI Risk Management Framework for Generative AI?,[],"The purpose of the cross-sectoral profile in the context of the AI Risk Management Framework for Generative AI is to establish a set of guidelines and best practices that apply across different industries and sectors. This profile aims to address the unique risks and challenges associated with generative AI technologies by providing a comprehensive and unified approach to managing these risks. 
It helps organizations from various sectors to consistently identify, assess, and mitigate potential risks related to the deployment and use of generative AI systems, ensuring safe and responsible AI innovation and implementation.","The purpose of the cross-sectoral profile in the context of the AI Risk Management Framework for Generative AI is to assist organizations in deciding how to best manage AI risks in a manner that aligns with their goals, considers legal/regulatory requirements and best practices, and reflects risk management priorities. It offers insights into how risk can be managed across various stages of the AI lifecycle and for Generative AI as a technology.",1.0,0.9974163705263249,0.0,0.0,0.768943082057739 +What measures are proposed in the Blueprint for an AI Bill of Rights to protect the rights of the American public?,[],"The Blueprint for an AI Bill of Rights proposes several measures to protect the rights of the American public, including: + +1. **Safe and Effective Systems**: Ensuring that AI systems are safe and effective before they are deployed. This involves rigorous testing, risk assessment, and continuous monitoring to prevent harm. + +2. **Algorithmic Discrimination Protections**: Safeguarding individuals from discriminatory outcomes in AI systems. This includes implementing fairness checks, bias mitigation strategies, and accountability measures to prevent discrimination based on race, gender, or other protected characteristics. + +3. **Data Privacy**: Establishing strong data privacy protections to prevent misuse of personal data by AI systems. This measure emphasizes transparency about data collection and usage, user consent, and data security practices. + +4. **Notice and Explanation**: Providing individuals with clear and understandable information about how AI systems are used and how decisions are made. This includes ensuring that people know when they are interacting with an AI system and can access explanations of how decisions that affect them are made. + +5. **Human Alternatives, Consideration, and Fallback**: Ensuring that individuals have access to human alternatives and can opt-out of AI-driven decisions when appropriate. This measure advocates for maintaining human oversight and intervention in critical decision-making processes to protect individual rights and autonomy. + +These measures are designed to create a framework that prioritizes the rights, safety, and well-being of individuals in the deployment and use of AI technologies.","The Blueprint for an AI Bill of Rights proposes a set of five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. It includes expectations for automated systems, practical steps for implementation, and emphasizes transparency through reporting to ensure that rights, opportunities, and access are respected.",1.0,0.9930047968744876,0.0,0.0,0.8101262281234443 +What is the significance of the NSF Program on Fairness in Artificial Intelligence in collaboration with Amazon?,['The National Science Foundation’s program on Fairness in Artificial Intelligence also includes a specific interest in research foundations for explainable AI.96'],"The National Science Foundation (NSF) Program on Fairness in Artificial Intelligence, in collaboration with Amazon, plays a significant role in advancing the understanding and development of fair and equitable AI systems. 
This program focuses on promoting research that addresses crucial issues related to AI fairness, such as mitigating bias, ensuring fairness in AI decision-making processes, and improving the transparency and explainability of AI systems. + +One of the key interests of this program is in the foundational research for explainable AI, which aims to make AI systems more understandable and interpretable to humans. This is critical because it helps build trust in AI systems by providing clear and comprehensible explanations of how decisions are made. By fostering research in these areas, the program seeks to create AI technologies that are not only powerful but also ethical and aligned with societal values.",The answer to given question is not present in context,0.05555555555555555,0.9336706228244974,0.0,0.0,0.9279203908549005 +What measures should be taken to demonstrate the safety and effectiveness of automated systems?,"['Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.' + 'the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in use, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice should be kept up-to-date and people impacted by the system should be notified of significant use case or key functionality changes. You should know how and why an outcome impacting you was determined by an automated system, including when the automated system is not the sole input determining the outcome. Automated systems should provide explanations that are technically valid, meaningful and useful to you and to any operators or others who']","To demonstrate the safety and effectiveness of automated systems, the following measures should be taken: + +1. **Independent Evaluation and Reporting:** + - Conduct independent evaluations and reporting to confirm that the system is safe and effective. + - Make the results of these evaluations public whenever possible. + - Report the steps taken to mitigate potential harms. + +2. **Performance Testing:** + - Include results of performance testing such as accuracy, differential demographic impact, and resulting error rates (both overall and per demographic group). + - Compare these results to previously deployed systems. + - Implement ongoing monitoring procedures and regular performance testing reports, specifying the monitoring frequency. + +3. **Accessible Documentation:** + - Provide generally accessible plain language documentation that includes clear descriptions of the overall system functioning and the role of automation. + - Notify people that such systems are in use and identify the individual or organization responsible for the system. 
+ - Offer explanations of outcomes that are clear, timely, and accessible, ensuring people impacted by the system understand how and why an outcome was determined. + - Keep this documentation up-to-date and notify people of significant use case or key functionality changes. + +4. **Explanations of Outcomes:** + - Ensure that explanations provided by the automated systems are technically valid, meaningful, and useful to users, operators, and others impacted by the system. + +These measures help ensure transparency, accountability, and ongoing assessment of the automated system's performance and impact.","To demonstrate the safety and effectiveness of automated systems, the following measures should be taken: 1. Independent evaluation should be allowed, enabling access for independent evaluators such as researchers and auditors to the system and associated data. 2. Reporting should be regularly updated, including an overview of the system, data used for training, risk assessments, performance testing results, and ongoing monitoring procedures. Reports should be provided in plain language and machine-readable formats.",1.0,1.000000000000001,1.0,0.9999999999666667,0.8139385187321518 +What is the purpose of the impact documentation process in the context of GAI systems?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.']","The purpose of the impact documentation process in the context of General Artificial Intelligence (GAI) systems is to facilitate smoother sharing of information with relevant AI actors. This involves logging, recording, and analyzing GAI incidents, as well as maintaining regular information sharing, change management records, version history, and metadata. These practices empower AI actors to respond to and manage AI incidents more effectively by providing them with comprehensive and accurate information about past events and changes.","The purpose of the impact documentation process in the context of GAI systems is to document the risks and potential impacts of the AI technology designed, developed, deployed, evaluated, and used, and to communicate about these impacts more broadly.",0.8571428571428571,0.9317620208648149,0.0,0.9999999999,0.8931337530967289 +What should be assessed to understand data privacy risks in the use of training data?,"['a description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted, a summary of what data might be missing, incomplete, or erroneous, and data relevancy justifications;']","To understand data privacy risks in the use of training data, several aspects should be assessed: + +1. **Data Sources**: Identify the origins of the data used. Ensure that the sources are legitimate and that data collection methods comply with privacy regulations. + +2. **Data Processing and Interpretation**: Examine how the data has been processed and interpreted. Verify that this processing does not inadvertently expose private information or allow for the re-identification of individuals. + +3. **Data Anonymization**: Assess the techniques used to anonymize the data, if applicable. Ensure that these techniques are robust enough to prevent re-identification. + +4. 
**Data Completeness and Accuracy**: Check for any missing, incomplete, or erroneous data that could lead to incorrect conclusions or biases that might impact individuals' privacy. + +5. **Relevancy Justifications**: Validate that all data included is relevant to the training objectives and does not include unnecessary personal information that could increase privacy risks. + +By thoroughly assessing these aspects, you can better understand and mitigate the data privacy risks associated with using training data for machine learning or other purposes.","To understand data privacy risks in the use of training data, it is important to conduct appropriate diligence on training data use to assess intellectual property and privacy risks, including examining whether the use of proprietary or sensitive training data is consistent with applicable laws.",0.2857142857142857,0.9868839320486401,0.0,0.9999999999,0.31584734941450265 +What measures are proposed in the Blueprint for an AI Bill of Rights to protect the rights of the American public?,[],"The Blueprint for an AI Bill of Rights proposes several measures to protect the rights of the American public, which include: + +1. **Safe and Effective Systems**: Ensuring that AI systems are safe and effective before being deployed. This includes ongoing monitoring for any potential risks or harms. + +2. **Algorithmic Discrimination Protections**: Guarding against discrimination by ensuring that AI systems do not have biases that could lead to unfair treatment of individuals based on race, gender, or other protected characteristics. + +3. **Data Privacy**: Safeguarding individuals' data privacy by ensuring that AI systems use data responsibly and transparently, with individuals having control over their personal information. + +4. **Notice and Explanation**: Providing individuals with clear and transparent information about how AI systems are being used and how decisions that affect them are made. + +5. **Human Alternatives, Consideration, and Fallback**: Ensuring that individuals have access to a human alternative when interacting with AI systems and that there is a fallback mechanism in case of malfunction or errors in the AI system. + +These measures are designed to ensure that AI systems uphold the rights and freedoms of individuals and operate in a manner that is transparent, fair, and accountable.","The Blueprint for an AI Bill of Rights proposes a set of five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. It includes expectations for automated systems, practical steps for implementation, and emphasizes transparency through reporting to ensure that rights, opportunities, and access are respected.",0.0,0.9930189744226393,0.0,0.0,0.837235082800928 +What actions were taken by the New York state legislature regarding biometric identifying technology in schools?,[],The New York state legislature passed a bill in June 2020 that placed a moratorium on the use of biometric identifying technology in schools until at least July 2022. This decision was made to allow time to study the technology's impact on student privacy and civil liberties.,"The New York state legislature banned the use of facial recognition systems and other biometric identifying technology in schools until July 1, 2022. 
Additionally, the law requires that a report on the privacy, civil rights, and civil liberties implications of the use of such technologies be issued before biometric identification technologies can be used in New York schools.",0.0,0.9933442163476744,0.0,0.0,0.7022592736659874 +What are the mental health impacts associated with increased use of surveillance technologies in schools and workplaces?,"[""the systems lack individualization and work against the belief that people can change for the better, system use can lead to the loss of jobs and custody of children, and surveillance can lead to chilling effects for communities and sends negative signals to community members about how they're viewed."" + 'Use of surveillance technologies has increased in schools and workplaces, and, when coupled with consequential management and evaluation decisions, it is leading to mental health harms such as lowered self-confidence, anxiety, depression, and']","The increased use of surveillance technologies in schools and workplaces has been associated with several mental health impacts. These include: + +- Lowered self-confidence +- Anxiety +- Depression + +Surveillance can lead to these mental health harms because it often involves consequential management and evaluation decisions, which can create stressful and oppressive environments for individuals.","The mental health impacts associated with increased use of surveillance technologies in schools and workplaces include lowered self-confidence, anxiety, depression, and a reduced ability to use analytical reasoning.",0.7142857142857143,0.9965261276697407,1.0,0.99999999995,0.7783199580171711 +What is the role of AI actors in the AI system lifecycle?,"['AI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","AI actors play crucial roles throughout the AI system lifecycle, ensuring the safe, effective, and ethical deployment and operation of AI systems. Here are key roles of AI actors: + +1. **AI Deployment**: This involves the actual implementation of AI systems in real-world environments. AI actors in this phase ensure that the AI system is properly integrated and operational. + +2. **AI Impact Assessment**: AI actors assess the potential impacts of AI systems on various stakeholders and environments. This includes evaluating risks, benefits, and any unintended consequences. + +3. **Domain Experts**: These are specialists with deep knowledge in specific fields relevant to the AI application. They provide essential insights and validate that the AI system is performing accurately and appropriately within the given domain. + +4. **End-Users**: These are the individuals or entities that interact with and utilize the AI system. Their feedback is crucial for ongoing improvements and identifying any issues that need to be addressed. + +5. **Operation and Monitoring**: AI actors continuously monitor AI systems to ensure they are functioning as intended. This includes tracking performance, identifying anomalies, and making necessary adjustments. + +6. 
**Testing, Evaluation, Verification, and Validation (TEVV)**: This involves rigorous testing to ensure that the AI system meets required specifications and standards. Verification and validation processes confirm that the system is built correctly and performs its intended functions. + +Documentation practices, such as logging, recording, and analyzing AI incidents, are also integral to these roles. Regular information sharing, maintaining change management records, version history, and metadata help AI actors respond to and manage incidents effectively, facilitating smoother collaboration and incident resolution.","AI actors play an active role in the AI system lifecycle, including organizations and individuals that deploy or operate AI.",1.0,0.9816597663298084,1.0,0.99999999995,0.4002437354174079 +What is the significance of human-AI configuration in ensuring the adequacy of GAI system user instructions?,[],"The human-AI configuration plays a crucial role in ensuring the adequacy of General Artificial Intelligence (GAI) system user instructions. Here are some key points highlighting its significance: + +1. **Understanding and Context**: A well-configured human-AI interaction ensures that the AI can understand the context and nuances of user instructions, which is vital for accurate and relevant responses. This involves the AI being trained on diverse datasets and designed to comprehend human language effectively. + +2. **Error Mitigation**: Proper configuration helps in identifying and mitigating errors in user instructions. The AI can prompt users for clarification or additional information, reducing the risk of misunderstandings and incorrect outputs. + +3. **Safety and Reliability**: Ensuring that the AI system is aligned with human values and ethical guidelines is essential for safety. Human-AI configuration helps in embedding these principles into the system, making it more reliable and trustworthy. + +4. **User-Friendly Design**: A significant aspect of configuration is making the AI system intuitive and easy to use. This involves designing interfaces and interactions that are user-friendly, which helps users provide clear and precise instructions to the AI. + +5. **Continuous Learning and Adaptation**: Human-AI configuration allows the system to learn and adapt from user interactions continuously. This iterative process helps in refining the system's understanding and performance, ensuring that user instructions are increasingly met with appropriate actions. + +In summary, the human-AI configuration is essential for creating an AI system that can accurately interpret and act on user instructions, maintain safety and ethical standards, and provide a user-friendly experience.",The significance of human-AI configuration in ensuring the adequacy of GAI system user instructions is highlighted in the context where it mentions verifying the adequacy of GAI system user instructions through user testing. This suggests that human-AI configuration plays a crucial role in assessing and improving the effectiveness of user instructions.,0.0,0.9147703172700018,0.0,0.0,0.4238218203644289 +What is the purpose of the AI Safety Institute established by NIST?,"['NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to continue the efforts set in motion by the E.O. 
to build the science necessary for safe, secure, and trustworthy development and use of AI.']","The purpose of the AI Safety Institute established by NIST is to build the science necessary for the safe, secure, and trustworthy development and use of AI. This involves continuing efforts to ensure that AI technologies are developed and utilized in ways that are reliable and do not pose undue risks to society.","The purpose of the AI Safety Institute established by NIST is to continue efforts to build the science necessary for safe, secure, and trustworthy development and use of artificial intelligence (AI), in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",0.3333333333333333,1.0,1.0,0.9999999999,0.8088404737956175 +What criteria does the framework use to determine which automated systems are in scope for the AI Bill of Rights?,[],"The framework for the AI Bill of Rights determines which automated systems are in scope based on their potential impact on individuals and society. Specifically, it focuses on automated systems that: + +1. Have the potential to meaningfully impact the public’s rights, opportunities, or access to critical resources or services. +2. Are used in settings where they can have consequential impacts on individuals or communities, particularly in contexts like employment, healthcare, education, or criminal justice. +3. Are designed to make or support decisions that could lead to significant effects on people’s lives, such as decisions about hiring, lending, or legal matters. + +If an automated system fits these criteria, it is considered to be within the scope of the AI Bill of Rights.","The framework uses a two-part test to determine which automated systems are in scope for the AI Bill of Rights: (1) automated systems that (2) have the potential to meaningfully impact the American public’s rights, opportunities, or access to critical resources or services.",0.0,0.9910136220396836,0.0,0.0,0.5417228030805904 +What procedures should be developed and updated in incident response and recovery plans for GAI systems when a previously unknown risk is identified?,[],"When dealing with General Artificial Intelligence (GAI) systems, it is crucial to have robust incident response and recovery plans in place to manage previously unknown risks. Here are some key procedures that should be developed and updated: + +1. **Risk Identification and Analysis:** + - **Continuous Monitoring:** Implement systems to continuously monitor the GAI for unusual or unexpected behaviors. + - **Risk Assessment:** Develop methodologies for assessing newly identified risks, including their potential impact and likelihood. + +2. **Incident Reporting:** + - **Reporting Mechanisms:** Establish clear procedures for reporting incidents, including who should report, how to report, and what information needs to be included. + - **Communication Channels:** Ensure there are secure and reliable communication channels for reporting incidents. + +3. **Incident Response Team:** + - **Team Formation:** Assemble a dedicated incident response team with diverse expertise, including AI ethics, technical, legal, and operational knowledge. + - **Roles and Responsibilities:** Clearly define the roles and responsibilities of each team member in responding to an incident. + +4. **Containment and Mitigation:** + - **Immediate Actions:** Develop protocols for immediate actions to contain the risk and prevent further damage. 
+ - **Mitigation Strategies:** Create strategies to mitigate the impact of the risk, which could include shutting down certain functionalities or isolating affected components. + +5. **Investigation and Analysis:** + - **Root Cause Analysis:** Implement procedures for conducting a thorough root cause analysis to understand how and why the risk emerged. + - **Data Collection:** Ensure that relevant data is collected and preserved to support the investigation. + +6. **Recovery Plans:** + - **System Restoration:** Develop plans for safely restoring system functionality after an incident. + - **Verification:** Include steps to verify that the system is functioning correctly and that the risk has been effectively mitigated. + +7. **Stakeholder Communication:** + - **Internal Communication:** Ensure that all internal stakeholders are informed about the incident and the steps being taken. + - **External Communication:** Prepare guidelines for communicating with external stakeholders, including regulators, customers, and the public. + +8. **Documentation and Reporting:** + - **Incident Documentation:** Keep detailed records of the incident, the response actions taken, and the outcomes. + - **Reporting Requirements:** Ensure compliance with any legal or regulatory reporting requirements. + +9. **Review and Update:** + - **Post-Incident Review:** Conduct a post-incident review to identify lessons learned and areas for improvement. + - **Plan Updates:** Regularly update the incident response and recovery plans based on new insights and evolving risks. + +10. **Training and Drills:** + - **Training Programs:** Develop and implement training programs for staff to ensure they are prepared to respond to incidents. + - **Simulation Drills:** Conduct regular simulation drills to test the effectiveness of the incident response and recovery plans. + +By implementing and regularly updating these procedures, organizations can be better prepared to handle previously unknown risks in GAI systems effectively.","Develop and update GAI system incident response and recovery plans and procedures to address the following: Review and maintenance of policies and procedures to account for newly encountered uses; Review and maintenance of policies and procedures for detection of unanticipated uses; Verify response and recovery plans account for the GAI system value chain; Verify response and recovery plans are updated for and include necessary details to communicate with downstream GAI system Actors: Points-of-Contact (POC), Contact information, notification format.",1.0,0.8651061065281809,0.0,0.0,0.3491648597914952 +What is the purpose of structured human feedback exercises in the context of GAI risk measurement and management?,[],"Structured human feedback exercises play a crucial role in the context of General Artificial Intelligence (GAI) risk measurement and management. The primary purposes of these exercises include: + +1. **Risk Identification and Assessment**: By collecting structured feedback from humans, it becomes possible to identify potential risks and failure modes that a GAI system might encounter. Human evaluators can provide insights into the nuanced and complex ways in which a GAI system could behave unexpectedly or cause harm. + +2. **Enhancing Training Data**: Structured feedback helps in creating high-quality training data, which can be used to improve the performance and safety of GAI systems. 
Feedback from humans can highlight specific areas where the AI may be lacking or making errors, allowing developers to fine-tune the system. + +3. **Evaluating Alignment**: Human feedback is essential for assessing whether a GAI system's actions align with human values and ethical standards. This helps in ensuring that the AI operates within acceptable moral and societal boundaries. + +4. **Monitoring and Correction**: Continuous structured feedback provides a mechanism for ongoing monitoring of the GAI system's behavior. It enables the detection of deviations from desired behavior and facilitates timely interventions to correct any issues. + +5. **Improving Robustness**: By understanding how GAI systems react to diverse inputs and scenarios, developers can enhance the robustness and reliability of these systems. Structured human feedback helps in identifying edge cases and stress-testing the AI, leading to more resilient systems. + +6. **Trust Building**: Transparent and systematic collection of human feedback can help build trust between the developers, users, and other stakeholders by demonstrating a commitment to safety and ethical considerations in AI deployment. + +In summary, structured human feedback exercises are essential for ensuring that GAI systems are safe, reliable, and aligned with human values, thereby mitigating potential risks associated with their deployment and operation.","The purpose of structured human feedback exercises in the context of GAI risk measurement and management is to define use cases, contexts of use, capabilities, and negative impacts where these exercises would be most beneficial. They are aimed at monitoring and improving outputs, evaluating the quality and integrity of data used in training, and tracking risks or opportunities related to GAI that cannot be measured quantitatively.",1.0,0.9410891609034383,0.0,0.0,0.8660376900116793 +What is the significance of human-AI configuration in managing GAI risks and ensuring information integrity?,['Human-AI Configuration; \nInformation Integrity'],"The significance of human-AI configuration in managing General Artificial Intelligence (GAI) risks and ensuring information integrity is multi-faceted: + +1. **Risk Management**: Proper human-AI configuration is essential to mitigate risks associated with GAI. This involves designing systems where human oversight and intervention are integral to AI operations. By establishing clear roles and responsibilities, humans can monitor AI behaviors, prevent unintended actions, and correct course when necessary. This reduces the likelihood of AI systems acting in ways that could be harmful or misaligned with human values. + +2. **Trusted Decision-Making**: Ensuring that AI systems are configured to work alongside humans helps maintain trust in decision-making processes. When humans have the ability to verify and validate AI outputs, they can ensure that the decisions are based on accurate and reliable information. This trust is crucial, especially in high-stakes domains like healthcare, finance, and security. + +3. **Ethical Considerations**: Human-AI configuration allows for the incorporation of ethical considerations into AI operations. Humans can enforce ethical guidelines and ensure that AI systems adhere to societal norms and regulations. This is particularly important as GAI systems become more autonomous and capable of making complex decisions. + +4. 
**Information Integrity**: Integrating humans into the loop helps maintain the integrity of information processed and generated by AI systems. Humans can detect and correct errors, biases, or misinformation that AI systems might produce. This oversight is crucial to prevent the spread of false information and ensure that AI outputs are accurate and trustworthy. + +5. **Adaptability and Learning**: Human-AI collaboration can enhance the learning and adaptability of AI systems. Humans can provide feedback and insights that help AI systems improve over time. This continuous learning process ensures that AI systems remain relevant and effective in dynamic environments. + +In summary, the human-AI configuration is vital for managing the risks associated with GAI and ensuring the integrity of information. It creates a framework where human oversight, ethical considerations, and continuous improvement are embedded into AI operations, thereby fostering safe, reliable, and trustworthy AI systems.","The significance of human-AI configuration in managing GAI risks and ensuring information integrity lies in its role in evaluating content lineage and origin, adapting training programs for digital content transparency, and delineating human proficiency tests from GAI capabilities. It also involves continual monitoring of human-GAI configurations and engaging end-users in prototyping and testing activities to address various scenarios, including crisis situations and ethically sensitive contexts.",0.9615384615384616,0.9577272667979959,0.0,0.9999999999,0.3231626240475285 +What criteria are used to measure AI system performance or assurance in deployment settings?,"['the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']","In deployment settings, AI system performance and assurance are measured using various criteria, including: + +1. **Accuracy**: The degree to which the AI system's predictions or outputs match the true or desired outcomes. +2. **Differential Demographic Impact**: Analyzing how the AI system's performance varies across different demographic groups to identify and mitigate any potential biases. +3. **Resulting Error Rates**: Evaluating the error rates both overall and per demographic group to ensure the system performs reliably for all users. +4. **Comparisons to Previously Deployed Systems**: Comparing the new AI system's performance to that of previously deployed systems to assess improvements or regressions. +5. **Ongoing Monitoring Procedures**: Implementing regular monitoring to continuously evaluate the system's performance over time. +6. **Performance Testing Reports**: Regularly generating reports from performance testing to document findings and track the system's behavior. +7. **Monitoring Frequency**: Establishing how often the system should be monitored to ensure consistent performance and timely detection of issues. + +These criteria help ensure that the AI system remains effective, fair, and reliable throughout its deployment.",AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for conditions similar to deployment setting(s). 
Measures are documented.,1.0,0.9961701370913699,0.0,0.9999999999,0.4629767269119318 +What are some suggested actions to address GAI risks in AI systems?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.']","To address risks associated with General Artificial Intelligence (GAI) in AI systems, several actions are recommended: + +1. **Documentation Practices**: Implement comprehensive logging, recording, and analyzing of GAI incidents. This practice helps in understanding and mitigating risks by learning from past incidents. + +2. **Information Sharing**: Regularly share information related to GAI incidents with relevant AI actors. This includes sharing change management records, version history, and metadata. + +3. **Incident Response**: Empower AI actors by providing them with necessary information to respond to and manage AI incidents effectively. + +These actions contribute to a more transparent and collaborative approach to managing GAI risks, ensuring that all stakeholders are informed and prepared to handle potential issues.","Some suggested actions to address GAI risks in AI systems include: applying and documenting ML explanation results such as analysis of embeddings, counterfactual prompts, gradient-based attributions, model compression/surrogate models, and occlusion/term reduction. Additionally, documenting GAI model details including proposed use and organizational value, assumptions and limitations, data collection methodologies, data provenance, data quality, model architecture, optimization objectives, training algorithms, RLHF approaches, fine-tuning or retrieval-augmented generation approaches, evaluation data, ethical considerations, and legal and regulatory requirements.",0.4666666666666667,0.9539490268630401,0.0,0.0,0.8565975630792517 +What role do GAI systems play in augmenting cybersecurity attacks?,"['GAI-based systems present two primary information security risks: GAI could potentially discover or enable new cybersecurity risks by lowering the barriers for or easing automated exercise of offensive capabilities; simultaneously, it expands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data poisoning. \nOffensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as hacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some vulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat actors might further these risks by developing GAI-powered security co-pilots for use in several parts of']","GAI (General Artificial Intelligence) systems can play a significant role in augmenting cybersecurity attacks in several ways: + +1. **Discovering Vulnerabilities**: GAI systems have the capability to identify vulnerabilities in hardware, software, and data systems. They can analyze large volumes of data quickly to find weaknesses that might be exploited. + +2. **Writing Exploit Code**: Once vulnerabilities are discovered, GAI systems can also generate code to exploit these vulnerabilities. This can make it easier for attackers to create and deploy malware or other malicious tools. + +3. 
**Automating Offensive Capabilities**: GAI can automate and enhance offensive cyber operations, such as hacking, malware deployment, and phishing attacks. This reduces the effort and expertise required to conduct such attacks, potentially increasing their frequency and sophistication. + +4. **Developing Security Co-pilots**: Sophisticated threat actors might utilize GAI to create advanced tools or ""security co-pilots"" that assist in planning and executing cyberattacks. These tools could provide real-time suggestions and optimizations for ongoing attacks, making them more effective. + +Overall, GAI systems can lower the barriers for conducting cyberattacks and enhance the capabilities of attackers, posing significant risks to information security.","GAI systems may augment cybersecurity attacks by advancing offensive cyber capabilities such as hacking, malware, and phishing. Reports indicate that large language models (LLMs) can discover vulnerabilities in systems and write code to exploit them. Sophisticated threat actors might develop GAI-powered security co-pilots to inform attackers on how to evade threat detection and escalate privileges after gaining system access.",0.7142857142857143,0.9816439757363168,0.6666666666666666,0.9999999999,0.5508268003870652 +What precautions should be taken when using derived data sources in automated systems?,"['justification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness.']","When using derived data sources in automated systems, several precautions should be taken to ensure the appropriateness and legality of their use: + +1. **Documentation and Justification**: Provide clear documentation and justification for each data attribute and source. Explain why it is appropriate to use the derived data to inform the results of the automated system. + +2. **Legal Compliance**: Ensure that the use of derived data does not violate any applicable laws. This includes considering privacy laws, data protection regulations, and any other legal constraints relevant to the data’s use. + +3. **Overall Descriptions**: For high-dimensional and/or derived attributes, provide comprehensive descriptions of the attribute generation process. This should include how the data was derived and why it is suitable for the intended use. + +By following these precautions, you can help ensure the responsible and legal use of derived data in automated systems.","Precautions that should be taken when using derived data sources in automated systems include careful tracking and validation of derived data, as it is viewed as potentially high-risk and may lead to feedback loops, compounded harm, or inaccurate results. Such data should be validated against the risk of collateral consequences.",0.8571428571428571,0.9999999999999992,0.0,0.9999999999,0.31954470829501536 +What are the implications of the lack of explanation for decisions made by automated systems?,['The lack of an explanation made it harder for errors to be corrected in a timely manner.'],"The lack of explanation for decisions made by automated systems can have significant implications: + +1. 
**Difficulty in Error Correction:** Without clear explanations, it becomes challenging to identify and correct errors in the system’s decisions in a timely manner. This can lead to persistent inaccuracies and potentially harmful outcomes. + +2. **Transparency and Trust:** A lack of explanation can undermine transparency and reduce trust in the system. Users and stakeholders may be less likely to trust decisions if they do not understand how they are made. + +3. **Accountability:** It becomes harder to hold the system or its developers accountable for decisions if the rationale behind those decisions is not clear. This can be problematic in scenarios where decisions have significant consequences. + +4. **Regulatory Compliance:** Many regulatory frameworks require explanations for automated decisions, especially in sensitive areas like finance, healthcare, and criminal justice. Lack of explanations can result in non-compliance with these regulations. + +5. **User Experience:** Users are more likely to be satisfied with and accept decisions if they understand the reasoning behind them. Lack of explanations can lead to frustration and reduced user satisfaction. + +6. **Bias and Fairness:** Without explanations, it is difficult to identify and mitigate biases in the system’s decision-making processes. This can perpetuate or even exacerbate existing biases and lead to unfair outcomes. + +In summary, the lack of explanation for decisions made by automated systems can hinder error correction, reduce transparency and trust, complicate accountability, pose regulatory challenges, negatively impact user experience, and perpetuate biases.","The lack of explanation for decisions made by automated systems can lead to several implications, including the inability for individuals to understand or contest decisions that affect their lives. For instance, applicants may not know why their resumes were rejected, defendants may be unaware if their bail decisions are influenced by an automated system labeling them as 'high risk', and individuals may face difficulties in correcting errors or contesting decisions due to a lack of transparency. This opacity can result in unaccountable decision-making processes and can hinder the public's ability to trust the validity and reasonable use of automated systems.",0.84,0.9999999999999997,0.4,0.9999999999,0.39682659519163077 +What should users be notified about regarding automated systems that impact them?,"['You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality changes. You should know how and why an outcome impacting you was determined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. 
Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who']","Users should be notified about the following aspects regarding automated systems that impact them: + +1. **Overall System Functioning**: Clear descriptions of how the system works and the role that automation plays in it. +2. **System Use**: Notice that such automated systems are being used. +3. **Responsible Parties**: Information about the individual or organization responsible for the system. +4. **Explanation of Outcomes**: Explanations of outcomes that are clear, timely, and accessible. +5. **Updates and Changes**: Notifications about significant use case or key functionality changes. +6. **Outcome Determination**: How and why an outcome impacting the user was determined by the automated system, including when the system is not the sole input determining the outcome. + +These notifications should be kept up-to-date to ensure users are always informed about the systems that affect them.","Users should be notified about the use of automated systems, the individual or organization responsible for the system, significant use case or key functionality changes, and how and why an outcome impacting them was determined by the automated system.",0.9230769230769231,0.9872824094511937,1.0,0.9999999999,0.5719857870815963 +What are the key considerations regarding data privacy in the context of the AI Bill of Rights?,[],"The AI Bill of Rights emphasizes several key considerations regarding data privacy to ensure the responsible use of artificial intelligence. These considerations typically include: + +1. **Data Minimization**: Collect only the data that is strictly necessary for the AI system to function, reducing the risk of sensitive information being misused or exposed. + +2. **Transparency**: Ensure that individuals are informed about what data is being collected, how it is being used, and with whom it may be shared. This includes clear communication about AI data practices. + +3. **Consent**: Obtain explicit consent from individuals before collecting their data, ensuring they understand how their information will be used and the potential risks involved. + +4. **Security**: Implement robust security measures to protect data from breaches, unauthorized access, and other threats. This can include encryption, access controls, and regular security audits. + +5. **Data Access and Control**: Provide individuals with the ability to access, review, and correct their data. They should also have the right to request the deletion of their data if it is no longer necessary for the intended purpose. + +6. **Accountability**: Establish mechanisms to hold organizations accountable for their data practices, including regular audits and the ability to report and address privacy violations. 
+ +These principles aim to protect individual privacy rights while enabling the beneficial uses of AI technologies.",The answer to given question is not present in context,0.0,0.9917980246812917,1.0,0.0,0.17310271430887053 +What measures should be taken during disparity assessment of automated systems to ensure inclusivity and fairness?,"['The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classification protected by law. The broad set of measures assessed should include demographic performance measures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identified groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections.' + 'the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems;']","To ensure inclusivity and fairness during the disparity assessment of automated systems, several measures should be taken: + +1. **Inclusive Demographics:** The assessed groups should include a broad and inclusive range of demographics, such as race, color, ethnicity, sex (including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual orientation), religion, age, national origin, disability, veteran status, genetic information, or any other classification protected by law. + +2. **Comprehensive Measures:** The assessment should include a broad set of measures such as demographic performance measures, overall and subgroup parity assessment, and calibration. + +3. **Separation of Data:** Demographic data collected for disparity assessment should be separated from the data used for the automated system to protect privacy. + +4. **Privacy Protections:** Privacy protections should be instituted, and in some cases, it may be appropriate to perform the assessment using a data sample to enhance privacy. + +5. **Documentation of Disparities:** For every instance where the deployed automated system leads to different treatment or impacts disfavoring the identified groups, the governing, implementing, or using entity should document the disparity and provide a justification for any continued use of the system. + +6. **Algorithmic Impact Assessment:** Evaluation and plain language reporting in the form of an algorithmic impact assessment, including disparity testing results and mitigation information, should be performed and made public whenever possible. + +7. 
**Performance Testing Results:** The results of performance testing should include accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems. + +By implementing these measures, entities can work towards ensuring that their automated systems are fair and inclusive, minimizing negative impacts on any particular demographic group.","During disparity assessment of automated systems, measures should include testing using a broad set of measures to assess whether the system components produce disparities. The demographics of the assessed groups should be as inclusive as possible, covering aspects such as race, color, ethnicity, sex, religion, age, national origin, disability, and other classifications protected by law. The assessment should include demographic performance measures, overall and subgroup parity assessment, and calibration. Additionally, demographic data collected for disparity assessment should be separated from data used for the automated system, and privacy protections should be instituted.",0.8333333333333334,0.9907787341089711,1.0,0.9999999999666667,0.9918578262634513 +What are the potential risks associated with generative AI models in the context of disinformation and cybersecurity?,[],"Generative AI models pose several potential risks in the context of disinformation and cybersecurity: + +1. **Disinformation**: + - **Creation of Fake Content**: Generative AI can produce highly realistic text, images, and videos, making it easier to create and spread false information. This can mislead the public, influence elections, and damage reputations. + - **Deepfakes**: AI can generate convincing deepfake videos and audio recordings, which can be used to impersonate individuals, spread false narratives, or create fake news. + - **Amplification of Misinformation**: AI can automate the production and dissemination of false information on social media, making it more challenging to identify and counteract. + +2. **Cybersecurity**: + - **Phishing and Social Engineering**: Generative AI can craft highly personalized and convincing phishing emails or messages, increasing the likelihood that targets will be deceived. + - **Automated Attacks**: AI can be used to automate and scale cyberattacks, such as generating malicious code or identifying vulnerabilities in systems faster than human hackers. + - **Data Poisoning**: Adversaries can use AI to manipulate training data, leading to compromised or biased AI models. This can degrade the performance of AI systems or cause them to behave in unintended ways. + +These risks highlight the need for robust AI safety measures, including enhanced detection techniques, regulatory frameworks, and public awareness efforts to mitigate the potential harms associated with generative AI.","The potential risks associated with generative AI models in the context of disinformation include the ease of producing or disseminating false, inaccurate, or misleading content at scale, both unintentionally (misinformation) and deliberately (disinformation). GAI systems can enable malicious actors to create targeted disinformation campaigns, generate realistic deepfakes, and produce compelling imagery and propaganda. 
In terms of cybersecurity, GAI models may lower barriers for offensive capabilities, expand the attack surface, and assist in discovering vulnerabilities and writing exploit code, thereby augmenting cybersecurity attacks such as hacking, malware, and phishing.",1.0,0.9887289530836827,0.0,0.0,0.9883766056365315 +What topics were discussed regarding potential harms and oversight in the development of the AI Bill of Rights?,[],"The development of the AI Bill of Rights discussed several key topics related to potential harms and oversight, including: + +1. **Discriminatory Outcomes**: Ensuring AI systems do not perpetuate or exacerbate discrimination. This involves scrutinizing the data and algorithms used to prevent biased outcomes. + +2. **Privacy Violations**: Addressing concerns over how AI systems collect, store, and use personal data, advocating for robust privacy protections. + +3. **Lack of Transparency**: Promoting the need for AI systems to be transparent and explainable, so that users and regulators can understand how decisions are made. + +4. **Accountability Mechanisms**: Establishing clear accountability frameworks to hold developers and deployers of AI systems responsible for their impacts. + +5. **Safety and Security**: Ensuring AI systems are safe and secure, preventing them from being misused or causing unintended harm. + +These discussions aim to create guidelines and principles that safeguard individuals' rights and promote responsible AI development and deployment.","The context mentions that some meetings focused on providing ideas related to the development of the Blueprint for an AI Bill of Rights, and others provided useful general context on the positive use cases, potential harms, and/or oversight possibilities for these technologies. However, specific topics discussed regarding potential harms and oversight are not detailed in the provided context.",0.0,0.950745665614965,0.0,0.0,0.868285893912075 +What measures should be in place to ensure human alternatives and consideration in the use of automated systems?,"['receiving community input into the design and use of technologies, public reporting on crucial elements of these systems, better notice and consent procedures that ensure privacy based on context and use case, ability to opt-out of using these systems and receive a fallback to a human process, providing explanations of decisions and how these systems work, the need for governance including training in using these systems, ensuring the technological use cases are genuinely related to the goal task and are locally validated to work, and the need for institution and protection of third party audits to ensure systems continue to be accountable and valid.']","To ensure human alternatives and considerations in the use of automated systems, several measures should be in place: + +1. **Opt-out Mechanisms**: Users should have the ability to opt-out of using automated systems and receive a fallback to a human process. This ensures that those who are uncomfortable or disadvantaged by automation are not forced to use it. + +2. **Explanation of Decisions**: Providing clear explanations of how decisions are made by automated systems and how these systems work can help users understand and trust the technology while ensuring they can challenge decisions and seek human intervention when necessary. + +3. 
**Community Input**: Receiving community input into the design and use of these technologies can help ensure they meet the needs and values of the people they affect and allow for the consideration of diverse perspectives. + +4. **Consent Procedures**: Better notice and consent procedures that ensure privacy based on context and use case are essential. Users should be well-informed about the use of automated systems and their rights regarding opting out. + +5. **Public Reporting**: Public reporting on crucial elements of these systems promotes transparency, allowing the public to stay informed and hold organizations accountable for the use of automated systems. + +6. **Governance and Training**: Proper governance, including training for those using these systems, ensures that they are used appropriately and that human oversight remains a critical component. + +7. **Validation and Accountability**: Ensuring that technological use cases are genuinely related to the goal task and are locally validated to work is important. Additionally, third-party audits should be instituted and protected to maintain accountability and validate the systems' ongoing effectiveness and fairness. + +By implementing these measures, organizations can ensure that automated systems are used ethically and that human alternatives and considerations are always available.","Measures to ensure human alternatives and consideration in the use of automated systems include the ability to opt out from automated systems in favor of a human alternative where appropriate, access to timely human consideration and remedy through a fallback and escalation process if an automated system fails, and ensuring that human consideration and fallback are accessible, equitable, effective, and maintained. Additionally, automated systems in sensitive domains should be tailored to their purpose, provide meaningful access for oversight, include training for people interacting with the system, and incorporate human consideration for adverse or high-risk decisions.",0.5666666666666667,0.9982870170545075,0.42857142857142855,0.9999999999,0.5917610740109382 +What measures should be taken to ensure that automated systems are safe and effective?,"['to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible.' + 'risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Some companies have put in place safeguards that have prevented harm from occurring by ensuring that key development decisions are vetted by an ethics review; others have identified and mitigated harms found through pre-deployment testing and ongoing monitoring processes. 
Governments at all levels have existing public consultation processes that may be applied when considering the use of new automated systems, and existing product development and testing practices already protect the American public from many potential harms.']","To ensure that automated systems are safe and effective, the following measures should be taken: + +1. **Independent Evaluation and Reporting**: Conduct independent evaluations to confirm the system's safety and effectiveness. This includes reporting the steps taken to mitigate potential harms. The results of these evaluations should be made public whenever possible. + +2. **Risk Identification and Management**: Carry out risk identification and management assessments to recognize and manage potential risks. This includes taking proactive steps to mitigate any identified harms. + +3. **Performance Testing**: Perform comprehensive performance testing to evaluate accuracy, differential demographic impacts, error rates (both overall and per demographic group), and comparisons to previously deployed systems. + +4. **Ongoing Monitoring**: Implement ongoing monitoring procedures and conduct regular performance testing. This includes determining the monitoring frequency and updating the system as needed based on the findings. + +5. **Ethics Review**: Ensure that key development decisions are vetted by an ethics review to prevent potential harm. + +6. **Public Consultation Processes**: Apply existing public consultation processes when considering the deployment of new automated systems. This helps to incorporate public input and address any concerns prior to deployment. + +7. **Data Protection**: Protect individuals from inappropriate or irrelevant data use in the design, development, and deployment of automated systems. Additionally, safeguard against the compounded harm of data reuse. + +By implementing these measures, the safety and effectiveness of automated systems can be better ensured, minimizing potential harms and maximizing benefits.","To ensure that automated systems are safe and effective, measures should include consultation with diverse communities, stakeholders, and domain experts to identify concerns and risks. Systems should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring. These measures should demonstrate safety and effectiveness based on intended use, mitigate unsafe outcomes, and adhere to domain-specific standards. Additionally, independent evaluation and reporting should confirm safety and effectiveness, with results made public whenever possible.",0.7333333333333333,0.9875539117546253,0.5,0.9999999999666667,0.6106494028725776 +What considerations should be taken into account when using automated systems in sensitive domains?,"['The appropriate application of the principles set forth in this white paper depends significantly on the context in which automated systems are being utilized. In some circumstances, application of these principles' + 'For systems used in sensitive domains, reporting should include information about training and governance procedures for these technologies.' + '• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Data provenance']","When using automated systems in sensitive domains, the following considerations should be taken into account: + +1.
**AI Actor Credentials and Qualifications**: Ensure that the individuals or entities developing and managing the AI systems possess the necessary expertise and qualifications. + +2. **Alignment to Organizational Values**: Verify that the automated systems are aligned with the core values and ethical standards of the organization. + +3. **Auditing and Assessment**: Implement robust auditing and assessment procedures to regularly evaluate the performance, fairness, and safety of the AI systems. + +4. **Change-Management Controls**: Establish clear change-management controls to oversee and document any modifications made to the systems, ensuring that changes do not introduce new risks. + +5. **Data Provenance**: Maintain accurate records of the sources and quality of the data used to train and operate the AI systems, ensuring that the data is appropriate and reliable for the intended application. + +In addition to these considerations, it's also important to report on training and governance procedures for these technologies to ensure transparency and accountability.","When using automated systems in sensitive domains, considerations should include tailoring the systems to their intended purpose, providing meaningful access for oversight, ensuring training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions.",0.6666666666666666,0.9999999999999999,0.25,0.8333333332916666,0.8253567264372446 +What are some examples of harms caused by algorithmic bias in automated systems?,[],"Algorithmic bias in automated systems can lead to a variety of harms across different sectors. Some examples include: + +1. **Discrimination in Hiring**: Automated hiring systems may inadvertently favor certain demographics over others. For example, an AI system trained on historical hiring data might replicate existing biases, leading to gender or racial discrimination. + +2. **Bias in Criminal Justice**: Predictive policing algorithms might disproportionately target minority communities, leading to over-policing and unfair treatment. Similarly, risk assessment tools used in sentencing can result in harsher penalties for certain groups. + +3. **Healthcare Inequities**: AI systems used in healthcare can perpetuate biases if they are trained on non-representative data. For instance, an algorithm used to predict patient outcomes might not perform well for minority groups, leading to disparities in treatment. + +4. **Financial Services**: Credit scoring algorithms may unfairly disadvantage individuals from certain socioeconomic backgrounds. For example, a biased algorithm might deny loans to minority applicants at a higher rate than to others with similar financial profiles. + +5. **Content Moderation**: Automated systems used to filter and moderate online content can exhibit biases, leading to the unequal treatment of different groups. For example, these systems might disproportionately flag content from certain communities as inappropriate. + +These examples illustrate the importance of addressing algorithmic bias to ensure fairness and equity in automated decision-making systems.","Examples of harms caused by algorithmic bias in automated systems include: 1) A proprietary model predicting sepsis in hospitalized patients that underperformed and caused alert fatigue by falsely alerting likelihood of sepsis. 
2) An automated moderation system on social media that silenced Black people who quoted and criticized racist messages, failing to distinguish their counter speech from the original hateful messages. 3) A device meant to help track lost items being misused by stalkers to track victims' locations, despite manufacturer attempts to implement safety measures. 4) An algorithm used for police deployment that sent officers to neighborhoods they regularly visited, rather than those with the highest crime rates, due to a feedback loop from previous data and predictions.",1.0,0.9669956621343284,0.0,0.0,0.7681683090963873 +What are the challenges associated with value chain and component integration in GAI systems?,[],"The challenges associated with value chain and component integration in Generative Artificial Intelligence (GAI) systems are multifaceted and can significantly impact the safety and effectiveness of these systems. Some key challenges include: + +1. **Complexity of Integration**: GAI systems often involve integrating numerous components, each with different functionalities and requirements. Ensuring seamless interoperability between these components can be highly complex and technically demanding. + +2. **Consistency and Coherence**: Ensuring that all integrated components work together consistently and coherently is crucial. Inconsistencies can lead to unpredictable behavior, which is particularly concerning in safety-critical applications. + +3. **Security Vulnerabilities**: Integrating multiple components can introduce new security vulnerabilities. Each component may have its own set of security challenges, and the integration process itself can create additional attack vectors. + +4. **Data Management**: GAI systems rely on large amounts of data from various sources. Ensuring that this data is properly managed, synchronized, and processed across all components is essential to maintain system integrity and performance. + +5. **Ethical and Value Alignment**: Ensuring that the integrated system adheres to ethical guidelines and aligns with human values is a significant challenge. Different components may have been designed with different ethical considerations, and integrating them requires careful alignment to avoid conflicts. + +6. **Scalability**: As GAI systems grow in complexity, scaling the integration of components without degrading performance or introducing errors becomes increasingly difficult. + +7. **Validation and Testing**: Comprehensive validation and testing of GAI systems are challenging due to their complexity. Ensuring that the integrated system performs as expected in all scenarios, including edge cases, requires extensive testing. + +8. **Maintenance and Upgrades**: Maintaining and upgrading GAI systems can be challenging due to the interdependencies between components. Changes in one part of the system can have unforeseen effects on others, making maintenance a delicate process. + +9. **Regulatory Compliance**: GAI systems must comply with various regulatory standards, which can vary by region and application. Ensuring that all components and their integration meet these standards adds another layer of complexity.
+ +Addressing these challenges requires a multidisciplinary approach, involving expertise in software engineering, cybersecurity, ethics, data science, and regulatory affairs, among others.","Challenges associated with value chain and component integration in GAI systems include the improper acquisition or vetting of third-party components such as datasets, pre-trained models, and software libraries, which can lead to diminished transparency and accountability. The scale of training data may be too large for humans to vet, and the difficulty of training foundation models can result in extensive reuse of a limited number of models. Additionally, it may be difficult to attribute issues in a system's behavior to any one of these sources, and errors in third-party GAI components can have downstream impacts on accuracy and robustness.",1.0,0.968024263195462,0.0,0.0,0.8747999107749116 +What considerations should be taken into account when determining model release approaches?,"['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance']","When determining model release approaches, several considerations should be taken into account to ensure AI safety and ethical use. These considerations include: + +1. **External Use**: Define the scope of external use and ensure that the application of the AI model aligns with intended purposes and regulatory requirements. + +2. **Narrow vs. Broad Application Scope**: Decide whether the model is designed for narrow or broad applications. Narrow applications are easier to control and monitor, while broad applications may pose higher risks and require more extensive safeguards. + +3. **Fine-Tuning**: Consider the potential risks and benefits of allowing fine-tuning of the model by users. Fine-tuning can improve performance for specific tasks but may also introduce vulnerabilities or biases. + +4. **Varieties of Data Sources**: Evaluate the types of data sources used for training and grounding the model. Ensure that these sources are reliable, diverse, and free from biases to the greatest extent possible. + +5. **Data Privacy**: Address data privacy concerns to protect sensitive information. Ensure compliance with data protection laws and regulations. + +6. **Intellectual Property**: Protect intellectual property rights associated with the model. This includes managing licensing, usage rights, and potential infringement issues. + +7. **Accessibility and Accommodations**: Ensure that the model is accessible to a diverse range of users, including those with disabilities. Provide reasonable accommodations where necessary. + +8. **AI Actor Credentials and Qualifications**: Verify the qualifications and credentials of individuals or organizations using or deploying the model. This helps ensure responsible and knowledgeable use. + +9. **Alignment to Organizational Values**: Ensure that the model's deployment aligns with the ethical values and mission of the organization. Avoid applications that may conflict with these values. + +10. **Auditing and Assessment**: Implement regular auditing and assessment mechanisms to monitor the model's performance, safety, and ethical compliance. + +11.
**Change-Management Controls**: Establish controls to manage changes to the model, including updates, retraining, and modifications. This helps maintain stability and reliability. + +12. **Commercial Use**: Consider the implications of commercializing the model, including potential impacts on different stakeholders and the broader market. + +13. **Data Provenance**: Maintain clear records of data provenance, ensuring that the origins and history of the data used to train the model are well-documented and traceable. + +By addressing these considerations, organizations can release AI models in a way that maximizes benefits while mitigating risks and ensuring ethical and responsible use.","When determining model release approaches, considerations should include documenting trade-offs, decision processes, and relevant measurement and feedback results for risks that do not surpass organizational risk tolerance. Additionally, different approaches for model release should be considered, such as leveraging a staged release approach and evaluating release approaches in the context of the model and its projected use cases.",0.6129032258064516,0.9558957567577732,0.0,0.99999999995,0.6188036636523309 +What considerations should be taken into account regarding intellectual property when conducting diligence on training data use?,[],"When conducting diligence on training data use, several considerations should be taken into account regarding intellectual property (IP): + +1. **Ownership and Rights Clearance**: Ensure that the data being used for training the AI model is either owned by the organization or that proper licenses and permissions have been obtained from the rightful owners. This helps prevent potential legal disputes over unauthorized use of data. + +2. **Licensing Terms and Conditions**: Carefully review the licensing agreements associated with any third-party data. Understand the terms and conditions, including any restrictions on usage, modification, redistribution, and sublicensing, to ensure compliance. + +3. **Data Provenance and Quality**: Verify the source of the data and its quality. Data should be sourced from reputable and legal channels. Maintaining records of data provenance helps in establishing the legitimacy of the data used. + +4. **Fair Use and Exceptions**: Understand the scope of fair use and any relevant exceptions under IP law that might apply to the use of data for training purposes. This can vary by jurisdiction and specific use case. + +5. **Derivative Works and Output**: Consider the IP implications of the output generated by the AI model. Determine whether the output could be considered a derivative work of the training data and what rights the original data owners might have over such output. + +6. **Confidentiality and Trade Secrets**: Ensure that the training data does not contain confidential information or trade secrets that could be inadvertently exposed or misused through the AI model. + +7. **Ethical and Legal Compliance**: Beyond legal compliance, consider the ethical implications of using certain types of data, particularly when it involves personal or sensitive information. Adhere to relevant data protection regulations and ethical guidelines. + +8. **Documentation and Audit Trails**: Maintain thorough documentation of data sources, usage permissions, and any legal advice obtained. This helps create an audit trail that can be useful in case of any IP disputes or compliance checks. 
+ +By addressing these considerations, organizations can mitigate IP risks associated with training data and ensure that their AI systems are developed in a legally compliant and ethically responsible manner.","Considerations regarding intellectual property when conducting diligence on training data use include assessing risks related to intellectual property and privacy, and examining whether the use of proprietary or sensitive training data is consistent with applicable laws.",1.0,0.9804203955240482,0.0,0.0,0.2929274494566979 +What are some examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights?,[],"The Blueprint for an AI Bill of Rights aims to ensure that automated systems are designed and deployed in ways that protect the rights and safety of individuals. Examples of automated systems that should be covered by this blueprint include: + +1. **Hiring Algorithms**: Systems used to screen job applicants and make hiring decisions. +2. **Credit Scoring Systems**: Algorithms that assess creditworthiness and determine eligibility for loans or credit. +3. **Healthcare Diagnostics**: Automated systems that aid in diagnosing diseases or recommending treatments. +4. **Surveillance Systems**: Facial recognition and other monitoring technologies used in public and private spaces. +5. **Educational Tools**: AI systems used to evaluate student performance or provide personalized learning experiences. +6. **Law Enforcement Tools**: Predictive policing algorithms and systems used to assess the risk of reoffending. +7. **Social Media Algorithms**: Systems that curate content and influence what users see on platforms. + +These systems should be designed with fairness, transparency, and accountability in mind to protect individuals from potential harms such as discrimination, privacy invasion, and unfair treatment.",Examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights include: speech-related systems such as automated content moderation tools; surveillance and criminal justice system algorithms like risk assessments and predictive policing; voting-related systems such as signature matching tools; privacy-impacting systems like smart home systems and health-related data systems; education-related systems such as algorithms for detecting student cheating; housing-related systems like tenant screening algorithms; and employment-related systems that inform terms of employment.,0.043478260869565216,0.9287153161163794,0.0,0.0,0.8152041000591259 +What are some concerns related to data privacy in the context of sensitive domains?,"['Data and metadata are sensitive if they pertain to an individual in a sensitive domain (defined below); are generated by technologies used in a sensitive domain; can be used to infer data from a sensitive domain or sensitive data about an individual (such as disability-related data, genomic data, biometric data, behavioral data, geolocation data, data related to interaction with the criminal justice system, relationship history and legal status such as custody and divorce information, and home, work, or school environmental data); or have the reasonable potential to be used in ways that are likely to expose individuals to meaningful harm, such as a loss of privacy or financial harm' + 'SENSITIVE DOMAINS: “Sensitive domains” are those in which activities being conducted can cause material \nharms, including significant adverse effects on human rights such as autonomy and dignity, as well as civil
liberties and civil rights. Domains that have historically been singled out as deserving of enhanced data protections \nor where such enhanced protections are reasonably expected by the public include, but are not limited to, \nhealth, family planning and care, employment, education, criminal justice, and personal finance. In the context \nof this framework, such domains are considered sensitive whether or not the specifics of a system context \nwould necessitate coverage under existing law, and domains and data that are considered sensitive are understood to change over time based on societal norms and context.' + '“Sensitive domains” are those in which activities being conducted can cause material harms, including significant adverse effects on human rights such as autonomy and dignity, as well as civil liberties and civil rights. Domains that have historically been singled out as deserving of enhanced data protections or where such enhanced protections are reasonably expected by the public include, but are not limited to, health, family planning and care, employment, education, criminal justice, and personal finance.' + 'Enhanced protections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and related inferences should only be used for necessary functions, and you should be protected by ethical review and use prohibitions. You and your communities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least pre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties.']","Concerns related to data privacy in the context of sensitive domains include: + +1. **Material Harms**: Data activities in sensitive domains can cause significant adverse effects on human rights such as autonomy and dignity, as well as affect civil liberties and civil rights. + +2. **Enhanced Data Protections**: Sensitive domains historically have required enhanced data protections due to the potential for significant harm. This includes domains such as health, family planning and care, employment, education, criminal justice, and personal finance. + +3. **Inference Risks**: Data in sensitive domains can be used to infer sensitive information about individuals, such as disability status, biometric data, behavioral patterns, geolocation, criminal justice interactions, relationship history, and legal status. These inferences can lead to privacy breaches and other harms. + +4. **Exposure to Harm**: There is a reasonable potential for data in sensitive domains to be used in ways that expose individuals to meaningful harm, such as loss of privacy or financial harm. + +5. **Surveillance Concerns**: Unchecked surveillance in sensitive domains can infringe on privacy and civil liberties. Surveillance technologies in these domains should be subject to heightened oversight, including pre-deployment assessments of potential harms and scope limits. + +6. **Ethical Review**: Use of data and related inferences in sensitive domains should be subject to ethical review and use prohibitions to protect individuals and communities from harm. + +7.
**Changing Norms**: The understanding of what constitutes sensitive data and domains can change over time based on societal norms and context, necessitating continuous evaluation and adjustment of data protection measures. + +In summary, data privacy concerns in sensitive domains revolve around the potential for significant harm, the need for enhanced protections, the risks of harmful inferences, surveillance issues, the necessity of ethical oversight, and the evolving nature of what is considered sensitive.","Concerns related to data privacy in sensitive domains include the lack of awareness among patients regarding the use of their medical data by insurance companies, the revelation of personal information (such as pregnancy) through targeted advertising, the monitoring of student conversations which may limit emotional expression and unfairly flag students with disabilities, the use of location data to identify individuals visiting abortion clinics, the collection of sensitive student data without parental consent, and the potential for discriminatory impacts from such data usage. Additionally, there are concerns about the accuracy of employee data transferred to third parties, which can affect job opportunities.",1.0,0.9935082489812371,1.0,0.999999999975,0.8531956410700543 +What considerations should be taken into account when reviewing vendor contracts for third-party GAI technologies?,['• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance'],"When reviewing vendor contracts for third-party Generative Artificial Intelligence (GAI) technologies, several critical considerations should be taken into account to ensure alignment with organizational values and to mitigate potential risks. Here are key factors to consider: + +1. **AI Actor Credentials and Qualifications**: + - Verify the vendor's qualifications, expertise, and track record in developing and maintaining GAI technologies. + - Assess the experience and credibility of the vendor's team members to ensure they are capable of delivering a secure and reliable product. + +2. **Alignment to Organizational Values**: + - Ensure that the vendor's practices and the AI technology align with your organization's ethical standards, mission, and values. + - Evaluate whether the AI technology supports your organization's goals without compromising on ethical considerations. + +3. **Auditing and Assessment**: + - Include provisions for regular audits and assessments of the AI technology to monitor its performance, security, and compliance with agreed standards. + - Ensure that the vendor agrees to transparency and cooperation during these auditing processes. + +4. **Change-Management Controls**: + - Define clear change-management procedures to handle updates, upgrades, and modifications to the AI technology. + - Ensure that any changes are documented, reviewed, and approved before implementation to prevent unintended consequences. + +5. **Commercial Use**: + - Clarify the terms of commercial use, including licensing, intellectual property rights, and any restrictions on how the AI technology can be used. + - Ensure that the terms are fair and do not impose unreasonable limitations on your organization's use of the technology. + +6. **Data Provenance**: + - Ensure the vendor provides clear information about the origin, quality, and governance of the data used to train and operate the AI technology.
+ - Verify that the data complies with legal and regulatory standards, and that the vendor has the right to use the data for these purposes. + +By addressing these considerations, you can better manage the risks associated with integrating third-party GAI technologies into your organization and ensure that the technology serves its intended purpose effectively and ethically.","When reviewing vendor contracts for third-party GAI technologies, considerations should include avoiding arbitrary or capricious termination of critical GAI technologies or vendor services, avoiding non-standard terms that may amplify or defer liability in unexpected ways, and preventing unauthorized data collection by vendors or third-parties. Additionally, there should be a clear assignment of liability and responsibility for incidents, acknowledgment of GAI system changes over time, and requirements for notification and disclosure for serious incidents arising from third-party data and systems. Service Level Agreements (SLAs) in vendor contracts should also address incident response, response times, and availability of critical support.",0.5238095238095238,0.9354318240594206,0.0,0.9999999999,0.7945972287522352 +What are the expectations for ensuring that automated systems are safe and effective?,"['to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, development, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.']","The expectations for ensuring that automated systems are safe and effective include: + +1. **Proactive Protection**: Safeguards should be put in place to protect users from harms that could arise from unintended, yet foreseeable, uses or impacts of automated systems. + +2. **Appropriate Data Use**: Users should be protected from inappropriate or irrelevant data use in the design, development, and deployment of automated systems. This includes preventing the compounded harm that might result from the reuse of such data. + +3. **Independent Evaluation**: There should be independent evaluation and reporting to confirm that the system is safe and effective. This includes documenting the steps taken to mitigate potential harms. + +4. **Transparency**: The results of the evaluations and reports should be made public whenever possible to ensure transparency and accountability. + +These measures are essential to ensure that automated systems operate safely and effectively, minimizing potential risks to users.","The expectations for ensuring that automated systems are safe and effective include: 1) Safeguards to protect the public from harm in a proactive and ongoing manner; 2) Avoiding the use of data that is inappropriate or irrelevant to the task at hand; 3) Demonstrating the safety and effectiveness of the system.
Additionally, there should be consultation with the public during the design and implementation phases, extensive testing before deployment, and identification and mitigation of potential risks.",0.5714285714285714,1.0000000000000002,0.5,0.9999999999,0.5865495931606068 +What are the key components of risk identification and mitigation in the development of automated systems?,['risk identification and management assessments and any steps taken to mitigate potential harms'],"The key components of risk identification and mitigation in the development of automated systems typically include: + +1. **Risk Identification**: + - **Hazard Analysis**: Identifying potential hazards that the automated system could encounter or cause. + - **Threat Modeling**: Understanding how the system could be exploited or fail, including both internal and external threats. + - **Vulnerability Assessment**: Identifying weaknesses in the system that could be exploited or could lead to failures. + +2. **Risk Assessment**: + - **Impact Analysis**: Evaluating the potential consequences of identified risks, including safety, security, financial, and reputational impacts. + - **Likelihood Estimation**: Assessing the probability of each identified risk occurring. + - **Risk Prioritization**: Ranking risks based on their potential impact and likelihood to prioritize mitigation efforts. + +3. **Risk Mitigation**: + - **Preventive Measures**: Implementing strategies to prevent risks from occurring, such as adding redundancies, improving security protocols, or enhancing system design. + - **Detective Measures**: Establishing mechanisms to detect when a risk is materializing, like monitoring systems, alerts, and anomaly detection. + - **Responsive Measures**: Developing plans to respond to risks when they occur, including contingency plans, incident response protocols, and recovery strategies. + - **Mitigation Controls**: Applying specific controls to reduce the impact or likelihood of risks, such as encryption, access controls, and fail-safes. + +4. **Continuous Monitoring and Review**: + - **Ongoing Assessment**: Regularly reviewing and updating risk assessments to reflect new information or changes in the system or its environment. + - **Feedback Loops**: Integrating feedback from incidents and near-misses to improve risk identification and mitigation strategies. + +5. **Documentation and Communication**: + - **Risk Register**: Maintaining a comprehensive log of identified risks, assessments, and mitigation measures. + - **Stakeholder Communication**: Ensuring all relevant stakeholders are informed about risks and the measures being taken to address them. + +These components help ensure that risks associated with automated systems are systematically identified, assessed, and managed to minimize potential harms.","The key components of risk identification and mitigation in the development of automated systems include pre-deployment testing, risk identification and mitigation processes, ongoing monitoring, and adherence to domain-specific standards. 
These components aim to ensure that systems are safe and effective based on their intended use and to mitigate unsafe outcomes, including those beyond the intended use.",0.58,1.0000000000000002,0.0,0.0,0.23524398341301264 +What are the implications of bias and discrimination in automated systems on the rights of the American public?,"['Among the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent.' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed.""]","The implications of bias and discrimination in automated systems on the rights of the American public are significant and multifaceted: + +1. **Limitation of Opportunities**: Automated systems that are biased can limit individuals' access to critical resources or services. For example, biased hiring algorithms can prevent qualified candidates from obtaining jobs, and discriminatory credit scoring systems can deny people loans or favorable interest rates. + +2. **Reproduction of Inequities**: These systems often reflect and reproduce existing societal inequities, thereby perpetuating cycles of disadvantage and discrimination. This can result in systemic exclusion of marginalized groups from economic, social, and political opportunities. + +3. **Privacy Violations**: Unchecked data collection and surveillance by these systems can undermine individuals' privacy. This pervasive tracking can occur without people's knowledge or consent, leading to a loss of personal autonomy and control over one's own information. + +4. **Adverse Impacts on Communities**: The use of biased automated systems can have severe impacts on communities, including job loss, loss of child custody, and other negative outcomes. It can also lead to a chilling effect, where community members feel surveilled and are discouraged from participating fully in societal activities. + +5. **Erosion of Trust**: The reliance on automated systems that lack transparency and accountability can erode public trust in institutions and technologies.
When people defer to potentially inaccurate systems, it can lead to unjust outcomes and a lack of recourse for those affected. + +6. **Negative Perceptions and Stigmatization**: Surveillance and biased automated decision-making can send negative signals to community members about how they are viewed by society, leading to feelings of stigmatization and alienation. + +Overall, the presence of bias and discrimination in automated systems poses a serious threat to the rights and freedoms of the American public by exacerbating existing inequalities, violating privacy, and undermining trust in technology and institutions.","The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting or reproducing existing unwanted inequities. These outcomes can threaten people's opportunities, undermine their privacy, and lead to pervasive tracking of their activities, often without their knowledge or consent.",1.0,1.0,1.0,0.99999999995,0.631887953049973 +What measures are suggested to protect data privacy in evaluations involving human subjects?,[],"To protect data privacy in evaluations involving human subjects, several measures are typically suggested: + +1. **Informed Consent**: Ensuring that participants are fully informed about the nature of the study, the type of data being collected, how it will be used, and their rights to withdraw at any time. + +2. **Anonymization and De-identification**: Removing personally identifiable information (PII) from the data to ensure that individuals cannot be easily identified. This can involve techniques like pseudonymization, where names and other identifiers are replaced with pseudonyms. + +3. **Data Encryption**: Encrypting data both in transit and at rest to protect it from unauthorized access and breaches. + +4. **Access Controls**: Implementing strict access control measures to ensure that only authorized personnel have access to the data. This includes using strong passwords, multi-factor authentication, and role-based access controls. + +5. **Minimizing Data Collection**: Collecting only the data that is absolutely necessary for the evaluation to limit exposure and potential misuse. + +6. **Secure Data Storage**: Storing data in secure environments that are protected against physical and cyber threats. This includes using secure servers and cloud storage solutions with robust security measures. + +7. **Regular Audits and Monitoring**: Conducting regular audits and monitoring of data access and usage to detect and respond to any unauthorized activities or breaches. + +8. **Compliance with Legal and Ethical Standards**: Adhering to relevant laws and regulations, such as GDPR for data protection, as well as ethical guidelines for research involving human subjects. 
+ +By implementing these measures, organizations can help protect the privacy of participants and ensure the integrity of the data collected during evaluations.","Suggested measures to protect data privacy in evaluations involving human subjects include: anonymizing data to protect the privacy of human subjects, leveraging privacy output filters, removing any personally identifiable information (PII) to prevent potential harm or misuse, and providing human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications.",1.0,0.9999999999999999,0.0,0.0,0.3567659632021673 +What is the purpose of AI impact assessment in relation to feedback from individuals and communities?,"['Information gathered from structured public feedback can inform design, implementation, deployment \napproval, maintenance, or decommissioning decisions.' + 'Use feedback from internal and external AI Actors, users, individuals, and communities, to assess impact of AI-generated content.']","The purpose of AI impact assessment in relation to feedback from individuals and communities is to inform and guide decisions regarding the design, implementation, deployment, maintenance, or decommissioning of AI systems. By incorporating feedback from internal and external AI actors, users, individuals, and communities, organizations can assess the impact of AI-generated content more effectively. This helps ensure that the AI systems are aligned with the needs and values of those affected, mitigate potential harms, and enhance the overall societal and ethical outcomes of AI deployment.","The purpose of AI impact assessment in relation to feedback from individuals and communities is to collect, consider, prioritize, and integrate feedback regarding the potential individual and societal impacts related to AI risks. This process ensures that organizational policies and practices are in place to address these impacts effectively.",0.42857142857142855,0.9999988525099145,0.5,0.99999999995,0.6375046958353049 +What principles are required for the design and use of trustworthy artificial intelligence in the federal government?,[],"The design and use of trustworthy artificial intelligence (AI) in the federal government should adhere to a set of principles to ensure that AI systems are reliable, ethical, and beneficial. These principles typically include: + +1. **Transparency**: AI systems should be designed and operated in a manner that is transparent. This includes clear documentation of how the AI works, the data it uses, and the decision-making processes it follows. Transparency helps build trust and allows stakeholders to understand and scrutinize AI systems. + +2. **Accountability**: There should be clear lines of responsibility and accountability for AI systems. This means identifying who is responsible for the development, deployment, and oversight of AI systems, and ensuring there are mechanisms in place to address any issues that arise. + +3. **Fairness and Non-Discrimination**: AI systems should be designed to avoid bias and discrimination. This involves using diverse and representative datasets, as well as implementing measures to detect and mitigate biases in AI algorithms. + +4. **Privacy and Security**: The design and use of AI should prioritize the protection of privacy and ensure robust security measures are in place. This includes safeguarding personal data and preventing unauthorized access or malicious use of AI systems. + +5. 
**Safety and Robustness**: AI systems should be safe and robust, meaning they should operate reliably and withstand various types of disruptions or adversarial inputs. This also involves continuous monitoring and updating of AI systems to address vulnerabilities. + +6. **Public Engagement and Participation**: Engaging with the public and other stakeholders is important to ensure that AI systems align with societal values and needs. This includes soliciting input and feedback from diverse communities and considering their concerns and perspectives in the design and deployment of AI systems. + +7. **Ethical Use**: AI should be used in a manner that is ethical and respects human rights. This involves making decisions that consider the moral implications and potential impacts of AI on individuals and society. + +8. **Inclusivity**: Ensuring that AI systems are accessible and beneficial to all segments of society, including marginalized and underserved communities. + +By adhering to these principles, the federal government can foster the development and use of AI systems that are trustworthy, ethical, and aligned with public interests.","The principles required for the design and use of trustworthy artificial intelligence in the federal government include: (a) lawful and respectful of our Nation’s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) safe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transparent; and (i) accountable.",1.0,0.9720053525577105,0.0,0.0,0.3292424242567069 +What organizational risk tolerances should be applied to the utilization of third-party GAI resources?,[],"When utilizing third-party Generative Artificial Intelligence (GAI) resources, organizations should consider several risk tolerances to ensure safety and align with their strategic goals. Here are some key risk tolerances to apply: + +1. **Data Privacy and Security**: Assess the third-party GAI provider's measures for protecting sensitive data. Ensure they comply with relevant regulations (e.g., GDPR, CCPA) and have robust data encryption, access controls, and incident response plans. + +2. **Operational Reliability and Continuity**: Evaluate the provider's track record for uptime, reliability, and their disaster recovery plans. Understand their service level agreements (SLAs) and ensure they meet your organization's operational requirements. + +3. **Compliance and Legal Risks**: Verify that the third-party GAI resources adhere to all industry-specific regulations and legal requirements. This includes intellectual property rights, data sovereignty laws, and contractual obligations. + +4. **Ethical and Bias Considerations**: Ensure the AI models and datasets used by the third party are free from biases and ethically sourced. This includes understanding how the AI makes decisions and ensuring it aligns with your organization's ethical standards. + +5. **Performance and Accuracy**: Assess the performance and accuracy benchmarks of the third-party GAI. Ensure they are sufficient for your intended use case and that there are mechanisms for continuous monitoring and improvement. + +6. **Financial Risk**: Consider the financial stability of the third-party provider and the cost implications of their services. Evaluate the total cost of ownership, including any hidden costs or potential for price increases. + +7.
**Vendor Lock-in and Portability**: Understand the risks of vendor lock-in, including the ability to migrate to another provider if necessary. Ensure there are clear data portability options and exit strategies in place. + +8. **Transparency and Accountability**: Ensure the third-party provider offers transparency in their operations and is accountable for any failures or breaches. This includes clear reporting mechanisms and accountability clauses in the contract. + +By carefully evaluating these risk tolerances, organizations can make informed decisions about leveraging third-party GAI resources while mitigating potential risks.","Organizational risk tolerances that should be applied to the utilization of third-party GAI resources include applying risk tolerances to the utilization of third-party datasets and other GAI resources, fine-tuned third-party models, and existing third-party models adapted to a new domain. Additionally, it involves reassessing risk measurements after fine-tuning third-party GAI models.",1.0,0.9189988930193206,0.0,0.0,0.4142148523040823 +What role do legal protections play in addressing algorithmic discrimination?,"['Many companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases.']","Legal protections play a crucial role in addressing algorithmic discrimination by establishing and enforcing standards that ensure fairness and prevent harm to the public. These protections can include: + +1. **Regulatory Standards and Guidelines**: Federal government agencies develop standards and guidelines for the use of automated systems, which help prevent bias and ensure that these systems operate fairly. + +2. **Accountability and Transparency**: Legal frameworks can mandate that companies and organizations conduct audits and impact assessments to identify potential algorithmic discrimination. These measures provide transparency to the public about how biases are being mitigated. + +3. **Bias Testing and Quality Assessment**: Legal requirements can compel companies to incorporate bias testing as part of their product quality assessment and launch procedures. This ensures that any potential discriminatory impacts are identified and addressed before products are released to the market. + +4. **Enforcement and Penalties**: Legal protections can include enforcement mechanisms and penalties for non-compliance, which incentivize companies to adhere to anti-discrimination standards and take proactive measures to prevent bias. 
+ +Overall, legal protections create a framework within which companies, non-profits, and government agencies can work to ensure that automated systems are used responsibly and do not perpetuate or exacerbate existing biases.","The context mentions that algorithmic discrimination may violate legal protections depending on specific circumstances, indicating that legal protections play a role in addressing algorithmic discrimination.",0.35,1.0000000000000002,0.0,0.9999999999,0.29305549154270116 +What protections should be in place for data and inferences related to sensitive domains?,[],"When dealing with data and inferences related to sensitive domains, several protections should be in place to ensure safety, privacy, and compliance with regulations. Here are some key protections: + +1. **Data Encryption**: Encrypt data both at rest and in transit to protect it from unauthorized access. + +2. **Access Control**: Implement strict access controls to ensure that only authorized personnel can access sensitive data. This includes role-based access control (RBAC) and multi-factor authentication (MFA). + +3. **Data Anonymization**: Anonymize or pseudonymize data to protect individual identities, especially when sharing data for research or analysis. + +4. **Compliance with Regulations**: Adhere to relevant data protection regulations such as GDPR, HIPAA, and others, depending on the jurisdiction and nature of the data. + +5. **Regular Audits**: Conduct regular security audits and assessments to identify and mitigate potential vulnerabilities. + +6. **Data Minimization**: Collect only the data that is necessary for the task at hand and avoid storing unnecessary sensitive information. + +7. **Secure Data Storage**: Use secure and compliant data storage solutions that offer robust security features. + +8. **Inference Controls**: Implement controls to prevent unintended inferences that could lead to privacy breaches, such as differential privacy techniques. + +9. **Transparency and Consent**: Ensure transparency with data subjects regarding how their data will be used and obtain informed consent where necessary. + +10. **Incident Response Plan**: Have a robust incident response plan in place to quickly address and mitigate any data breaches or security incidents. + +By implementing these protections, organizations can better safeguard sensitive data and inferences, thereby reducing the risk of privacy breaches and ensuring compliance with legal and ethical standards.","Enhanced protections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and related inferences should only be used for necessary functions, and you should be protected by ethical review and use prohibitions.",1.0,0.9360451457930993,0.0,0.0,0.8586668152681487 +What are the potential consequences of using automated systems without protections against algorithmic discrimination?,[],"Using automated systems without protections against algorithmic discrimination can lead to several potential consequences, including: + +1. **Bias and Inequity**: Automated systems may perpetuate or even exacerbate existing biases if they are trained on biased data or if their algorithms are not carefully designed to mitigate bias. This can result in unfair treatment of certain groups based on race, gender, age, or other protected characteristics. + +2. 
**Legal and Regulatory Risks**: Organizations that deploy biased automated systems may face legal challenges and regulatory scrutiny. Discriminatory practices can lead to violations of anti-discrimination laws, resulting in legal penalties, fines, and damage to the organization's reputation. + +3. **Loss of Trust**: When users or customers perceive that an automated system is biased or unfair, it can erode trust in the system and the organization behind it. Loss of trust can lead to decreased usage, negative public perception, and harm to the organization's brand. + +4. **Negative Social Impact**: Algorithmic discrimination can have broader societal implications, reinforcing social inequalities and contributing to systemic discrimination. This can undermine social cohesion and perpetuate disadvantage for marginalized communities. + +5. **Economic Inefficiencies**: Discriminatory algorithms can lead to suboptimal decisions that do not align with an organization's objectives. For example, biased hiring algorithms may overlook qualified candidates, resulting in a less diverse and potentially less effective workforce. + +6. **Ethical Concerns**: The use of biased automated systems raises ethical issues related to fairness, justice, and the responsible use of technology. Organizations have a moral obligation to ensure that their systems do not cause harm to individuals or groups. + +To mitigate these risks, it is important for organizations to implement protections against algorithmic discrimination, such as using diverse and representative training data, regularly auditing algorithms for bias, and incorporating fairness considerations into the design and deployment of automated systems.","The potential consequences of using automated systems without protections against algorithmic discrimination include inequitable outcomes, wrongful and discriminatory arrests due to facial recognition technology, discriminatory hiring decisions informed by biased algorithms, and healthcare algorithms that may discount the severity of diseases in certain racial groups. These issues can lead to systemic biases being amplified and harm to underserved communities.",1.0,1.0,0.0,0.0,0.5019468846730333 +What measures should be taken to address confabulation in GAI system outputs?,[],"Confabulation in Generative Artificial Intelligence (GAI) systems refers to the generation of false or misleading information that appears plausible but is not based on actual data or knowledge. To address confabulation in GAI system outputs, several measures can be taken: + +1. **Improved Training Data**: Ensure that the training data is comprehensive, accurate, and diverse to minimize the likelihood of the system generating incorrect information. Regularly update and curate the data to reflect the most current and accurate information available. + +2. **Fact-Checking Mechanisms**: Implement real-time fact-checking algorithms that can verify the generated information against reliable sources. This can help in identifying and correcting false information before it is presented to users. + +3. **Transparency and Explainability**: Enhance the transparency and explainability of the GAI system. By understanding how the system arrives at its conclusions, developers can identify and address potential sources of confabulation. + +4. **Human-in-the-Loop**: Incorporate a human-in-the-loop approach where human experts review and validate the outputs of the GAI system, especially in high-stakes scenarios where accuracy is critical. + +5.
**Regular Audits and Monitoring**: Conduct regular audits and continuous monitoring of the GAI system’s outputs to detect and address any instances of confabulation. This helps in maintaining the reliability and trustworthiness of the system.
+
+6. **Feedback Loops**: Establish feedback loops where users can report inaccuracies or confabulated information. This feedback can then be used to refine and improve the system.
+
+7. **Ethical Guidelines and Standards**: Develop and adhere to ethical guidelines and standards for AI development and deployment. These guidelines should emphasize the importance of accuracy, reliability, and the avoidance of misinformation.
+
+8. **Robust Evaluation Metrics**: Create and utilize robust evaluation metrics to assess the accuracy and reliability of the GAI system’s outputs. Metrics should be designed to specifically detect and measure instances of confabulation.
+
+By implementing these measures, the risk of confabulation in GAI system outputs can be significantly reduced, thereby enhancing the overall trustworthiness and reliability of the system.","To address confabulation in GAI system outputs, the following measures should be taken: review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities (MS-2.5-003), and avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments (MS-2.5-001).",1.0,0.9354057887071603,0.0,0.0,0.27489447492036456
+What are some concerns related to data privacy in the context of sensitive domains?,"['Data and metadata are sensitive if they pertain to an individual in a sensitive domain (defined below); are generated by technologies used in a sensitive domain; can be used to infer data from a sensitive domain or sensitive data about an individual (such as disability-related data, genomic data, biometric data, behavioral data, geolocation data, data related to interaction with the criminal justice system, relationship history and legal status such as custody and divorce information, and home, work, or school environmental data); or have the reasonable potential to be used in ways that are likely to expose individuals to meaningful harm, such as a loss of privacy or financial harm'
+ 'SENSITIVE DOMAINS: “Sensitive domains” are those in which activities being conducted can cause material \nharms, including significant adverse effects on human rights such as autonomy and dignity, as well as civil liberties and civil rights. Domains that have historically been singled out as deserving of enhanced data protections \nor where such enhanced protections are reasonably expected by the public include, but are not limited to, \nhealth, family planning and care, employment, education, criminal justice, and personal finance. In the context \nof this framework, such domains are considered sensitive whether or not the specifics of a system context \nwould necessitate coverage under existing law, and domains and data that are considered sensitive are understood to change over time based on societal norms and context.'
+ '“Sensitive domains” are those in which activities being conducted can cause material harms, including significant adverse effects on human rights such as autonomy and dignity, as well as civil liberties and civil rights. 
Domains that have historically been singled out as deserving of enhanced data protections or where such enhanced protections are reasonably expected by the public include, but are not limited to, health, family planning and care, employment, education, criminal justice, and personal finance.' + 'Enhanced protections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and related inferences should only be used for necessary functions, and you should be protected by ethical review and use prohibitions. You and your communities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least pre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties.']","Concerns related to data privacy in the context of sensitive domains include: + +1. **Material Harms and Significant Adverse Effects:** Activities within sensitive domains can cause material harms, including significant adverse effects on human rights such as autonomy, dignity, civil liberties, and civil rights. + +2. **Enhanced Data Protections:** Sensitive domains often require enhanced data protections because they involve activities that can lead to significant harm if data privacy is not adequately safeguarded. + +3. **Potential for Inference:** Data and metadata in sensitive domains can be used to infer sensitive information about individuals, such as health conditions, disabilities, criminal justice interactions, and personal relationships. This inferential capability raises privacy concerns. + +4. **Risk of Harmful Use:** Data from sensitive domains have the potential to be used in ways that can expose individuals to meaningful harm, such as loss of privacy, financial harm, or discrimination. + +5. **Heightened Expectations:** The public reasonably expects enhanced protections in sensitive domains like health, family planning, employment, education, criminal justice, and personal finance. Failure to meet these expectations can lead to loss of trust and potential misuse of data. + +6. **Ethical Considerations:** There are ethical considerations around using data from sensitive domains. Ethical review and use prohibitions are necessary to ensure that data is used appropriately and does not infringe on individuals' rights. + +7. **Surveillance Concerns:** Surveillance technologies used in sensitive domains need to be subject to heightened oversight to protect privacy and civil liberties. This includes pre-deployment assessments to understand potential harms and scope limits on their use. + +8. **Protection of Youth:** Data pertaining to youth in sensitive domains needs to be handled with special care to protect their privacy and ensure their data is not misused. 
+
+These concerns underscore the importance of implementing strict data privacy measures and ethical guidelines when handling data from sensitive domains.","Concerns related to data privacy in sensitive domains include the lack of awareness among patients regarding the use of their medical data by insurance companies, the revelation of personal information (such as pregnancy) through targeted advertising, the monitoring of student conversations which may limit emotional expression and unfairly flag students with disabilities, the use of location data to identify individuals visiting abortion clinics, the collection of sensitive student data without parental consent, and the potential for discriminatory impacts from such data usage. Additionally, there are concerns about the accuracy of employee data transferred to third parties, which can affect job opportunities.",1.0,0.9935082489812371,1.0,0.999999999975,0.8382769846256153
+What factors should be considered when evaluating the risk-relevant capabilities of GAI?,"['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty'
+ '• AI actor credentials and qualifications \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance'
+ 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content'
+ 'a description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted, a summary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the results of public consultation such as concerns raised and any decisions made due to these concerns; risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']","When evaluating the risk-relevant capabilities of General Artificial Intelligence (GAI), several factors should be considered to ensure a comprehensive assessment. These factors include:
+
+1. **Data Privacy and Intellectual Property**: Understanding how data is sourced, processed, and protected is crucial. This includes ensuring that data privacy laws are adhered to and that intellectual property rights are respected.
+
+2. **AI Actor Credentials and Qualifications**: Evaluating the credentials and qualifications of the individuals and organizations developing and deploying the AI system is essential. This helps in assessing their capability to manage and mitigate risks.
+
+3. **Auditing and Assessment**: Regular auditing and assessment of the AI system should be conducted to identify and address potential risks. This includes both internal audits and external assessments by independent parties.
+
+4. **Change-Management Controls**: Implementing robust change-management controls helps manage the integration of new features or updates to the AI system without introducing new risks.
+
+5. **Commercial Use**: Considering the implications of commercial use, including how the AI system will be marketed and used by end-users, is important for understanding potential risks.
+
+6. **Data Provenance**: Keeping detailed records of the origin, processing, and interpretation of data used to train the AI system is necessary to ensure its integrity and reliability.
+
+7. **Information Integrity and Security**: Ensuring that the AI system maintains the integrity and security of information it processes, preventing unauthorized access and tampering.
+
+8. **Handling Dangerous, Violent, or Hateful Content**: Implementing mechanisms to detect and mitigate the generation or dissemination of harmful content is critical.
+
+9. **Data Relevancy and Quality**: Justifying the relevancy and quality of data used in training the AI, including identifying and addressing any missing, incomplete, or erroneous data.
+
+10. **Public Consultation and Concerns**: Considering the outcomes of public consultations, addressing concerns raised, and documenting decisions made in response to these concerns.
+
+11. **Risk Identification and Management**: Conducting thorough risk assessments and implementing steps to mitigate potential harms associated with the AI system.
+
+12. **Performance Testing and Monitoring**: Regular performance testing, including accuracy assessments, differential demographic impact analysis, error rate evaluations, and comparisons to previously deployed systems. Ongoing monitoring procedures and reporting are also necessary to track the system's performance over time.
+
+By carefully considering these factors, stakeholders can better evaluate and manage the risk-relevant capabilities of GAI, ensuring its safe and responsible deployment.","Factors to consider when evaluating the risk-relevant capabilities of GAI include abuses and impacts to information integrity, dependencies between GAI and other IT or data systems, harm to fundamental rights or public safety, presentation of obscene, objectionable, offensive, discriminatory, invalid or untruthful output, psychological impacts to humans (e.g., anthropomorphization, algorithmic aversion, emotional entanglement), possibility for malicious use, whether the system introduces significant new security vulnerabilities, anticipated system impact on some groups compared to others, and unreliable decision-making capabilities, validity, adaptability, and variability of GAI system performance over time.",0.8,0.9605360373476303,0.3333333333333333,0.999999999975,0.8765040882323387
+What considerations should be taken into account when using automated systems in sensitive domains?,"['The appropriate application of the principles set forth in this white paper depends significantly on the context in which automated systems are being utilized. In some circumstances, application of these principles'
+ 'For systems used in sensitive domains, reporting should include information about training and governance procedures for these technologies.'
+ '• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Data provenance']","When using automated systems in sensitive domains, several key considerations should be taken into account to ensure safety and reliability. These include:
+
+1. **AI Actor Credentials and Qualifications**: Ensuring that the individuals or entities responsible for developing, implementing, and maintaining the automated systems are qualified and have the appropriate credentials.
+
+2. **Alignment to Organizational Values**: The system should align with the core values and ethical standards of the organization deploying it.
+
+3. **Auditing and Assessment**: Regular auditing and assessment of the systems should be conducted to ensure they are functioning as intended and to identify any potential issues.
+
+4. **Change-Management Controls**: Implementing controls to manage and document any changes to the system, including updates and modifications, to ensure they do not introduce new risks.
+
+5. **Data Provenance**: Ensuring the quality, source, and integrity of the data used by the system to make decisions. This involves understanding where the data comes from and evaluating its accuracy and relevance.
+
+These considerations are crucial for maintaining the integrity, safety, and ethical standards of automated systems, especially in sensitive domains where the impact of these systems can be significant.","When using automated systems in sensitive domains, considerations should include tailoring the systems to their intended purpose, providing meaningful access for oversight, ensuring training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions. Additionally, there should be a focus on accessibility, equity, effectiveness, and the maintenance of these systems, along with public reporting on human governance processes and their outcomes.",0.38461538461538464,0.9911915938715693,0.3333333333333333,0.5833333333041666,0.30739569134393185
+What should be included in the summary reporting for automated systems?,"['Reporting that includes summary information about these automated systems in plain language and assessments of the clarity and quality of the notice and explanations should be made public whenever possible.'
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,'
+ 'Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in use, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible.'
+ '• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can be provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should be made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law enforcement, or national security considerations may prevent public release. Where public reports are not possible, the information should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguarding individuals’ rights. These reporting expectations are important for transparency, so the American people can have'
+ 'Reporting should be provided in a plain language and machine-readable manner.'
+ 'Reporting should also include documentation of goals and assessment of meeting those goals, consideration of data included, and documentation of the governance of reasonable access to the technology.']","The summary reporting for automated systems should include the following elements:
+
+1. **Plain Language Summary**: Information about the automated systems should be reported in plain language that is easy to understand.
+
+2. **System Functioning and Role of Automation**: Clear descriptions of the overall system functioning and the role automation plays in it.
+
+3. **Notice of System Use**: Notice that such automated systems are in use.
+
+4. **Responsible Individual or Organization**: Identification of the individual or organization responsible for the system.
+
+5. **Explanations of Outcomes**: Explanations of outcomes that are clear, timely, and accessible.
+
+6. **Business Processes and System Goals**: Descriptions of the organization’s business processes, system goals, any human-run procedures forming part of the system, and specific performance expectations.
+
+7. **Data Used**: A description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted, a summary of what data might be missing, incomplete, or erroneous, and justifications for data relevancy.
+
+8. **Public Consultation Results**: The results of public consultation, including concerns raised and decisions made due to these concerns.
+
+9. **Risk Identification and Management**: Assessments of risk identification and management, and any steps taken to mitigate potential harms.
+
+10. **Performance Testing Results**: The results of performance testing, including accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems.
+
+11. **Ongoing Monitoring Procedures**: Documentation of ongoing monitoring procedures and regular performance testing reports, including the monitoring frequency.
+
+12. **Documentation of Goals and Assessments**: Documentation of goals and assessments of whether those goals are being met.
+
+13. **Governance of Access**: Considerations of data included and documentation of the governance of reasonable access to the technology.
+
+These elements are crucial for transparency, allowing the public, regulators, auditors, industry standards groups, or other oversight bodies to review and understand the functioning and impact of automated systems.","The summary reporting for automated systems should include: the responsible entities for accountability purposes; the goal and use cases for the system; identified users and impacted populations; the assessment of notice clarity and timeliness; the assessment of the explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment of how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of risk.",1.0,0.974534706620862,0.7142857142857143,0.7708333333140625,0.5624389614309446
+What are the key considerations for testing and deployment of automated systems to ensure their safety and effectiveness?,"['Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.'
+ 'the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,'
+ 'Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in use, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice should be kept up-to-date and people impacted by the system should be notified of significant use case or key functionality changes. Automated systems should provide explanations that are technically valid, meaningful and useful to you and to any operators or others who']","To ensure the safety and effectiveness of automated systems, there are several key considerations for testing and deployment:
+
+1. **Independent Evaluation and Reporting**:
+   - Independent evaluation and reporting should be conducted to confirm that the system is safe and effective.
+   - The results, including steps taken to mitigate potential harms, should be made public whenever possible.
+
+2. **Performance Testing**:
+   - Performance testing should include metrics such as accuracy, differential demographic impact, resulting error rates (both overall and per demographic group), and comparisons to previously deployed systems.
+   - There should be ongoing monitoring procedures and regular performance testing reports, including details on monitoring frequency.
+
+3. **Accessible Documentation**:
+   - Designers, developers, and deployers should provide plain language documentation that is generally accessible.
+   - This documentation should include clear descriptions of the overall system functioning, the role of automation, notice of system usage, the responsible individual or organization, and explanations of outcomes.
+
+4. **Transparency and Notification**:
+   - Systems should notify users and those impacted by the system of its use, especially in the case of significant use case or key functionality changes. 
+ - Explanations provided should be technically valid, meaningful, and useful to users, operators, or others who may be affected. + +By adhering to these considerations, the development and deployment of automated systems can be better managed to ensure they are both safe and effective for their intended use.","Key considerations for testing and deployment of automated systems to ensure their safety and effectiveness include extensive testing before deployment, following domain-specific best practices, considering the roles of human operators, mirroring real-world conditions during testing, comparing system performance with existing human-driven procedures, and identifying and mitigating potential risks proactively. Testing should include both automated and human-led testing, and decision possibilities should include the option of not deploying the system if performance does not meet standards.",1.0,0.9691810477474044,0.0,0.99999999995,0.9023731708876218 +What is the purpose of pre-deployment testing in the development of automated systems?,"['Testing. Systems should undergo extensive testing before deployment. This testing should follow domain-specific best practices, when available, for ensuring the technology will work in its real-world context. Such testing should take into account both the specific technology used and the roles of any human operators or reviewers who impact system outcomes or effectiveness; testing should include both automated systems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the conditions in which the system will be deployed, and new testing may be required for each deployment to account for material differences in conditions from one deployment to another.' + 'others have identified and mitigated harms found through pre-deployment testing and ongoing monitoring processes.']","The purpose of pre-deployment testing in the development of automated systems is to ensure that the technology will work effectively in its real-world context. This involves extensive testing that follows domain-specific best practices and considers both the specific technology and the roles of any human operators or reviewers who influence system outcomes or effectiveness. Pre-deployment testing aims to identify and mitigate potential harms, ensuring the system operates correctly and safely under conditions that closely mirror its intended deployment environment. This rigorous testing process helps to prevent issues that could arise once the system is deployed, thereby enhancing its reliability and safety.","The purpose of pre-deployment testing in the development of automated systems is to identify risks and potential impacts of the system, ensuring that it is safe and effective based on its intended use, and to mitigate unsafe outcomes, including those beyond the intended use.",0.6923076923076923,0.9933331082141891,0.0,0.99999999995,0.6909995111531863 +What is the purpose of the AI Bill of Rights in relation to the Executive Order on trustworthy artificial intelligence?,[],"The purpose of the AI Bill of Rights in relation to the Executive Order on trustworthy artificial intelligence is to establish a framework of principles and practices that ensure the development and deployment of AI technologies are aligned with values such as fairness, transparency, accountability, and protection of individual rights. 
It aims to guide the responsible use of AI by setting standards that prevent harm, promote inclusivity, and uphold civil liberties, complementing the directives of the Executive Order to foster trust and reliability in AI systems.","The Blueprint for an AI Bill of Rights is consistent with the Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the Federal Government, which requires federal agencies to adhere to nine principles when using AI.",0.0,0.9999999999999997,0.0,0.0,0.8354453086949354
+What are some examples of how data privacy principles aim to protect against identity theft?,"['A data broker harvested large amounts of personal data and then suffered a breach, exposing hundreds of thousands of people to potential identity theft. 65'
+ 'You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive.']","Data privacy principles aim to protect against identity theft through several mechanisms, including:
+
+1. **Built-in Protections:** Systems should have privacy protections embedded by default. This means that users should not have to take extra steps to safeguard their information; the system should be designed to protect their data.
+
+2. **User Agency:** Users should have control over how their data is used. They should be able to give permission and have their decisions regarding the collection, use, access, transfer, and deletion of their data respected.
+
+3. **Privacy by Design:** Systems should be designed to ensure privacy protections are included from the outset. This includes collecting only the data necessary for a specific context and ensuring this data collection meets reasonable expectations.
+
+4. **Transparency and Consent:** Designers, developers, and deployers of automated systems should seek user permission and be transparent about how data is being used. Users should be informed and should consent to data collection and processing activities.
+
+5. **Avoiding Privacy-Invasive Defaults:** Systems should not use design choices that make it difficult for users to protect their privacy or that default to privacy-invasive settings. Instead, they should be designed to facilitate easy privacy management by users.
+
+6. **Alternative Safeguards:** When it is not possible to seek user permission, alternative privacy by design safeguards should be implemented. These may include anonymization, encryption, and other measures to protect personal data from unauthorized access and breaches.
+
+By adhering to these principles, systems can reduce the risk of identity theft by minimizing unnecessary data collection, ensuring data is used appropriately, and providing robust protections against unauthorized access.","Examples of how data privacy principles aim to protect against identity theft include: a data broker harvesting large amounts of personal data and suffering a breach that exposes individuals to potential identity theft, and an insurer collecting data from a person's social media presence to determine life insurance rates, which could lead to misuse of personal information.",0.7916666666666666,0.9695103690808091,0.5,0.9999999999,0.40121461254552937
+What are the concerns associated with unsafe diffusion in the context of AI-generated content?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","Concerns associated with unsafe diffusion in the context of AI-generated content primarily revolve around the following issues:
+
+1. **Information Integrity**: AI-generated content can sometimes produce false or misleading information. If this content is widely disseminated without proper checks, it can lead to misinformation and the erosion of trust in reliable sources of information.
+
+2. **Information Security**: AI-generated content can be manipulated to include malicious code or phishing attempts. If users are not cautious, they might unknowingly compromise their personal information or the security of their systems.
+
+3. **Dangerous, Violent, or Hateful Content**: AI systems can sometimes generate content that is violent, hateful, or otherwise harmful. If this content spreads unchecked, it can incite violence, spread hate speech, or lead to psychological harm.
+
+Overall, ensuring that AI-generated content is safe involves implementing robust checks and filters to prevent the dissemination of harmful, misleading, or malicious information.",The answer to given question is not present in context,1.0,0.9967270103384741,1.0,0.0,0.176388017057866
+What measures should entities take to maintain data quality in sensitive domains?,['• Auditing and assessment \n• Data provenance'],"Maintaining data quality in sensitive domains is crucial to ensure reliability, security, and ethical standards. Here are some measures entities should take:
+
+1. **Data Provenance**: Track the origin and history of data to ensure its authenticity and integrity. This involves documenting where data comes from, how it has been processed, and any transformations it has undergone.
+
+2. **Auditing and Assessment**: Regularly audit and assess data quality to identify and rectify any inconsistencies, inaccuracies, or anomalies. This helps in maintaining high standards and adhering to compliance requirements.
+
+3. **Data Validation**: Implement rigorous data validation processes to check for accuracy, completeness, and consistency before data is used in any analysis or decision-making processes.
+
+4. **Access Control**: Limit access to sensitive data to authorized personnel only. Implement robust authentication and authorization mechanisms to prevent unauthorized access and potential data breaches.
+
+5. **Data Encryption**: Use encryption techniques to protect data both in transit and at rest. This ensures that even if data is intercepted, it remains unreadable without the proper decryption keys.
+
+6. **Regular Updates and Maintenance**: Keep data management systems and software up-to-date to protect against vulnerabilities and ensure compatibility with the latest security protocols.
+
+7. **User Training**: Train employees and stakeholders on best practices for data handling, security measures, and the importance of maintaining data quality.
+
+8. **Data Cleansing**: Periodically clean data to remove duplicates, correct errors, and update outdated information. This helps in maintaining the relevance and accuracy of the data.
+
+By implementing these measures, entities can ensure high data quality in sensitive domains, thereby enhancing the reliability and security of their data-driven operations.","Entities should be especially careful to maintain the quality of data in sensitive domains to avoid adverse consequences arising from decision-making based on flawed or inaccurate data. This includes conducting regular, independent audits and taking prompt corrective measures to maintain accurate, timely, and complete data.",0.023255813953488372,1.0000000000000016,0.5,0.9999999999,0.47133914171337843
+What is the purpose of implementing a supplier risk assessment framework in evaluating third-party entities?,[],"The purpose of implementing a supplier risk assessment framework in evaluating third-party entities is to identify, assess, and mitigate risks that could potentially impact the organization. This includes risks related to financial stability, compliance with regulations, cybersecurity, operational performance, and reputation. By systematically evaluating these risks, organizations can make informed decisions about which suppliers to engage with, ensuring they maintain high standards of quality, security, and reliability. This process helps in minimizing disruptions, safeguarding sensitive information, and maintaining compliance with legal and regulatory requirements.","The purpose of implementing a supplier risk assessment framework in evaluating third-party entities is to assess and monitor their performance and adherence to content provenance standards, detect anomalies and unauthorized changes, manage services acquisition and value chain risks, and ensure legal compliance.",0.0,0.967639896771649,0.0,0.0,0.48277928151665445
+What is the purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes?,"['Create measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric effectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent.']","The purpose of creating measurement error models for pre-deployment metrics in the context of Testing, Evaluation, Verification, and Validation (TEVV) processes is to ensure construct validity for each metric. This involves:
+
+1. **Demonstrating Construct Validity**: Ensuring that the metrics effectively operationalize the desired concepts they are meant to measure. This means confirming that the metrics truly reflect the aspects of performance, safety, or functionality they are intended to represent.
+
+2. **Estimating and Documenting Biases or Variance**: Measuring or estimating and documenting any biases or statistical variance in the applied metrics or structured human feedback processes. 
This helps in understanding the reliability and accuracy of the metrics, ensuring they are not unduly influenced by errors or biases. + +3. **Leveraging Domain Expertise**: Utilizing expertise in the relevant field to model complex societal constructs, such as identifying and categorizing hateful content accurately. This ensures that the metrics are not only statistically sound but also contextually relevant and meaningful. + +Overall, creating these models helps in building robust, reliable, and valid metrics that can be trusted to evaluate the AI system's performance accurately before deployment, thereby enhancing the safety and effectiveness of the AI system in real-world applications.","The purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes is to demonstrate construct validity for each metric, ensuring that the metric effectively operationalizes the desired concept. This involves measuring or estimating and documenting biases or statistical variance in applied metrics or structured human feedback processes, while leveraging domain expertise when modeling complex societal constructs such as hateful content.",0.45454545454545453,0.996170153567106,1.0,0.9999999999,0.6178559553344672 +What role do legal protections play in addressing algorithmic discrimination?,"['Many companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases.']","Legal protections play a crucial role in addressing algorithmic discrimination by setting standards and guidelines to ensure fairness and prevent bias in automated systems. These protections can help: + +1. **Establish Accountability**: Legal frameworks can hold companies and developers accountable for the outcomes of their algorithms, ensuring they take necessary steps to mitigate bias and discrimination. + +2. **Ensure Compliance**: By mandating compliance with anti-discrimination laws, legal protections can force organizations to implement bias testing and audits as part of their product development and deployment processes. + +3. **Promote Transparency**: Legal requirements for transparency can compel companies to disclose how their algorithms work and the measures they have taken to prevent discrimination, providing the public with greater insight and trust in automated systems. + +4. **Protect Individuals**: Legal protections can provide recourse for individuals who have been adversely affected by algorithmic decisions, ensuring they have the means to challenge discriminatory outcomes and seek redress. 
+
+Overall, legal protections help create an environment where the development and use of algorithms are conducted responsibly, with a focus on fairness and equity.","The context mentions that algorithmic discrimination may violate legal protections, indicating that legal protections play a role in addressing algorithmic discrimination by providing a framework that designers, developers, and deployers of automated systems must adhere to in order to protect individuals and communities from unjustified different treatment based on various classifications.",0.0,1.0000000000000002,0.0,0.9999999999,0.5346382582842233
+What are the potential risks associated with the production and access to obscene and abusive content?,[],"The production and access to obscene and abusive content pose several significant risks:
+
+1. **Psychological Harm**: Exposure to such content can lead to severe psychological distress, including anxiety, depression, and trauma, particularly for vulnerable individuals such as children and those with a history of trauma.
+
+2. **Normalization of Violence and Abuse**: Regular exposure to abusive content can desensitize individuals to violence and abusive behavior, potentially leading to an increase in real-world violence and abusive actions.
+
+3. **Legal and Ethical Issues**: The production and distribution of obscene and abusive content often involve illegal activities, including the exploitation of individuals, particularly minors. This carries serious legal repercussions and ethical concerns.
+
+4. **Social and Cultural Impact**: The widespread availability of such content can undermine social and cultural norms, promoting a more tolerant attitude towards deviant and harmful behavior.
+
+5. **Cybersecurity Risks**: Websites and platforms that host obscene and abusive content are often hotbeds for malicious activities, including malware distribution, hacking, and identity theft.
+
+6. **Economic Costs**: The negative impacts on mental health and the potential for increased criminal behavior can lead to substantial economic costs related to healthcare, law enforcement, and social services.
+
+Ensuring robust safeguards and regulatory measures to prevent the production and access to such content is crucial for protecting individuals and maintaining social and ethical standards.","The potential risks associated with the production and access to obscene and abusive content include eased production of and access to obscene, degrading, and/or abusive imagery, which can cause harm. This includes synthetic child sexual abuse material (CSAM) and nonconsensual intimate images (NCII) of adults.",0.0,0.9957616164662495,0.0,0.0,0.8598736804943439
+What efforts is NIST making to ensure the development of safe and trustworthy AI?,"['About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI.'
+ 'National Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.']","NIST is making several efforts to ensure the development of safe and trustworthy AI, including:
+
+1. **Standards and Frameworks**: NIST develops measurements, technology, tools, and standards aimed at advancing reliable, safe, transparent, explainable, privacy-enhanced, and fair AI. These efforts are designed to ensure that AI's commercial and societal benefits can be realized without causing harm to people or the planet.
+
+2. **AI Risk Management Framework**: NIST has developed the AI Risk Management Framework, which provides guidance on identifying and managing risks associated with AI. This framework covers various aspects such as AI risks, trustworthiness, and profiles to ensure a comprehensive approach to AI safety.
+
+3. **U.S. AI Safety Institute**: NIST established the U.S. AI Safety Institute to continue efforts initiated by the 2023 Executive Order on Safe, Secure, and Trustworthy AI. The institute focuses on building the science necessary for the safe, secure, and trustworthy development and use of AI.
+
+4. **Research and Development**: NIST has been conducting both fundamental and applied research on AI for more than a decade. This research supports the development of technologies and methodologies that contribute to AI safety and trustworthiness.
+
+5. **AI Safety Institute Consortium**: Alongside the U.S. AI Safety Institute, NIST has formed the AI Safety Institute Consortium. This consortium brings together stakeholders from various sectors to collaborate on advancing the science and best practices for AI safety.
+
+These efforts collectively aim to create a robust foundation for the development and deployment of AI systems that are safe, secure, and trustworthy.","NIST is making efforts to ensure the development of safe and trustworthy AI by developing measurements, technology, tools, and standards that advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence. They have established the U.S. AI Safety Institute and the AI Safety Institute Consortium to build the necessary science for safe, secure, and trustworthy development and use of AI, in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",0.5,0.9999999999999994,1.0,0.99999999995,0.5422457684998256
+What considerations are important for governing across the AI value chain in the context of generative AI?,['• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance'],"Governing across the AI value chain in the context of generative AI involves several critical considerations to ensure safe, ethical, and effective deployment. 
Here are some key aspects: + +1. **AI Actor Credentials and Qualifications**: Ensure that all individuals and entities involved in the AI value chain possess the necessary skills, knowledge, and qualifications. This includes developers, data scientists, and other key stakeholders who must be well-versed in AI safety, ethics, and technical competencies. + +2. **Alignment to Organizational Values**: Policies and practices should be in place to ensure that generative AI systems align with the core values and ethical standards of the organization. This includes fairness, transparency, accountability, and respect for user privacy. + +3. **Auditing and Assessment**: Regular auditing and assessment of generative AI systems are essential to identify and mitigate risks. This includes evaluating the system's performance, checking for biases, and assessing compliance with legal and regulatory requirements. + +4. **Change-Management Controls**: Implement robust change-management controls to handle updates and modifications to AI systems. This ensures that changes do not introduce new risks and that the system remains aligned with organizational goals and regulatory standards. + +5. **Commercial Use**: Consider the implications of using generative AI for commercial purposes. This includes understanding the potential impact on customers, ensuring data security, and maintaining trust by being transparent about how AI-generated outputs are used in commercial products and services. + +6. **Data Provenance**: Maintain a clear record of the sources and lineage of data used to train and operate generative AI systems. This helps in understanding and managing the quality, reliability, and ethical implications of the data, including issues related to copyright and data ownership. + +By addressing these considerations, organizations can better govern the use of generative AI across its value chain, ensuring it is used responsibly and effectively.","The important considerations for governing across the AI value chain in the context of generative AI include organizational governance, oversight levels, human-AI configurations, human review, tracking and documentation, and management oversight. Additionally, governance tools and protocols that apply to other types of AI systems can also be applied to generative AI systems, including accessibility, AI actor credentials, alignment to organizational values, auditing, change-management controls, commercial use, and data provenance.",0.6,0.981140746922629,0.5,0.9999999999,0.4436495246907691 +What are the suggested actions to address confabulation in GAI systems?,[],"Addressing confabulation in General Artificial Intelligence (GAI) systems involves several strategies to ensure the reliability and accuracy of their outputs. Some suggested actions include: + +1. **Improving Training Data**: Ensuring that the training data used for GAI systems is comprehensive, accurate, and representative of real-world scenarios can help reduce the likelihood of confabulation. + +2. **Enhanced Validation and Testing**: Implementing rigorous validation and testing protocols to identify and correct instances where the GAI system generates false or misleading information. + +3. **Explainability and Transparency**: Developing methods to make the decision-making processes of GAI systems more transparent and understandable, so that any potential confabulations can be more easily detected and addressed. + +4. 
**Feedback Loops**: Creating feedback mechanisms where users can report inaccuracies or confabulations, allowing the system to learn and correct these errors over time.
+
+5. **Robustness to Ambiguity**: Designing GAI systems to handle ambiguous or incomplete information more effectively, reducing the chances that they will generate confabulated responses.
+
+6. **Human Oversight**: Ensuring that there is human oversight in critical decision-making processes involving GAI systems to catch and correct any confabulations.
+
+7. **Continual Learning and Adaptation**: Enabling GAI systems to continually learn and adapt based on new data and experiences, which can help in refining their responses and reducing the occurrence of confabulation.
+
+By implementing these strategies, developers and researchers can help mitigate the risk of confabulation in GAI systems, leading to more reliable and trustworthy AI.","The suggested actions to address confabulation in GAI systems include: 1) Avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments (MS-2.5-001). 2) Review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities (MS-2.5-003). 3) Evaluate GAI system performance in real-world scenarios to observe its behavior in practical environments and reveal issues that might not surface in controlled and optimized testing environments (MS-4.2-002).",1.0,0.9219777736869438,0.0,0.0,0.2789644568106634
+What are the implications of bias and discrimination in automated systems on the rights of the American public?,"['Among the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent.'
+ ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed.""]","The implications of bias and discrimination in automated systems on the rights of the American public are significant and multifaceted:
+
+1. 
**Limited Opportunities and Access to Resources**: Automated systems that are biased can limit individuals' opportunities by unfairly influencing decisions in critical areas like hiring and credit. This can prevent people from getting jobs or loans, exacerbating existing inequalities. + +2. **Unsafe and Ineffective Systems**: In fields like healthcare, biased systems can lead to unsafe and ineffective patient care. This can result in inadequate treatment for certain groups, furthering health disparities. + +3. **Privacy Violations**: Unchecked social media data collection and other forms of surveillance can undermine individuals' privacy. This pervasive tracking often occurs without individuals' knowledge or consent, infringing on their right to privacy. + +4. **Replication of Unjust Outcomes**: Automated systems that rely on adverse or irrelevant data can replicate existing unjust outcomes. This can entrench systemic inequalities and discrimination, making it harder to achieve social justice and equality. + +5. **Loss of Jobs and Custody**: The use of automated systems in decision-making can lead to severe consequences for individuals, such as the loss of jobs or custody of children. This can have long-lasting negative effects on people's lives and well-being. + +6. **Chilling Effects on Communities**: Surveillance and biased automated systems can create chilling effects, where individuals and communities feel monitored and mistrusted. This can discourage free expression and participation in democratic processes, affecting how community members view themselves and their role in society. + +Overall, the presence of bias and discrimination in automated systems poses a threat to the fundamental rights and freedoms of the American public, necessitating careful oversight and corrective measures to ensure fairness and justice.","The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting and reproducing existing unwanted inequities. These outcomes can undermine civil rights and democratic values, which are foundational American principles.",1.0,1.0,0.5,0.99999999995,0.3744988012475712 +What was the purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies?,"['The purpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation.']","The purpose of the Request For Information (RFI) issued by the Office of Science and Technology Policy (OSTP) regarding biometric technologies was to understand the extent and variety of these technologies in past, current, or planned use. 
It aimed to gather information on the domains in which these technologies are being used, the entities making use of them, the current principles, practices, or policies governing their use, and the stakeholders that are, or may be, impacted by their use or regulation.","The purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation.",0.8333333333333334,0.9841324321224986,1.0,0.9999999999,0.9974537848519671 +What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?,[],"To address risks associated with intellectual property (IP) infringement in organizational Generative AI (GAI) systems, several actions are recommended: + +1. **Implement Robust IP Policies**: Organizations should establish and enforce comprehensive IP policies that specifically address the use of GAI systems. These policies should outline acceptable use, the importance of respecting third-party IP rights, and the procedures for handling potential infringements. + +2. **Training and Awareness**: Regular training programs should be conducted to educate employees and users on the importance of IP rights and the potential risks associated with GAI systems. This includes understanding what constitutes IP infringement and how to avoid it. + +3. **Monitoring and Auditing**: Implement systems for continuous monitoring and auditing of content generated by GAI systems to ensure compliance with IP laws. This can involve using advanced tools and technologies to detect potential IP infringements. + +4. **Legal Consultation**: Engage with legal experts to understand the evolving landscape of IP law as it pertains to AI-generated content. Legal counsel can provide guidance on best practices and help in drafting policies that mitigate IP risks. + +5. **Technological Safeguards**: Use technological solutions such as watermarking, digital rights management (DRM), and other IP protection technologies to safeguard original content and detect unauthorized use. + +6. **Clear Attribution**: Ensure that any content generated by GAI systems that includes third-party material is appropriately attributed and that necessary permissions or licenses have been obtained. + +7. **Collaborative Efforts**: Work with other organizations, industry groups, and policymakers to develop and promote standards and best practices for the ethical use of GAI systems in relation to IP. 
+ +By taking these steps, organizations can better manage the risks associated with IP infringement in the use of GAI systems.","The suggested action to address risks associated with intellectual property infringement in organizational GAI systems is to compile statistics on actual policy violations, take-down requests, and intellectual property infringement, and analyze transparency reports across demographic and language groups.",1.0,0.9478939334369315,0.0,0.0,0.22944928454758337 +What role does human-AI integration play in enhancing customer service?,[],"Human-AI integration plays a significant role in enhancing customer service by combining the strengths of both humans and artificial intelligence to provide a more efficient, responsive, and personalized customer experience. Here are some key ways this integration can improve customer service: + +1. **24/7 Availability**: AI-driven chatbots and virtual assistants can handle customer inquiries around the clock, providing immediate responses to common questions and issues, which improves accessibility and convenience for customers. + +2. **Efficiency and Speed**: AI can quickly process and analyze large amounts of data, allowing it to provide rapid responses and solutions to customer queries. This reduces wait times and increases the overall efficiency of the customer service process. + +3. **Personalization**: AI systems can leverage customer data to offer personalized recommendations and solutions. By understanding individual customer preferences and histories, AI can tailor interactions to meet specific needs, enhancing the customer experience. + +4. **Handling Routine Tasks**: AI can automate routine and repetitive tasks, such as answering frequently asked questions, processing orders, or managing reservations. This frees up human agents to focus on more complex and nuanced issues that require empathy, critical thinking, and problem-solving skills. + +5. **Data-Driven Insights**: AI can analyze customer interactions to identify trends, patterns, and areas for improvement. These insights help businesses to refine their customer service strategies and address common pain points more effectively. + +6. **Enhanced Decision-Making**: AI can assist human agents by providing them with relevant information and suggestions during customer interactions. This support helps agents make better, faster decisions and enhances the overall quality of service. + +Overall, human-AI integration leverages the speed and data-processing capabilities of AI while retaining the empathy, creativity, and problem-solving abilities of human agents, resulting in a more robust and effective customer service experience.","Human-AI integration plays a key role in enhancing customer service by allowing companies to provide faster customer care through partially automated customer service platforms. These systems help answer customer questions and compile common problems for human agents to review, while maintaining human agents to respond to complicated requests. 
This integration is viewed as essential for successful customer service.",0.0,0.986176886247662,0.0,0.0,0.3657697510614297 +What is the purpose of the NIST AI Risk Management Framework?,"['The National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy,' + 'The AI RMF was released in January 2023, and is intended for voluntary use and to improve the ability of organizations to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems.']","The purpose of the NIST AI Risk Management Framework (AI RMF) is to help manage the risks posed by AI to individuals, organizations, and society. It aims to incorporate trustworthiness considerationsā€”such as accuracy, explainability and interpretability, reliability, and privacyā€”into the design, development, use, and evaluation of AI products, services, and systems. The framework is intended for voluntary use and was developed through a consensus-driven, open, transparent, and collaborative process. Its goal is to foster innovative approaches to addressing the trustworthiness of AI technologies.","The purpose of the NIST AI Risk Management Framework is to help incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems. It aims to foster the development of innovative approaches to address characteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, robustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of harmful uses.",1.0,1.0000000000000007,1.0,0.99999999995,0.7931151311305599 +What are the different stages of the AI lifecycle where risks can arise?,[],"The AI lifecycle consists of various stages where risks can arise. These stages include: + +1. **Data Collection**: Risks can emerge from biased, incomplete, or inaccurate data, which can lead to biased or incorrect AI models. + +2. **Data Preparation**: Improper handling of data during cleaning, labeling, or augmentation can introduce errors or biases. + +3. **Model Training**: During this stage, overfitting, underfitting, and the use of inappropriate algorithms can result in poor model performance and unintended consequences. + +4. **Model Evaluation**: Inadequate evaluation metrics or testing on non-representative data can lead to an overestimation of the model's performance and robustness. + +5. **Deployment**: Risk arises from the model's behavior in real-world settings, including potential misuse, adversarial attacks, or failure to generalize to new data. + +6. **Monitoring and Maintenance**: Post-deployment, the model's performance can degrade over time due to changes in data, requiring continuous monitoring and updating. 
+ +7. **Decommissioning**: Risks include the improper disposal of models and data, which can lead to data breaches or misuse of the retired AI systems. + +Each of these stages requires careful consideration and management to mitigate potential risks and ensure the safe and ethical use of AI.","Risks can arise during the design, development, deployment, operation, and/or decommissioning stages of the AI lifecycle.",1.0,0.9943012667624886,0.0,0.0,0.38937864600379213 +What role do technical protections play in the implementation of the Blueprint for an AI Bill of Rights?,[],"Technical protections play a crucial role in the implementation of the Blueprint for an AI Bill of Rights. They help ensure that AI systems are designed and deployed in ways that uphold the principles outlined in the Blueprint. These protections can include measures such as: + +1. **Data Privacy and Security**: Implementing robust encryption, secure data storage, and access controls to protect individuals' data from unauthorized access and breaches. + +2. **Algorithmic Fairness**: Developing and utilizing algorithms that are fair and unbiased, and regularly auditing these algorithms to ensure they do not perpetuate discrimination or inequality. + +3. **Transparency**: Ensuring that AI systems are transparent about how they make decisions, which can involve providing clear explanations and documentation of the AI's decision-making processes. + +4. **Accountability**: Setting up mechanisms for accountability, including logging and monitoring AI system activities, to ensure that any misuse or malfunction can be quickly identified and addressed. + +5. **Safety and Reliability**: Incorporating fail-safes, redundancy, and rigorous testing to ensure that AI systems operate reliably and safely under a variety of conditions. + +By embedding these technical protections into AI systems, developers and organizations can help align their AI technologies with the ethical guidelines and rights outlined in the AI Bill of Rights, thereby fostering trust and safeguarding the interests of individuals and society at large.","Technical protections and practices laid out in the Blueprint for an AI Bill of Rights help guard the American public against many potential and actual harms associated with automated systems. They provide a framework for the design, use, and deployment of these systems to protect the rights of individuals, ensuring transparency and accountability in their operation.",0.0,1.0,0.0,0.0,0.526083489616403 +What protections does the AI Bill of Rights provide against algorithmic discrimination?,['>>>\nAlgorithmic \nDiscrimination \nProtections\n>>>'],"The AI Bill of Rights provides several protections against algorithmic discrimination to ensure fairness and equity in the deployment and use of AI systems. Key protections include: + +1. **Protection from Harm:** AI systems should be designed and used in ways that do not cause or contribute to harm, including discrimination. This means that AI must be tested and monitored to ensure it does not produce biased outcomes that unfairly disadvantage individuals or groups. + +2. **Algorithmic Discrimination Protections:** AI systems must be evaluated and monitored for discriminatory impacts, and steps must be taken to mitigate any identified biases. This includes the use of diverse data sets, bias mitigation techniques, and regular audits to assess and address potential discriminatory effects. + +3. 
**Data Privacy:** Ensuring that personal data used in AI systems is protected and that individuals have control over their data. This helps prevent misuse of data that could lead to discriminatory practices. + +4. **Transparency and Explainability:** AI systems should be transparent and provide explanations for their decisions and outcomes. This allows for accountability and the ability to challenge or question decisions that may appear biased or unfair. + +5. **Human Alternatives, Consideration, and Fallback:** In critical areas where AI decisions can have significant impacts, individuals should have access to human review and the ability to seek recourse if they believe they have been harmed by an AI system. + +These protections aim to create a fair and just framework for the development and implementation of AI technologies, reducing the risk of discriminatory practices and ensuring that AI benefits all individuals equitably.","The AI Bill of Rights provides protections against algorithmic discrimination by ensuring that individuals should not face discrimination by algorithms. It mandates that systems should be designed and used in an equitable way, taking proactive and continuous measures to protect individuals and communities from algorithmic discrimination. This includes conducting proactive equity assessments, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing, and providing clear organizational oversight. Additionally, independent evaluation and reporting, including algorithmic impact assessments and disparity testing results, should be made public whenever possible to confirm these protections.",0.36,1.0000000000000009,0.0,0.9999999999,0.470673156613459 +What role does the 2023 Executive Order on Safe AI play in NIST's efforts to develop trustworthy artificial intelligence?,"['NIST, which has conducted both fundamental and applied work on AI for more than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to continue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and trustworthy development and use of AI.']","The 2023 Executive Order on Safe, Secure, and Trustworthy AI plays a pivotal role in guiding NIST's efforts toward the development of trustworthy artificial intelligence. This Executive Order has set in motion initiatives to ensure the safe, secure, and reliable development and use of AI technologies. To fulfill the directives of this Executive Order, NIST has established the U.S. AI Safety Institute and the AI Safety Institute Consortium. These entities are dedicated to building the scientific foundation necessary for the development of safe, secure, and trustworthy AI systems. Through these efforts, NIST aims to advance the science and practices that underpin trustworthy AI, aligning with the goals outlined in the Executive Order.","The 2023 Executive Order on Safe, Secure, and Trustworthy AI plays a significant role in NIST's efforts by guiding the establishment of the U.S.
AI Safety Institute and the AI Safety Institute Consortium, which are aimed at building the necessary science for the safe, secure, and trustworthy development and use of AI.",1.0,0.9883643501544807,1.0,0.9999999999,0.9298324130692041 +What is the importance of transparency in the context of watch lists used by predictive policing systems?,[],"Transparency is crucial in the context of watch lists used by predictive policing systems for several reasons: + +1. **Accountability**: Transparency ensures that the criteria and processes used to generate watch lists are open to scrutiny. This helps in holding the creators and users of these lists accountable for their decisions and actions. + +2. **Fairness**: It allows for the identification and correction of biases in the algorithms and data used. Without transparency, there is a risk that these systems could perpetuate or even exacerbate existing inequalities and discriminatory practices. + +3. **Trust**: Public trust in law enforcement and predictive policing systems is bolstered when there is clear and accessible information about how watch lists are created and used. Lack of transparency can lead to suspicion and resistance from the community. + +4. **Ethical Compliance**: Transparency helps ensure that the systems comply with ethical standards and legal requirements. It provides a basis for evaluating whether the use of such systems respects individual rights and freedoms. + +5. **Error Correction**: When the processes are transparent, it becomes easier to identify and rectify errors in the data or the algorithms, thereby improving the overall accuracy and reliability of the predictive policing system. + +In summary, transparency in the context of watch lists used by predictive policing systems is essential for ensuring accountability, fairness, public trust, ethical compliance, and the ability to correct errors.","Transparency is important in the context of watch lists used by predictive policing systems because both police and the public deserve to understand why and how the system makes its determinations. Without transparency, individuals may be placed on a watch list without explanation, leading to a lack of accountability and understanding of the system's conclusions.",1.0,0.9855215021494069,0.0,0.0,0.8638369009669251 +What is the purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics?,[],"The purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics is to ensure that the AI systems are developed, deployed, and maintained in ways that are ethical, fair, and aligned with the needs and values of those who are most affected by them. This feedback helps to: + +1. **Identify Bias and Inequities**: By engaging with diverse groups, developers can identify and mitigate biases and inequities that might not be apparent during the initial stages of development. + +2. **Improve Relevance and Utility**: Feedback from actual users and communities ensures that the AI system meets real-world needs and is practical and beneficial in its intended context. + +3. **Enhance Trust and Transparency**: Involving end users and impacted communities in the evaluation process fosters transparency and builds trust, as these stakeholders can see and influence how decisions are made. + +4. 
**Ensure Accountability**: Continuous feedback loops help hold developers and organizations accountable for the impacts of their AI systems, promoting responsible and ethical AI development. + +5. **Adapt and Improve Systems**: Ongoing feedback allows for iterative improvements, ensuring that the AI systems can evolve to better serve their users and adapt to changing needs and conditions. + +By incorporating these feedback processes, AI developers can create systems that are more inclusive, ethical, and effective, ultimately leading to better outcomes for all stakeholders involved.","The purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics is to allow these groups to report problems and appeal system outcomes, ensuring that the impact of AI-generated content on different social, economic, and cultural groups is assessed and understood.",1.0,0.9999970073291631,0.0,0.0,0.8891877125905085 +What measures are suggested to ensure information integrity in the context of AI systems?,"['• Auditing and assessment \n• Data provenance' + 'MS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic data.']","To ensure information integrity in the context of AI systems, several measures are recommended: + +1. **Auditing and Assessment**: Regular audits and assessments of AI systems help identify and mitigate potential risks, ensuring that the system operates as intended and maintains the integrity of the data it processes. + +2. **Data Provenance**: Tracking the origin and history of data used in AI systems is crucial. Understanding where data comes from and how it has been processed helps maintain its integrity and ensures that it is reliable and accurate. + +3. **Verification of Deduplication**: Specifically for Generative AI (GAI) systems, it is important to verify the deduplication of training data samples. This is particularly true for synthetic data, where redundancy can lead to biases and inaccuracies in the AI model. Ensuring that duplicate data is identified and removed helps maintain the quality and integrity of the training dataset. + +Implementing these measures can significantly enhance the reliability and trustworthiness of AI systems by maintaining the integrity of the information they use and produce.","Suggested measures to ensure information integrity in the context of AI systems include employing methods to trace the origin and modifications of digital content, integrating tools designed to analyze content provenance and detect data anomalies, verifying the authenticity of digital signatures, and identifying patterns associated with misinformation or manipulation. Additionally, it is recommended to disaggregate evaluation metrics by demographic factors to identify discrepancies in how content provenance mechanisms work across diverse populations.",0.125,0.9924111926067477,0.4,0.9999999999,0.6384378869408196 +What are the limitations of current pre-deployment testing approaches for GAI applications?,[],"Current pre-deployment testing approaches for General AI (GAI) applications face several limitations, including: + +1. **Scalability**: Testing complex AI systems comprehensively can be extremely resource-intensive and time-consuming, making it difficult to cover all possible scenarios and edge cases. + +2. **Unpredictability**: GAI systems are often more unpredictable than narrow AI systems because they can operate across a broader range of tasks and environments.
This makes it challenging to anticipate and test for all potential behaviors. + +3. **Lack of Real-world Data**: Pre-deployment testing usually relies on simulated environments or historical data, which may not accurately represent real-world conditions and dynamics. This can lead to a gap between tested performance and actual performance in the field. + +4. **Emergent Behaviors**: GAI systems might exhibit emergent behaviors that were not explicitly programmed or foreseen during testing. Detecting and mitigating these behaviors pre-deployment is a significant challenge. + +5. **Ethical and Safety Concerns**: Some potential behaviors and impacts of GAI systems may be difficult to test ethically or safely in a pre-deployment setting. For example, testing for harmful outcomes or adversarial attacks in real-world scenarios can pose risks. + +6. **Adaptability and Learning**: GAI systems that can learn and adapt over time might change their behavior post-deployment in ways that were not evident during initial testing phases. This continuous evolution makes static pre-deployment testing insufficient. + +7. **Interdisciplinary Complexity**: Ensuring the safety and robustness of GAI systems often requires expertise from multiple disciplines, including computer science, ethics, psychology, and more. Integrating these perspectives into a coherent testing framework is challenging. + +8. **Regulatory and Standardization Gaps**: There are often insufficient regulatory guidelines and standardized testing protocols for GAI, making it difficult to ensure consistent and comprehensive safety evaluations. + +Addressing these limitations requires ongoing research, cross-disciplinary collaboration, and possibly new paradigms in AI safety and testing methodologies.","Current pre-deployment TEVV processes used for GAI applications may be inadequate, non-systematically applied, or fail to reflect or be mismatched to deployment contexts. Anecdotal testing of GAI system capabilities through video games or standardized tests designed for humans does not guarantee GAI system validity or reliability. Additionally, jailbreaking or prompt engineering tests may not systematically assess validity or reliability risks. Measurement gaps can arise from mismatches between laboratory and real-world settings, and current testing approaches often remain focused on laboratory conditions or restricted to benchmark test datasets that may not extrapolate well to real-world conditions.",1.0,0.9461171924758212,0.0,0.0,0.7804607612079594 +What measures are suggested to ensure effective human-AI configuration in the context of GAI systems?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","Effective human-AI configuration in the context of GAI (General Artificial Intelligence) systems can be ensured through several measures, including: + +1. **Documentation Practices**: Keeping thorough logs, records, and analyses of AI incidents are crucial. This facilitates smoother information sharing with relevant AI actors and stakeholders. + +2. **Information Sharing**: Regularly sharing information about AI system changes, updates, and incidents helps in maintaining transparency and preparedness. + +3. 
**Change Management Records**: Maintaining detailed records of any changes made to the AI system ensures that all modifications are tracked and can be reviewed if necessary. + +4. **Version History and Metadata**: Keeping a comprehensive version history and associated metadata for the AI system allows for better tracking of system evolution and aids in incident response and management. + +These measures collectively help in creating a robust framework for managing and responding to any issues that arise in GAI systems, ensuring that human oversight and intervention are effective and well-informed.",The suggested measures to ensure effective human-AI configuration in the context of GAI systems include documenting the instructions given to data annotators or AI red-teamers (MS-2.8-002) and verifying the adequacy of GAI system user instructions through user testing (MS-2.8-004).,0.7,0.9915011723961432,0.0,0.9999999999,0.33781167614009094 +What issues does the automated sentiment analyzer address regarding bias in online statements?,[],"The automated sentiment analyzer addresses several issues regarding bias in online statements: + +1. **Detection and Mitigation of Bias:** Automated sentiment analyzers can help identify and mitigate biases in online statements by analyzing the tone, emotion, and sentiment expressed in the text. This can be useful in understanding and addressing any underlying prejudices or discriminatory language. + +2. **Consistency and Objectivity:** Unlike human reviewers, automated systems can provide a more consistent and objective analysis of sentiment. This reduces the likelihood of subjective biases that might come from individual human interpretations. + +3. **Scalability:** Automated systems can process large amounts of data quickly, making it feasible to analyze sentiment across vast datasets, such as social media platforms, forums, and other online communities. This broad analysis can help identify widespread biases and trends that might not be visible through manual review. + +4. **Real-time Monitoring:** Automated sentiment analyzers can provide real-time monitoring of online statements, allowing for the immediate identification of harmful bias and enabling quicker responses to address and correct such biases. + +Overall, while automated sentiment analyzers are not perfect and can themselves be subject to biases based on their training data, they offer valuable tools for identifying and addressing bias in online statements at scale.","The automated sentiment analyzer addresses bias in online statements by identifying that it was found to be biased against Jews and gay people. For instance, it marked the statement 'I’m a Jew' as negative while identifying 'I’m a Christian' as positive. This bias could lead to the preemptive blocking of social media comments such as 'I’m gay.'",0.0,1.0000000000000002,0.0,0.0,0.43635137122829337 +What are the expectations for automated systems regarding safety and effectiveness?,"['Independent evaluation and report\xad ing that confirms that the system is safe and effective, including re\xad porting of steps taken to mitigate potential harms, should be per\xad formed and the results made public whenever possible.']","The expectations for automated systems regarding safety and effectiveness include independent evaluation and reporting that confirms the system is both safe and effective. Additionally, it is important that the steps taken to mitigate potential harms are documented and made public whenever possible.
This ensures transparency and accountability in the deployment and use of automated systems.","The expectations for automated systems regarding safety and effectiveness include the need for independent evaluation, where evaluators should have access to the system and associated data to perform evaluations. Additionally, entities responsible for automated systems should provide regularly-updated reports that cover an overview of the system, data used for training, risk management assessments, performance testing results, and ongoing monitoring procedures. These reports should be presented in plain language and a machine-readable format.",0.6666666666666666,1.0,0.25,0.9999999999,0.6330636755109516 +What criteria are used to measure AI system performance or assurance in deployment settings?,"['the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']","In deployment settings, AI system performance or assurance is typically measured using several criteria, which include: + +1. **Accuracy**: How correctly the AI system performs its tasks or makes predictions. +2. **Differential Demographic Impact**: How the AI system's performance affects different demographic groups, ensuring fairness and equity. +3. **Resulting Error Rates**: The frequency of incorrect outputs, both overall and broken down by demographic groups. +4. **Comparisons to Previously Deployed Systems**: Evaluating the AI system's performance in relation to systems that were previously deployed to understand improvements or regressions. +5. **Ongoing Monitoring Procedures**: Regular and systematic tracking of the AI system's performance over time. +6. **Regular Performance Testing Reports**: Documented outcomes of performance tests conducted periodically. +7. **Monitoring Frequency**: How often the AI system is evaluated and tested after deployment to ensure it continues to perform as expected. + +These criteria ensure that the AI system is not only accurate and effective but also fair and continuously reliable throughout its deployment.",AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for conditions similar to deployment setting(s). Measures are documented.,0.75,0.9835542845035684,0.0,0.9999999999,0.2874085614410775 +What threat does automatic signature verification software pose to U.S. voters?,[],"Automatic signature verification software can pose several potential threats to U.S. voters, including: + +1. **Accuracy Concerns**: If the software is not sufficiently accurate, it might incorrectly reject valid signatures or accept invalid ones. This can lead to legitimate votes being discarded or fraudulent ones being counted. + +2. **Bias**: The software may have inherent biases that disproportionately affect certain groups of voters. For example, individuals with disabilities, older adults, or those with less consistent signatures might be unfairly penalized. + +3. **Transparency**: Voters might not have insight into how their signatures are being evaluated, leading to a lack of transparency and trust in the electoral process. + +4. **Security**: If the software is not properly secured, it could be vulnerable to hacking or tampering, which could compromise the integrity of the voting process. + +5. 
**Legal and Ethical Issues**: There could be legal challenges regarding the use of such software, particularly if it leads to significant numbers of votes being rejected or accepted improperly. + +Overall, while automatic signature verification software can streamline the voting process, it is crucial to address these potential threats to ensure a fair and secure election.",Automatic signature verification software threatens to disenfranchise U.S. voters.,0.0,0.9869330631800645,0.0,0.0,0.37120119416003855 +What measures are being taken to ensure equitable design in automated systems to protect against algorithmic discrimination?,"['evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections.' + 'The broad set of measures assessed should include demographic performance mea\xad sures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity assessment should be separated from data used for the automated system and privacy protections should be instituted; in some cases it may make sense to perform such assessment using a data sample. For every instance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xadfied groups, the entity governing, implementing, or using the system should document the disparity and a justification for any continued use of the system.']","To ensure equitable design in automated systems and protect against algorithmic discrimination, several measures are being implemented: + +1. **Algorithmic Impact Assessment**: This involves evaluating and reporting the impact of algorithms, including disparity testing results and mitigation information, to confirm protections against discrimination. These assessments should be made public whenever possible. + +2. **Demographic Performance Measures**: A broad set of measures is assessed, including overall and subgroup parity assessment, and calibration to ensure fairness across different demographic groups. + +3. **Data Separation and Privacy Protections**: Demographic data collected for the purpose of disparity assessment should be kept separate from the data used by the automated system. Privacy protections should be instituted to safeguard this information. + +4. **Sample-based Assessment**: In some cases, it may be sensible to perform disparity assessments using a data sample to ensure fairness without compromising the system's overall functionality. + +5. **Documentation and Justification**: For every instance where the deployed automated system results in different treatment or impacts that disfavor identified groups, the governing, implementing, or using entity should document the disparity and provide a justification for any continued use of the system. + +These measures aim to identify, assess, and mitigate potential biases in automated systems to promote fairness and equity across all user groups.","Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to changes or the prevention of product launches to avoid public harm. Federal government agencies are developing standards and guidance for the use of automated systems to help prevent bias. 
Non-profits and companies have created best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.",0.8125,0.9710047057468804,0.0,0.99999999995,0.7479198459943456 +What factors should be considered to ensure information integrity in the context of GAI risk management?,"['• Data provenance' + 'Assess the accuracy, quality, reliability, and authenticity of GAI output by comparing it to a set of known ground truth data and by using a variety of evaluation methods (e.g., human oversight and automated evaluation, proven cryptographic techniques, review of content inputs).']","To ensure information integrity in the context of Generative AI (GAI) risk management, the following factors should be considered: + +1. **Accuracy**: Assess the correctness of the GAI output by comparing it to known ground truth data. This helps in identifying and correcting any discrepancies. + +2. **Quality**: Evaluate the overall quality of the information generated by GAI. This can include aspects like coherence, relevance, and completeness. + +3. **Reliability**: Ensure that the GAI system consistently produces accurate and high-quality information over time and under various conditions. + +4. **Authenticity**: Verify the authenticity of the GAI output. This can involve using proven cryptographic techniques to ensure the data has not been tampered with and reviewing the sources of content inputs. + +5. **Human Oversight**: Implement human review processes to cross-check and validate the information produced by GAI. Human oversight acts as a safety net to catch errors that automated systems might miss. + +6. **Automated Evaluation**: Use automated tools and methods to continuously monitor and evaluate the performance of GAI systems. This includes employing algorithms that can detect anomalies or inconsistencies in the output. + +7. **Provenance**: Track the data provenance to understand the origins and transformation of data throughout its lifecycle. Knowing where the data came from and how it has been processed helps in assessing its trustworthiness. + +By considering these factors, organizations can better manage the risks associated with the use of GAI and ensure the integrity of the information it generates.","Factors to consider to ensure information integrity in the context of GAI risk management include abuses and impacts to information integrity, dependencies between GAI and other IT or data systems, harm to fundamental rights or public safety, presentation of obscene, objectionable, offensive, discriminatory, invalid or untruthful output, psychological impacts to humans, possibility for malicious use, introduction of significant new security vulnerabilities, anticipated system impact on some groups compared to others, and unreliable decision-making capabilities.",0.7,0.9438960397627678,0.0,0.49999999995,0.8160816887409181 +What are the reasons for implementing enhanced data protections in sensitive domains?,"['“Sensitive domains” are those in which activities being conducted can cause material harms, including signifi\xad cant adverse effects on human rights such as autonomy and dignity, as well as civil liberties and civil rights. 
Domains that have historically been singled out as deserving of enhanced data protections or where such enhanced protections are reasonably expected by the public include, but are not limited to, health, family planning and care, employment, education, criminal justice, and personal finance.' + 'SENSITIVE DOMAINS: “Sensitive domains” are those in which activities being conducted can cause material \nharms, including significant adverse effects on human rights such as autonomy and dignity, as well as civil liber\xad\nties and civil rights. Domains that have historically been singled out as deserving of enhanced data protections \nor where such enhanced protections are reasonably expected by the public include, but are not limited to, \nhealth, family planning and care, employment, education, criminal justice, and personal finance. In the context \nof this framework, such domains are considered sensitive whether or not the specifics of a system context \nwould necessitate coverage under existing law, and domains and data that are considered sensitive are under\xad\nstood to change over time based on societal norms and context.' + 'Data and metadata are sensitive if they pertain to an individual in a sensitive domain (defined below); are generated by technologies used in a sensitive domain; can be used to infer data from a sensitive domain or sensitive data about an individual (such as disability-related data, genomic data, biometric data, behavioral data, geolocation data, data related to interaction with the criminal justice system, relationship history and legal status such as custody and divorce information, and home, work, or school environmental data); or have the reasonable potential to be used in ways that are likely to expose individuals to meaningful harm, such as a loss of privacy or financial harm']","Enhanced data protections are implemented in sensitive domains for several key reasons: + +1. **Prevention of Material Harms**: Activities in sensitive domains can cause significant adverse effects on human rights, including autonomy and dignity, as well as civil liberties and civil rights. Enhanced protections help mitigate these potential harms. + +2. **Protection of Human Rights**: Sensitive domains often involve data that can impact fundamental human rights. Ensuring enhanced protections helps safeguard these rights from infringement. + +3. **Expectation of Privacy**: The public reasonably expects higher levels of data protection in certain areas, such as health, personal finance, and criminal justice. Enhanced protections align with these societal expectations. + +4. **Historical Precedent**: Domains such as health, employment, and education have historically been recognized as requiring greater data protection due to the potential for significant personal and societal harm. + +5. **Dynamic Sensitivity**: What is considered sensitive can change over time based on societal norms and context. Enhanced protections provide a flexible framework that can adapt to evolving perceptions of sensitivity and privacy. + +6. **Prevention of Inference and Misuse**: Data and metadata in sensitive domains can be used to infer other sensitive information about individuals. Enhanced protections help prevent misuse that could lead to loss of privacy, financial harm, or other meaningful harms.
+ +Overall, the goal of enhanced data protections in sensitive domains is to provide a higher level of security and privacy to safeguard individuals from potential risks and harms associated with the misuse of their personal information.","Enhanced data protections in sensitive domains are implemented due to the intimate nature of these domains, the inability of individuals to opt out meaningfully, and the historical discrimination that has often accompanied data knowledge. Additionally, the protections afforded by current legal guidelines may be inadequate given the misuse of tracking technologies and the extensive data footprints individuals leave behind. The American public deserves assurances that data related to sensitive domains is protected and used appropriately, only in narrowly defined contexts with clear benefits to individuals and society.",0.6666666666666666,1.0000000000000007,0.0,0.9999999999666667,0.6365725019182954 +What are some of the potential harms associated with automated systems?,"['Many of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public.' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible.' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed.""]","Automated systems can pose several potential harms, including: + +1. 
**Inappropriate or Irrelevant Data Use**: Automated systems can use data in ways that are not suitable or relevant, leading to privacy violations and the misuse of personal information. + +2. **Replication of Unjust Outcomes**: Systems may perpetuate or amplify existing biases and injustices if they rely on flawed or biased data, resulting in discriminatory outcomes. + +3. **Confirmation Bias**: There is a risk that people might defer to automated systems without critical evaluation, thereby reinforcing potentially inaccurate decisions. + +4. **Lack of Individualization**: Automated systems often lack the capacity to consider individual circumstances, which can be crucial for fair decision-making. This can lead to severe consequences for individuals, such as loss of jobs or custody of children. + +5. **Surveillance and Chilling Effects**: The use of automated systems for surveillance can lead to a chilling effect, where individuals change their behavior due to the perception of being constantly watched. This can also send negative signals to communities about how they are viewed and treated. + +6. **Loss of Jobs**: Automation can lead to job displacement, affecting individuals' livelihoods and leading to economic and social challenges. + +These harms highlight the need for careful design, development, deployment, and ongoing monitoring of automated systems to mitigate potential negative impacts.","Some potential harms associated with automated systems include: reliance on unproven technologies that may not work as intended, causing substantial and unjustified harm; the use of historical data that can lead to irrelevant information affecting decision-making; technologies designed to violate safety, such as those facilitating stalking; unintended harms from intended or unintended uses; and issues like alert fatigue from false alerts, as seen in a sepsis prediction model. Additionally, automated moderation systems may fail to distinguish between counter-speech and hateful messages, silencing critics.",1.0,0.9935824708280475,0.5,0.9999999999666667,0.3707390644854311 +What is the significance of human-AI configuration in managing GAI risks and ensuring information integrity?,"['Human-AI Conļ¬guration; \nInformation Integrity' + 'Humans bring their unique perspectives, experiences, or domain-speciļ¬c expertise to interactions with AI systems but may not have detailed knowledge of AI systems and how they work. As a result, human experts may be unnecessarily ā€œaverseā€ to GAI systems, and thus deprive themselves or others of GAIā€™s beneļ¬cial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may over-rely on GAI systems or may unjustiļ¬ably perceive GAI content to be of higher quality than that produced by other sources. This phenomenon is an example of automation bias, or excessive deference to automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation or risks of bias or homogenization.']","Human-AI configuration plays a crucial role in managing the risks associated with General Artificial Intelligence (GAI) and ensuring the integrity of information produced or processed by these systems. Here are some key points highlighting its significance: + +1. **Balancing Expertise and Trust**: + - **Human Expertise**: Humans bring unique perspectives, experiences, and domain-specific expertise to the interaction with AI systems. 
This human input is essential for validating and contextualizing the outputs of GAI systems. + - **Understanding Limitations**: Many human experts may lack detailed knowledge of how AI systems work, which can lead to either undue skepticism or over-reliance on these systems. A well-configured human-AI interaction helps balance this by ensuring that humans are informed about the capabilities and limitations of GAI. + +2. **Preventing Automation Bias**: + - **Over-Reliance**: Due to the complexity and perceived reliability of GAI technology, humans might over-rely on these systems, assuming their output is always of high quality. This phenomenon, known as automation bias, can lead to uncritical acceptance of AI-generated information. + - **Mitigating Risks**: Proper human-AI configuration can help mitigate automation bias by ensuring that humans remain critical evaluators of AI outputs, rather than passive recipients. + +3. **Ensuring Information Integrity**: + - **Quality Control**: Humans can provide a layer of quality control, cross-referencing AI-generated information with other sources and their own expertise to ensure its accuracy and relevance. + - **Bias and Homogenization**: GAI systems can sometimes introduce bias or homogenize information. Human intervention can help detect and correct these issues, maintaining diverse and unbiased information streams. + +4. **Risk Management**: + - **Confabulation**: GAI systems can produce plausible but incorrect information. Human oversight is necessary to identify and correct such errors, ensuring that the information used for decision-making is reliable. + - **Ethical Considerations**: Humans can apply ethical principles and contextual understanding that GAI systems may lack, preventing the use of AI in ways that might be harmful or unjust. + +In summary, the human-AI configuration is significant for managing GAI risks and ensuring information integrity by leveraging human expertise, preventing automation bias, ensuring quality control, and managing ethical and contextual considerations.","The significance of human-AI configuration in managing GAI risks and ensuring information integrity lies in its role in evaluating content lineage and origin, adapting training programs for digital content transparency, developing certification programs for managing GAI risks, delineating human proficiency tests from GAI capabilities, and implementing systems to monitor and track outcomes of human-GAI configurations for future improvements. Involving end-users, practitioners, and operators in prototyping and testing activities is also crucial, especially in various scenarios including crisis situations or ethically sensitive contexts.",0.6923076923076923,0.9390165526981417,0.0,0.49999999995,0.40639866290124 +What are the key oversight functions involved in the GAI lifecycle?,['ā€¢ Auditing and assessment'],"The key oversight functions involved in the General Artificial Intelligence (GAI) lifecycle typically include: + +1. **Auditing and Assessment**: Regularly evaluating the AI systems to ensure they meet safety, ethical, and performance standards. This can involve both internal and external audits. + +2. **Monitoring and Reporting**: Continuously monitoring the AI systems for compliance with predefined metrics and promptly reporting any deviations or anomalies. This ensures ongoing oversight and helps in identifying potential risks early. + +3. 
**Regulatory Compliance**: Ensuring that the AI systems comply with all relevant laws, regulations, and industry standards. This includes data privacy laws, ethical guidelines, and other applicable regulations. + +4. **Risk Management**: Identifying, assessing, and mitigating risks associated with the deployment and use of AI systems. This involves putting in place strategies to manage both known and unknown risks. + +5. **Ethical Review**: Regularly reviewing the AI systems to ensure they align with ethical standards and societal values. This can include considerations around fairness, bias, transparency, and accountability. + +6. **Stakeholder Engagement**: Involving a diverse range of stakeholders in the oversight process to ensure that multiple perspectives are considered. This can include users, impacted communities, experts, and regulatory bodies. + +7. **Security Management**: Ensuring that AI systems are secure from cyber threats and malicious attacks. This involves implementing robust security measures and regularly updating them to address new vulnerabilities. + +Each of these functions plays a critical role in ensuring that GAI systems are safe, ethical, and effective throughout their lifecycle.","The key oversight functions involved in the GAI lifecycle include senior leadership, legal, compliance, and internal evaluation.",0.02631578947368421,0.9611086617504818,0.0,0.0,0.22699205913205736 +What is the purpose of the AI Safety Institute established by NIST?,"['NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to continue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and trustworthy development and use of AI.']","The purpose of the AI Safety Institute established by NIST is to build the science necessary for the safe, secure, and trustworthy development and use of AI. This institute, along with the AI Safety Institute Consortium, aims to continue efforts to ensure that AI technologies are developed and utilized in ways that are safe and reliable.","The purpose of the AI Safety Institute established by NIST is to continue efforts to build the science necessary for safe, secure, and trustworthy development and use of artificial intelligence (AI), in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",0.6666666666666666,1.0,1.0,0.9999999999,0.7461228068321668 +What is the purpose of using structured feedback mechanisms in relation to AI-generated content?,['Evaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input.'],"The purpose of using structured feedback mechanisms in relation to AI-generated content is to identify and mitigate potential biases and stereotypes that the AI may produce. By systematically gathering and analyzing feedback from a diverse range of users, developers can better understand how the AI's outputs are perceived and where they may fall short. This feedback can then be used to refine the AI models, ensuring they generate more accurate, fair, and unbiased content. 
Structured feedback mechanisms help in providing a consistent and organized way to evaluate the AI's performance and make necessary adjustments to improve its safety and reliability.",The purpose of using structured feedback mechanisms in relation to AI-generated content is to solicit and capture user input about the content to detect subtle shifts in quality or alignment with community and societal values.,0.07692307692307693,1.000000000000001,0.0,0.9999999999,0.8757469864689666 +What measures are suggested to mitigate risks related to harmful bias in generative AI systems?,[],"To mitigate risks related to harmful bias in generative AI systems, several measures can be suggested: + +1. **Diverse and Representative Data**: Ensure that the training data is diverse and representative of all groups to minimize biases that stem from unbalanced datasets. + +2. **Bias Audits**: Regularly conduct bias audits and assessments to identify and address biases in the AI system. + +3. **Fairness Constraints**: Implement fairness constraints and algorithms that can help ensure equitable outcomes across different demographic groups. + +4. **Transparency and Explainability**: Increase transparency and explainability of AI models so that biases can be more easily identified and understood. + +5. **Human Oversight**: Incorporate human oversight in the decision-making process to catch and correct biased outputs. + +6. **Continuous Monitoring and Updating**: Continuously monitor the AI system after deployment and update it as new biases are discovered or as societal norms evolve. + +7. **Ethical Guidelines and Policies**: Develop and adhere to ethical guidelines and policies that prioritize fairness and non-discrimination. + +8. **Cross-disciplinary Collaboration**: Engage with experts from diverse fields such as ethics, law, and social sciences to better understand and address potential biases. + +These measures can help create more equitable and fair generative AI systems by addressing biases at multiple levels of the development and deployment process.","To mitigate risks related to harmful bias in generative AI systems, the suggested measures include applying explainable AI (XAI) techniques as part of ongoing continuous improvement processes, documenting how pre-trained models have been adapted for specific generative tasks, and documenting sources and types of training data along with potential biases present in the data.",0.0,0.993178251996626,0.0,0.0,0.3008500821386147 +What are the implications of bias and discrimination in automated systems on the rights of the American public?,"['Among the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people's \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent.'
+ ""the impact of these systems on individuals and communities is potentially severe: the systems lack individualization and work against the belief that people can change for the better, system use can lead to the loss of jobs and custody of children, and surveillance can lead to chilling effects for communities and sends negative signals to community members about how they're viewed.""]","Bias and discrimination in automated systems have significant implications for the rights of the American public. These implications include: + +1. **Limiting Opportunities:** Automated systems used in hiring and credit decisions can reflect and reproduce existing inequities or embed new harmful biases. This can prevent individuals from accessing job opportunities, loans, and other critical resources, thereby limiting their economic and social mobility. + +2. **Threatening Privacy:** Unchecked data collection by social media and other platforms can undermine individuals' privacy. People are often tracked pervasively without their knowledge or consent, leading to potential misuse of their personal information. + +3. **Loss of Critical Resources and Services:** Systems that are biased or discriminatory can lead to individuals being unfairly denied access to essential services such as healthcare. For example, biased algorithms in patient care can result in unsafe or ineffective treatment for certain groups. + +4. **Erosion of Trust:** The use of biased and discriminatory systems can lead to a loss of trust in technology and institutions. People may feel unfairly judged and surveilled, creating a chilling effect where individuals alter their behavior due to the fear of being monitored or unfairly treated. + +5. **Negative Social Signals:** Surveillance and biased system use can send negative signals to communities about how they are viewed by society, potentially fostering feelings of disenfranchisement and marginalization. + +Overall, the implications of bias and discrimination in automated systems are profound, as they can exacerbate existing social injustices, undermine democratic principles, and harm the social fabric of communities.","The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting or reproducing existing unwanted inequities. These outcomes can threaten people's opportunities, undermine their privacy, and lead to pervasive tracking of their activity, often without their knowledge or consent.",1.0,0.9994649819896518,1.0,0.99999999995,0.9916023599691439 +What are the main principles outlined in the AI Bill of Rights and how do they aim to protect the rights of the American public?,"['To advance President Biden's vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threats—and uses technologies in ways that reinforce our highest values.']","The AI Bill of Rights, as outlined by the White House Office of Science and Technology Policy, includes five main principles designed to protect the rights of the American public in the context of artificial intelligence.
These principles aim to ensure that automated systems are designed, used, and deployed in ways that safeguard the public and uphold societal values. The principles are as follows: + +1. **Safe and Effective Systems:** Automated systems should be designed to be safe and effective, ensuring that they do not cause harm to individuals and that they function as intended. + +2. **Algorithmic Discrimination Protections:** These protections aim to prevent and mitigate discrimination that can arise from biased algorithms. This principle ensures that automated systems do not perpetuate or exacerbate existing biases and inequalities. + +3. **Data Privacy:** Individuals' data should be protected, and automated systems should be designed to prioritize privacy. This includes ensuring that data collection and usage are transparent and that individuals have control over their personal information. + +4. **Notice and Explanation:** People should be informed when an automated system is being used and should be provided with understandable explanations about how decisions are made by these systems. This principle emphasizes transparency and accountability in the deployment of AI technologies. + +5. **Human Alternatives, Consideration, and Fallback:** There should be alternative options available for individuals to engage with, and human oversight should be present to review and address concerns or errors that arise from automated systems. This ensures that people are not solely dependent on automated systems and that there are mechanisms in place for human intervention when necessary. + +These principles collectively aim to protect the American public by ensuring that AI technologies are developed and used in ways that are safe, equitable, transparent, and accountable, thereby reinforcing the highest values of society.","The main principles outlined in the AI Bill of Rights are not explicitly listed in the provided context. However, the context discusses the Blueprint for an AI Bill of Rights, which consists of five principles aimed at guiding the design, use, and deployment of automated systems to protect the rights of the American public. It emphasizes the importance of technical protections and practices to guard against potential harms and outlines expectations for automated systems, including transparency and reporting.",0.23333333333333334,0.9261068416433295,0.6666666666666666,0.9999999999,0.5747555085321088 +What measures are suggested to assess the environmental impact of AI model training and management activities?,[],"To assess the environmental impact of AI model training and management activities, several measures are suggested. These include: + +1. **Energy Consumption Tracking**: Monitoring and recording the amount of electricity used during the training and operation of AI models. This can be done using tools that measure energy usage at the hardware level or through software solutions that estimate the energy consumption based on the computational resources used. + +2. **Carbon Footprint Analysis**: Calculating the carbon emissions associated with the energy consumption. This involves understanding the sources of the electricity (e.g., renewable vs. non-renewable) and converting the energy usage into equivalent carbon emissions. + +3. **Efficiency Metrics**: Developing and using metrics to assess the efficiency of AI models. This includes measuring the trade-off between the performance of the model (e.g., accuracy) and the resources consumed (e.g., compute time, memory). + +4. 
**Lifecycle Assessment (LCA)**: Conducting a comprehensive lifecycle assessment that considers the environmental impact at every stage of the AI model's lifecycle, from data collection and preprocessing to training, deployment, and maintenance. + +5. **Sustainable Practices**: Implementing practices that reduce environmental impact, such as optimizing algorithms to be more efficient, using more energy-efficient hardware, and selecting data centers that are powered by renewable energy sources. + +By integrating these measures, organizations can better understand and mitigate the environmental impact of their AI activities.","The suggested measures to assess the environmental impact of AI model training and management activities include: 1) Assessing safety to physical environments when deploying GAI systems, 2) Documenting anticipated environmental impacts of model development, maintenance, and deployment in product design decisions, 3) Measuring or estimating environmental impacts such as energy and water consumption for training, fine-tuning, and deploying models, and verifying trade-offs between resources used at inference time versus additional resources required at training time, and 4) Verifying the effectiveness of carbon capture or offset programs for GAI training and applications, while addressing green-washing concerns.",1.0,1.0000000000000004,0.0,0.0,0.23560039571248534 +What role do fraud detection algorithms play in the adjudication of benefits and penalties?,"['Systems related to access to benefits or services or assignment of penalties such as systems that support decision-makers who adjudicate benefits such as collating or analyzing information or matching records, systems which similarly assist in the adjudication of administrative or criminal penalties, fraud detection algorithms, services or benefits access control algorithms, biometric systems used as access control, and systems which make benefits or services related decisions on a fully or partially autonomous basis (such as a determination to revoke benefits).']","Fraud detection algorithms play a crucial role in the adjudication of benefits and penalties by identifying potentially fraudulent activities. These algorithms analyze patterns and behaviors in the data to detect anomalies that might indicate fraud. This can include: + +1. **Collating or Analyzing Information**: Fraud detection algorithms process large volumes of data to identify inconsistencies or patterns that suggest fraudulent activity. This helps decision-makers to focus on suspicious cases that require further investigation. + +2. **Matching Records**: They assist in matching records across different databases to ensure that individuals are not receiving benefits or services they are not entitled to. For example, they can detect when someone is attempting to claim benefits under multiple identities. + +3. **Assisting Adjudication**: By flagging suspicious cases, these algorithms help administrative or criminal adjudicators make more informed decisions regarding the allocation or revocation of benefits and the assignment of penalties. + +4. **Access Control**: Biometric systems and other access control algorithms can prevent unauthorized access to benefits and services, thereby reducing opportunities for fraud. 
+ +In summary, fraud detection algorithms enhance the efficiency and accuracy of the adjudication process by providing critical insights and automated checks that help prevent and penalize fraudulent activities.",Fraud detection algorithms assist in the adjudication of benefits and penalties by analyzing information and matching records to support decision-makers.,0.9444444444444444,1.0000000000000007,1.0,0.9999999999,0.4229766857375674 +What role does community participation play in the design of technology for democratic values?,"['receiving community input into the design and use of technologies' + 'people throughout the United States, public servants across Federal agencies, and members of the international community spoke up about both the promises and potential harms of these technologies, and played a central role in shaping the Blueprint for an AI Bill of Rights.']","Community participation plays a crucial role in the design of technology for democratic values. When people from diverse backgrounds, including citizens, public servants, and members of the international community, provide input, it helps ensure that the technology developed aligns with the needs, values, and rights of the society it serves. This inclusive approach can highlight both the promises and potential harms of new technologies, thereby contributing to more ethical, equitable, and effective technological solutions. For example, community input was central in shaping the Blueprint for an AI Bill of Rights, which aims to safeguard democratic values in the use and development of AI technologies.","Community participation plays a crucial role in the design of technology for democratic values by emphasizing human-computer interaction that involves the community, ensuring that the technology is non-discriminatory, explainable, and privacy-aware. Engaging with impacted communities helps to understand the potential harms of technologies and build protection by design into future systems.",0.2857142857142857,1.000000000000001,0.3333333333333333,0.99999999995,0.8293995380986372 +What are the policies and procedures related to human-AI configuration in the oversight of AI systems?,[],"The specific policies and procedures related to human-AI configuration in the oversight of AI systems can vary significantly depending on the organization and the regulatory environment they operate in. However, here are some common elements that are typically included: + +1. **Clear Role Definitions**: Establishing clear roles and responsibilities for both humans and AI systems. This includes specifying what decisions AI can make autonomously and what requires human intervention. + +2. **Transparency and Explainability**: Ensuring that AI systems are transparent and their decision-making processes can be explained to human overseers. This helps in understanding how decisions are made and identifying any biases or errors. + +3. **Human-in-the-Loop (HITL) Systems**: Implementing HITL systems where human oversight is integrated into the AI decision-making process. This could involve humans approving or disapproving AI decisions or providing feedback to improve system performance. + +4. **Ethical Guidelines**: Adhering to ethical guidelines that dictate how AI systems should be developed and used. This includes considerations around privacy, fairness, and accountability. + +5. **Risk Management**: Conducting risk assessments to identify potential risks associated with AI systems and developing mitigation strategies. 
This includes continuous monitoring and updating of risk management plans. + +6. **Training and Education**: Providing training and education for all stakeholders involved in the oversight of AI systems. This ensures that everyone understands how the AI system works and what their role is in its oversight. + +7. **Compliance and Auditing**: Establishing procedures for regular auditing and compliance checks to ensure that AI systems adhere to relevant laws, regulations, and internal policies. + +8. **Incident Response Plans**: Developing and maintaining incident response plans to address any issues or failures in AI systems quickly and effectively. + +9. **Feedback Mechanisms**: Implementing feedback mechanisms to continuously improve AI systems based on human oversight and real-world performance. + +These elements help ensure that AI systems are used responsibly and effectively, with appropriate human oversight to mitigate risks and enhance their benefits.","Policies and procedures are in place to define and differentiate roles and responsibilities for human-AI configurations and oversight of AI systems. This includes establishing acceptable use policies for GAI interfaces, modalities, and human-AI configurations, as well as defining criteria for the kinds of queries GAI applications should refuse to respond to.",1.0,0.0,0.0,0.0,0.3065417592822528 +What is the purpose of the AI Risk Management Framework for Generative AI?,"['This document is a cross-sectoral profile of and companion resource for the AI Risk Management \nFramework (AI RMF 1.0) for Generative AI,1 pursuant to President Biden's Executive Order (EO) 14110 on \nSafe, Secure, and Trustworthy Artificial Intelligence.2 The AI RMF was released in January 2023, and is \nintended for voluntary use and to improve the ability of organizations to incorporate trustworthiness \nconsiderations into the design, development, use, and evaluation of AI products, services, and systems. \nA profile is an implementation of the AI RMF functions, categories, and subcategories for a specific \nsetting, application, or technology – in this case, Generative AI (GAI) – based on the requirements, risk \ntolerance, and resources of the Framework user. AI RMF profiles assist organizations in deciding how to \nbest manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory' + 'This document defines risks that are novel to or exacerbated by the use of GAI. After introducing and describing these risks, the document provides a set of suggested actions to help organizations govern, map, measure, and manage these risks.']","The purpose of the AI Risk Management Framework (AI RMF) for Generative AI is to help organizations improve their ability to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems. It aims to assist organizations in managing AI risks in a way that aligns with their goals and considers legal and regulatory requirements. The framework provides a structured approach to govern, map, measure, and manage risks that are novel to or exacerbated by the use of Generative AI.","The purpose of the AI Risk Management Framework (AI RMF) for Generative AI is to improve the ability of organizations to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems.
It assists organizations in deciding how to best manage AI risks in alignment with their goals, legal/regulatory requirements, and best practices.",0.625,1.0000000000000007,1.0,0.99999999995,0.9365460111387904 +What does the term 'underserved communities' refer to in the context of the AI Bill of Rights?,[],"In the context of the AI Bill of Rights, the term 'underserved communities' refers to groups of people who have historically been marginalized or disadvantaged and thus have had less access to resources, opportunities, or services. These communities may include racial and ethnic minorities, low-income populations, people with disabilities, rural communities, and other groups that have been systematically excluded or face significant barriers to accessing benefits and opportunities. The AI Bill of Rights aims to ensure that these communities are protected and that AI systems are used in ways that promote fairness and equity, rather than perpetuating existing biases and disparities.","The term 'underserved communities' refers to communities that have been systematically denied a full opportunity to participate in aspects of economic, social, and civic life.",0.0,0.9999999999999991,0.0,0.0,0.8926894132037901 +What are the challenges associated with value chain and component integration in GAI systems?,[],"The challenges associated with value chain and component integration in General Artificial Intelligence (GAI) systems are multifaceted. Here are some key issues: + +1. **Interoperability**: Ensuring different components of the GAI system can work together seamlessly is a significant challenge. Components developed by different teams or organizations may not easily integrate due to differences in design, protocols, or standards. + +2. **Complexity Management**: GAI systems are inherently complex, involving numerous subsystems and layers of functionality. Managing this complexity to ensure reliable and predictable performance is difficult. + +3. **Security**: Integrating various components and systems increases the attack surface for potential security breaches. Ensuring that the entire value chain is secure and that components do not introduce vulnerabilities is crucial. + +4. **Consistency and Coherence**: Ensuring that all components of the GAI system adhere to the same set of values and operational principles is challenging. Discrepancies can lead to unpredictable or undesirable behavior. + +5. **Scalability**: Integrating components in a way that allows the system to scale effectively as demands increase is another challenge. Each component must handle increased loads without degrading performance. + +6. **Data Integration and Quality**: Integrating data from various sources and ensuring its quality and consistency is critical for the effective functioning of GAI systems. Poor data quality can lead to incorrect inferences and decisions. + +7. **Ethical and Bias Considerations**: Different components may have different ethical guidelines or biases programmed into them. Ensuring a unified ethical stance and minimizing bias across the entire system is a significant challenge. + +8. **Regulatory Compliance**: Ensuring that the entire value chain complies with relevant regulations and standards is complex, especially when components are sourced from multiple jurisdictions with differing legal requirements. + +9. 
**Maintenance and Upgrades**: Keeping the entire system up-to-date and maintaining compatibility during upgrades or patches is challenging, particularly in a rapidly evolving field like AI. + +10. **Accountability and Transparency**: Assigning accountability and ensuring transparency in decision-making processes across integrated components can be difficult. Understanding how decisions are made by the system as a whole requires clear documentation and traceability. + +Addressing these challenges requires a coordinated effort across multiple disciplines, including software engineering, cybersecurity, data science, ethics, and regulatory compliance.","Challenges associated with value chain and component integration in GAI systems include the improper acquisition or vetting of third-party components such as datasets, pre-trained models, and software libraries, which can lead to diminished transparency and accountability. The scale of training data may be too large for humans to vet, and the difficulty of training foundation models can result in extensive reuse of a limited number of models. Additionally, it may be difficult to attribute issues in a system's behavior to any one of these sources, and errors in third-party GAI components can have downstream impacts on accuracy and robustness.",1.0,0.968024263195462,0.0,0.0,0.8542571737249749 +"What should entities do to proactively identify and manage risks associated with collecting, using, sharing, or storing sensitive data?",['risk identification and management assessments and any steps taken to mitigate potential harms'],"Entities should take several proactive steps to identify and manage risks associated with collecting, using, sharing, or storing sensitive data. Here are some key practices: + +1. **Conduct Regular Risk Assessments**: Regularly perform comprehensive risk assessments to identify potential vulnerabilities and threats associated with the handling of sensitive data. This includes evaluating the data lifecycle from collection to storage. + +2. **Implement Robust Data Governance Policies**: Establish clear data governance policies that outline how sensitive data should be handled. This includes defining roles and responsibilities for data management and ensuring compliance with relevant regulations. + +3. **Data Minimization**: Collect only the data that is necessary for the intended purpose. Avoid collecting or storing excessive or irrelevant data to reduce the risk of exposure. + +4. **Ensure Data Encryption**: Use strong encryption methods to protect sensitive data both in transit and at rest. This ensures that even if data is intercepted or accessed without authorization, it remains unreadable. + +5. **Access Control and Authentication**: Implement strict access controls to ensure that only authorized personnel can access sensitive data. Use multi-factor authentication (MFA) to add an extra layer of security. + +6. **Regular Audits and Monitoring**: Conduct regular audits and continuous monitoring to detect any unusual activities or potential breaches. This helps in identifying and responding to threats in a timely manner. + +7. **Employee Training and Awareness**: Provide regular training to employees on data protection practices and the importance of safeguarding sensitive data. Ensure that they are aware of potential risks and how to mitigate them. + +8. **Incident Response Plan**: Develop and maintain an incident response plan to quickly address any data breaches or security incidents. 
This plan should outline the steps to be taken in the event of a breach, including notification procedures and mitigation strategies. + +9. **Third-Party Risk Management**: Assess and manage risks associated with third-party vendors who may have access to sensitive data. Ensure that they adhere to the same data protection standards and practices. + +10. **Privacy Impact Assessments (PIAs)**: Conduct Privacy Impact Assessments to evaluate the impact of data collection and processing activities on individuals' privacy. This helps in identifying and mitigating privacy risks. + +By adopting these practices, entities can better identify and manage risks associated with sensitive data, thereby reducing the likelihood of data breaches and ensuring compliance with data protection regulations.","Entities that collect, use, share, or store sensitive data should attempt to proactively identify harms and seek to manage them to avoid, mitigate, and respond appropriately to identified risks. Appropriate responses include determining not to process data when the privacy risks outweigh the benefits or implementing measures to mitigate acceptable risks.",1.0,0.9662746771752876,1.0,0.9999999999,0.412425685306379 +What role do algorithmic harms play in shaping the principles of the Blueprint for an AI Bill of Rights?,"['The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans' lives, and that preventing the \nharms of these technologies is both necessary and achievable.']","Algorithmic harms play a significant role in shaping the principles of the Blueprint for an AI Bill of Rights. These harms highlight the potential negative impacts of AI technologies on individuals and communities, emphasizing the need for safeguards to prevent such outcomes. The recognition of these harms drives the development of principles aimed at ensuring that AI technologies are designed and used in ways that protect people's rights and well-being. This includes principles related to fairness, accountability, transparency, and the protection of privacy, all aimed at preventing and mitigating the potential harms that can arise from the use of AI systems.",The answer to given question is not present in context,0.7142857142857143,0.9772518485924877,1.0,0.0,0.925030344344802 +What is the purpose of reviewing transparency artifacts in the context of third-party models?,[],"The purpose of reviewing transparency artifacts in the context of third-party models is to verify that these models align with safety and ethical standards. Transparency artifacts can include documentation, logs, and other materials that provide insight into how the model was developed, trained, and tested. By reviewing these artifacts, third parties can ensure that the model operates as intended and does not pose unforeseen risks or biases. This process helps build trust, enhance accountability, and promote the responsible use of AI technology.","The purpose of reviewing transparency artifacts in the context of third-party models is to ensure information integrity, security, and effective value chain and component integration.",0.0,0.9999999999999997,0.0,0.0,0.65129221401519 +What types of automated systems should be covered by the AI Bill of Rights?,[],"The AI Bill of Rights should cover a broad range of automated systems, especially those that significantly impact individuals' lives and society. This typically includes, but is not limited to: + +1.
**Healthcare Systems**: Automated systems used for diagnosing diseases, recommending treatments, or managing patient data. +2. **Financial Systems**: Algorithms used in credit scoring, loan approvals, trading, and fraud detection. +3. **Employment and Hiring Systems**: Systems used for screening resumes, conducting interviews, and making hiring decisions. +4. **Legal and Judicial Systems**: Automated systems used in risk assessments, sentencing, and legal advice. +5. **Education Systems**: Systems that are used for grading, personalized learning, and admissions. +6. **Public Services**: Automated systems used in welfare distribution, law enforcement, and public resource management. +7. **Consumer Services**: Algorithms used in recommendations, customer service chatbots, and personalized advertising. +8. **Transportation Systems**: Autonomous vehicles and systems used for traffic management. +9. **Surveillance Systems**: Systems used for monitoring and identifying individuals in public and private spaces. + +These systems should be designed, developed, and deployed with considerations for fairness, transparency, accountability, privacy, and security to ensure they do not harm individuals or society.","The types of automated systems that should be covered by the AI Bill of Rights include those that have the potential to meaningfully impact civil rights, civil liberties, or privacy, equal opportunities, and access to critical resources and services. Examples include speech-related systems, surveillance and criminal justice algorithms, voting-related systems, education-related systems, housing-related systems, employment-related systems, health technologies, and financial system algorithms.",0.0,0.9379375969147422,0.0,0.0,0.861520906732834 +What is the significance of content provenance in managing risks associated with AI-generated synthetic content?,[],"The significance of content provenance in managing risks associated with AI-generated synthetic content lies in its ability to track and verify the origin and history of the content. Here are some key points: + +1. **Authenticity and Trust**: Content provenance helps to establish the authenticity of the content, ensuring that it comes from a reliable and verified source. This builds trust among users and mitigates the risk of misinformation or malicious use of synthetic content. + +2. **Accountability**: By maintaining a record of where and how the content was generated, it becomes easier to hold creators accountable for the content they produce. This can help deter the creation and distribution of harmful or deceptive synthetic content. + +3. **Transparency**: Provenance information provides transparency about the content creation process, including the tools and methods used. This transparency can help users better understand the nature of the content and its potential biases or limitations. + +4. **Combating Deepfakes and Misinformation**: With the rise of deepfakes and other forms of manipulated media, content provenance can serve as a critical tool in identifying and combating these threats. It enables the detection of alterations and the verification of the content's integrity. + +5. **Legal and Ethical Compliance**: Provenance data helps ensure that AI-generated content complies with legal and ethical standards. It allows for the enforcement of intellectual property rights and adherence to regulations regarding content creation and distribution.
+ +Overall, content provenance is a crucial component in the broader strategy of managing risks associated with AI-generated synthetic content, enhancing trust, and ensuring ethical and responsible use.","Content provenance is significant in managing risks associated with AI-generated synthetic content as it involves digital transparency mechanisms like provenance data tracking, which can trace the origin and history of content. This helps in distinguishing human-generated content from AI-generated synthetic content, facilitating greater information access about both authentic and synthetic content. Provenance data tracking can assist in assessing authenticity, integrity, intellectual property rights, and potential manipulations in digital content, thereby improving information integrity and upholding public trust.",1.0,1.0000000000000002,0.0,0.0,0.928592795386638 +What role do legal protections play in addressing algorithmic discrimination?,"['Many companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases.']","Legal protections play a crucial role in addressing algorithmic discrimination by establishing frameworks and regulations to ensure that automated systems are used fairly and equitably. These protections often include: + +1. **Standards and Guidance**: Federal government agencies develop standards and provide guidance for the use of automated systems to help prevent bias. This ensures that there is a clear understanding of what constitutes acceptable use of algorithms and what practices need to be avoided or corrected. + +2. **Bias Testing and Audits**: Legal requirements can mandate bias testing as part of product quality assessments and launch procedures. This helps companies identify and mitigate potential discrimination before products are released to the public. + +3. **Transparency and Accountability**: Legal frameworks can require transparency in the development and deployment of algorithms. This means companies and organizations must disclose how their algorithms work and what measures they are taking to prevent discrimination, thereby fostering accountability. + +4. **Impact Assessments**: Legal protections can necessitate the conduction of impact assessments to evaluate the potential and actual effects of algorithms on different groups. These assessments help identify areas where algorithms may disproportionately impact certain populations and provide data to inform corrective actions. 
+ +Overall, legal protections ensure that there is a systemic approach to preventing, identifying, and addressing algorithmic discrimination, thereby safeguarding the public from potential harms associated with biased algorithms.","The context mentions that algorithmic discrimination may violate legal protections depending on specific circumstances, indicating that legal protections play a role in addressing algorithmic discrimination.",0.16666666666666666,1.0000000000000002,0.0,0.9999999999,0.289415916911166 +What measures should be taken to ensure that surveillance technologies do not infringe on privacy and civil liberties?,[],"Ensuring that surveillance technologies do not infringe on privacy and civil liberties involves implementing a combination of legal, technical, and ethical measures. Here are some key measures that should be taken: + +1. **Strong Legal Frameworks**: + - **Clear Regulations**: Establish clear and comprehensive laws and regulations that define the scope and limits of surveillance activities. + - **Oversight and Accountability**: Create independent oversight bodies to monitor and review surveillance practices and ensure compliance with laws. + - **Transparency Requirements**: Mandate transparency reports from entities that conduct surveillance, detailing the nature and extent of their activities. + +2. **Data Protection and Privacy Standards**: + - **Data Minimization**: Collect only the data that is necessary for a specific purpose and avoid excessive data collection. + - **Anonymization and Encryption**: Use techniques like anonymization and encryption to protect the identities and personal information of individuals. + - **Data Retention Policies**: Implement strict data retention policies to ensure that data is not kept longer than necessary and is securely deleted afterward. + +3. **Consent and Notification**: + - **Informed Consent**: Ensure that individuals are informed about surveillance practices and obtain their consent where applicable. + - **Notification**: Inform individuals when they are being monitored, except in specific cases where secrecy is justified and legally sanctioned. + +4. **Technical Safeguards**: + - **Security Measures**: Implement robust cybersecurity measures to protect collected data from unauthorized access and breaches. + - **Access Controls**: Restrict access to surveillance data to authorized personnel only and implement audit trails to monitor access. + +5. **Ethical Guidelines and Training**: + - **Ethical Standards**: Develop and enforce ethical guidelines for the use of surveillance technologies. + - **Training Programs**: Provide training for personnel involved in surveillance on privacy rights and the ethical use of surveillance technologies. + +6. **Public Engagement and Consultation**: + - **Stakeholder Involvement**: Engage with stakeholders, including civil society organizations and the public, to gather input and build trust. + - **Impact Assessments**: Conduct privacy and human rights impact assessments before deploying surveillance technologies. + +7. **Redress Mechanisms**: + - **Complaint Procedures**: Establish clear procedures for individuals to file complaints if they believe their privacy or civil liberties have been violated. + - **Legal Recourse**: Provide avenues for legal recourse and remedies for individuals affected by unlawful surveillance. 
+ +By implementing these measures, it is possible to balance the benefits of surveillance technologies with the need to protect individual privacy and civil liberties.","Surveillance technologies should be subject to heightened oversight that includes at least pre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring should not be used in education, work, housing, or in other contexts where the use of such surveillance technologies is likely to limit rights, opportunities, or access.",0.0,0.9963964606778397,0.0,0.0,0.8875699203330398 +What are the requirements for employers regarding workplace surveillance during a labor dispute?,[],"I don't have specific information on the requirements for employers regarding workplace surveillance during a labor dispute. However, in general, workplace surveillance during labor disputes is typically governed by labor laws and regulations, which can vary by jurisdiction. Employers usually need to balance surveillance practices with employees' rights to privacy and to organize. + +For accurate and detailed information, it is advisable to consult legal experts or refer to the specific labor laws and regulations in your region.","Federal law requires employers, and any consultants they may retain, to report the costs of surveilling employees in the context of a labor dispute. Employers engaging in workplace surveillance aimed at obtaining information concerning the activities of employees or a labor organization in connection with a labor dispute must report expenditures relating to this surveillance to the Department of Labor Office of Labor-Management Standards, and consultants who employers retain for these purposes must also file reports regarding their activities.",0.0,0.0,0.0,0.0,0.6974097727587334 +What is the importance of documenting roles and responsibilities related to managing AI risks within an organization?,[],"Documenting roles and responsibilities related to managing AI risks within an organization is crucial for several reasons: + +1. **Clear Accountability:** It ensures that specific individuals or teams are accountable for managing AI risks. This clarity helps in tracking who is responsible for what, thereby reducing the likelihood of tasks falling through the cracks. + +2. **Consistency and Compliance:** Proper documentation ensures that there are consistent processes and procedures in place. This is essential for compliance with regulatory requirements and internal policies. + +3. **Risk Mitigation:** By clearly defining roles and responsibilities, organizations can more effectively identify, assess, and mitigate potential risks associated with AI systems. This proactive approach can prevent issues before they arise. + +4. **Resource Allocation:** It helps in the efficient allocation of resources, including personnel, to manage AI risks. Knowing who is responsible allows for better planning and resource management. + +5. **Training and Development:** It provides a framework for training and development, ensuring that the right people have the necessary skills and knowledge to manage AI risks effectively. + +6. **Transparency and Trust:** Clear documentation fosters transparency within the organization and with external stakeholders. It builds trust that the organization is taking AI risks seriously and managing them responsibly. + +7. 
**Improved Communication:** It facilitates better communication and coordination among different departments and teams involved in AI development and deployment. This ensures that everyone is on the same page regarding risk management strategies. + +In summary, documenting roles and responsibilities is a foundational practice that supports the effective management of AI risks, helping organizations to operate more safely, efficiently, and in compliance with relevant standards and regulations.","The importance of documenting roles and responsibilities related to managing AI risks within an organization is to ensure that these roles and lines of communication are clear to individuals and teams throughout the organization. This clarity helps in mapping, measuring, and managing AI risks effectively.",1.0,0.9795286829470293,0.0,0.0,0.36633574175568157 +What is the importance of assessing the proportion of synthetic to non-synthetic training data in AI model development?,[],"Assessing the proportion of synthetic to non-synthetic training data in AI model development is crucial for several reasons: + +1. **Data Quality and Diversity**: Real-world data often contains a variety of nuances and complexities that synthetic data might not fully capture. Balancing synthetic and non-synthetic data ensures the model is exposed to a wide range of scenarios, improving its ability to generalize effectively. + +2. **Bias and Fairness**: Non-synthetic data can contain inherent biases present in the real world, while synthetic data can be generated to be more balanced. Assessing the proportion helps in managing and mitigating biases, promoting fairness in AI outcomes. + +3. **Model Robustness**: Combining synthetic with non-synthetic data can enhance the robustness of the model. Synthetic data can be used to supplement underrepresented scenarios in the non-synthetic data, helping the model perform better in diverse and rare situations. + +4. **Data Availability**: In cases where non-synthetic data is scarce or expensive to obtain, synthetic data can fill the gaps. However, over-reliance on synthetic data can lead to models that perform well in theory but poorly in practice. Understanding the proportion helps in striking the right balance. + +5. **Regulatory and Ethical Considerations**: Certain applications, particularly those involving sensitive information, may have regulatory constraints on using synthetic data. Assessing the proportion ensures compliance with legal and ethical standards. + +Overall, the proportion of synthetic to non-synthetic training data is a key factor in developing robust, fair, and generalizable AI models.","The importance of assessing the proportion of synthetic to non-synthetic training data in AI model development is to verify that the training data is not overly homogenous or generated by Generative AI (GAI), which helps mitigate concerns of model collapse.",0.0,0.991913309772118,0.0,0.0,0.2961192301829323 +What is the significance of technological diffusion in the context of integrating AI technologies within communities?,[],"Technological diffusion refers to the process by which new technologies spread across different communities and sectors. In the context of integrating AI technologies within communities, the significance of technological diffusion includes the following aspects: + +1. 
**Accessibility and Equity**: Effective technological diffusion ensures that AI technologies are accessible to a broad range of communities, not just those with advanced technical infrastructure or significant financial resources. This helps in minimizing the digital divide and promotes equity. + +2. **Adoption and Integration**: For AI to be truly beneficial, it needs to be adopted and integrated into various aspects of society. Technological diffusion facilitates this by ensuring that knowledge, tools, and practices related to AI are shared widely, enabling different communities to harness its potential. + +3. **Innovation and Improvement**: As AI technologies spread, they can be adapted and improved upon by different communities, leading to innovations that might not have been possible in a more centralized development environment. This collaborative improvement can accelerate the advancement of AI technologies. + +4. **Economic Impact**: Widespread diffusion of AI can stimulate economic growth by enabling new business models, improving efficiencies, and creating new job opportunities across various sectors. + +5. **Social Benefits**: AI has the potential to address numerous social challenges, from healthcare to education. Effective diffusion ensures that these benefits are realized across diverse communities, enhancing overall societal well-being. + +6. **Risk Management**: Understanding and managing the risks associated with AI, such as biases, privacy concerns, and ethical considerations, requires widespread knowledge and vigilance. Technological diffusion helps build a more informed and prepared society that can collaboratively address these challenges. + +In summary, technological diffusion is crucial for maximizing the benefits of AI technologies while ensuring that these benefits are distributed equitably and responsibly across different communities.","Technological diffusion is significant in the context of integrating AI technologies within communities as it emphasizes the importance of thoughtful and responsible development and integration of technology. Panelists noted that examining how technological diffusion has worked in urban planning can provide lessons on balancing ownership rights, use rights, and community health, safety, and welfare, ensuring better representation of all voices, especially those traditionally marginalized by technological advances.",1.0,0.9999999999999996,0.0,0.0,0.27883290369724545 +What is the purpose of ballot curing laws in the voting process?,"['Ballot \ncuring processes vary among states, and include direct phone calls, emails, or mail contact by election \nofficials.111 Voters are asked to provide alternative information or a new signature to verify the validity of their \nballot.' + 'A human curing process,98 which helps voters to confirm their signatures and correct other voting mistakes, is important to ensure all votes are counted,99 and it is already standard practice in much of the country for both an election official and the voter to have the opportunity to review and correct any such issues.100']","The purpose of ballot curing laws in the voting process is to ensure that all votes are counted accurately by allowing voters to correct mistakes or verify their signatures. This process typically involves election officials contacting voters through phone calls, emails, or mail to inform them of any issues with their ballots and provide an opportunity to resolve them. 
This helps to validate the authenticity of the ballots and ensures that eligible votes are not discarded due to minor errors.","Ballot curing laws are designed to allow voters to correct their ballot and have it counted in cases where a voter signature matching algorithm incorrectly flags their ballot as invalid or when there are other issues with their ballot. These laws ensure that voters have a fallback system to verify the validity of their ballot, which may include direct contact from election officials.",0.625,0.9999983793754975,1.0,0.99999999995,0.8572642735138769 +What role does technology play in implementing or improving social welfare systems?,[],"Technology plays a critical role in implementing and improving social welfare systems in several ways: + +1. **Efficiency and Accessibility**: Technology can streamline the administration of social welfare programs, making them more efficient and accessible. For example, online portals and mobile apps can simplify the application process for benefits, making it easier for individuals to access the support they need. + +2. **Data Management and Analytics**: Advanced data management systems and analytics can help social welfare agencies better understand the needs of the population they serve. By analyzing data trends, agencies can allocate resources more effectively and identify areas where intervention is needed most. + +3. **Communication and Outreach**: Technology enables better communication and outreach efforts. Social media, email, and texting can be used to inform the public about available services, deadlines, and important updates. This ensures that more people are aware of and can take advantage of social welfare programs. + +4. **Service Delivery**: Digital platforms can facilitate the delivery of services such as telehealth, online education, and remote job training. This can be particularly beneficial for individuals in remote or underserved areas who may not have easy access to physical locations offering these services. + +5. **Transparency and Accountability**: Technology can enhance transparency and accountability within social welfare systems. Digital records and blockchain technology can be used to track the distribution of funds and ensure that resources are used appropriately and reach the intended recipients. + +6. **Automation**: Automation of routine tasks can reduce the administrative burden on social welfare staff, allowing them to focus more on case management and personalized support. Automated systems can handle tasks such as eligibility verification, payment disbursement, and appointment scheduling. + +Overall, the integration of technology into social welfare systems has the potential to improve the efficiency, reach, and effectiveness of these programs, ultimately leading to better outcomes for individuals and communities.",The answer to given question is not present in context,1.0,0.9693434248220583,0.0,0.0,0.9217619531646224 +What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?,[],"Addressing risks associated with intellectual property (IP) infringement in organizational Generative AI (GAI) systems involves several proactive measures. Here are some suggested actions: + +1. **Legal Compliance and Licensing**: Ensure that all data and models used in GAI systems are legally obtained and properly licensed. This includes verifying the terms of use for any datasets or pre-trained models to avoid unauthorized use of IP. + +2. 
**Data Governance**: Implement strong data governance policies that include regular audits of the data used in GAI systems to ensure compliance with IP laws. This helps in identifying and mitigating potential IP risks early on. + +3. **Documentation and Transparency**: Maintain detailed documentation of data sources, model training processes, and the provenance of any third-party content. Transparency in these processes can help in defending against potential IP infringement claims. + +4. **Ethical AI Practices**: Adopt ethical AI practices that emphasize respect for IP rights. This involves training AI systems to avoid generating content that could infringe on existing IP. + +5. **Monitoring and Enforcement**: Continuously monitor the outputs of GAI systems for potential IP infringements. Implement enforcement mechanisms to address any identified infringements promptly. + +6. **Training and Awareness**: Educate employees and stakeholders about IP laws and the importance of respecting IP rights within the context of AI development and deployment. + +7. **Legal Consultation**: Regularly consult with legal experts specializing in IP law to stay updated on any changes in legislation and to ensure that the organization's practices remain compliant. + +These actions collectively help in mitigating the risks associated with IP infringement in GAI systems, ensuring that the organization's use of AI is both ethical and legally compliant.","The suggested action to address risks associated with intellectual property infringement in organizational GAI systems is to compile statistics on actual policy violations, take-down requests, and intellectual property infringement, and analyze transparency reports across demographic and language groups.",1.0,0.9367011960788706,0.0,0.0,0.2292939647482239 +What problems does AI-enabled nudification technology seek to address and protect against?,[],"AI-enabled nudification technology seeks to address and protect against several serious problems, including: + +1. **Privacy Violations**: Nudification technology can create non-consensual explicit images by altering innocent photos, leading to severe privacy breaches. +2. **Harassment and Exploitation**: These altered images can be used to harass, blackmail, or exploit individuals, particularly targeting women and minors. +3. **Reputation Damage**: The dissemination of fake explicit images can ruin personal and professional reputations, causing emotional and social harm. +4. **Legal and Ethical Issues**: The creation and distribution of such content can lead to legal consequences and ethical concerns about consent and digital manipulation. + +By addressing these issues, efforts to regulate and control AI-enabled nudification technology aim to protect individuals from being victimized by such malicious activities.","AI-enabled nudification technology seeks to address and protect against image-based abuse, particularly the creation of non-consensual intimate images that disproportionately impact women.
It aims to combat the proliferation of apps that allow users to create or alter images of individuals without their consent, which can lead to devastating harm to victims.",0.0,0.9820301597943587,0.0,0.0,0.5391966566727824 +What precautions should be taken when using derived data sources in automated systems?,"['justification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness.']","When using derived data sources in automated systems, several precautions should be taken to ensure appropriate and lawful use: + +1. **Documentation of Justification**: Each data attribute and source should have documented justification explaining why it is appropriate to use this data to inform the results of the automated system. This documentation should also detail why the use of such data will not violate any applicable laws. + +2. **Description of Attribute Generation Process**: For high-dimensional and/or derived attributes, provide overall descriptions of the attribute generation process. This should include how the attributes are generated and why they are deemed appropriate for the task at hand. + +3. **Legal Compliance**: Ensure that the use of derived data complies with all relevant regulations and laws. This includes data protection laws, privacy regulations, and any industry-specific legal requirements. + +4. **Data Quality and Accuracy**: Verify that the derived data is of high quality and accurately represents the information it is supposed to convey. Poor quality or inaccurate data can lead to misleading results and potentially harmful outcomes. + +By taking these precautions, the use of derived data sources in automated systems can be more transparent, justified, and legally compliant, thus enhancing the overall safety and reliability of the system.","Precautions that should be taken when using derived data sources in automated systems include careful tracking and validation of derived data, as it may be high-risk and could lead to feedback loops, compounded harm, or inaccurate results. Such data should be validated against the risk of collateral consequences.",0.3888888888888889,0.9999999999999992,0.0,0.9999999999,0.28175954473915993 +What are indirect prompt injection attacks and how do they exploit vulnerabilities in GAI-integrated applications?,[],"Indirect prompt injection attacks are a form of security vulnerability that target applications integrated with Generative AI (GAI) systems. These attacks exploit the way GAI models process and generate responses based on user input or pre-defined prompts. + +In an indirect prompt injection attack, an attacker crafts input data that subtly manipulates the prompt or context fed into the GAI model. This manipulated prompt can then cause the model to generate harmful, misleading, or unintended outputs. The attack takes advantage of the modelā€™s dependency on the provided context to guide its responses. + +Hereā€™s how these attacks typically exploit vulnerabilities in GAI-integrated applications: + +1. 
**User-Generated Content**: If an application allows users to submit content that is later used as part of the prompt for a GAI model, an attacker can insert malicious instructions or misleading information within this content. When the GAI model processes this content, it follows the hidden instructions, potentially generating harmful outputs. + +2. **Context Manipulation**: Attackers can manipulate the broader context or environment in which the GAI model operates. For instance, if the model pulls context from a database or external sources, tampering with these sources can alter the context in a way that leads to compromised outputs. + +3. **Untrusted Data Sources**: Applications that rely on data from untrusted or semi-trusted sources may inadvertently include malicious content in the prompts provided to the GAI model. An attacker could exploit this by injecting harmful instructions into these data sources. + +4. **Complex Prompt Structures**: Complex or dynamically generated prompts that incorporate various pieces of user data or external information can be difficult to sanitize completely. An attacker can exploit weaknesses in the prompt construction process to introduce malicious elements. + +To mitigate these risks, developers should implement robust input validation and sanitization processes, ensure that context sources are trustworthy, and continuously monitor and audit the outputs of GAI models for signs of manipulation or abuse.",Indirect prompt injection attacks occur when adversaries remotely exploit LLM-integrated applications by injecting prompts into data likely to be retrieved. These attacks can exploit vulnerabilities by stealing proprietary data or running malicious code remotely on a machine.,1.0,0.9018973343170877,0.0,0.0,0.5989926896732917 +What is the significance of digital content transparency in relation to the societal impacts of AI?,[],"Digital content transparency plays a crucial role in mitigating the societal impacts of AI by fostering trust, accountability, and informed decision-making. Here are some key points highlighting its significance: + +1. **Trust and Credibility**: Transparency helps in building trust among users by providing clear information about how AI systems generate content, make decisions, and operate. When users understand the processes and data behind AI outputs, they are more likely to trust the technology. + +2. **Accountability**: Transparent AI systems allow for better accountability. When the workings of an AI system are open to scrutiny, it becomes easier to identify and address biases, errors, or unethical practices. This helps in holding developers and organizations responsible for the impacts of their AI technologies. + +3. **Informed Decision-Making**: Users and stakeholders can make more informed decisions when they have access to detailed information about AI systems. This includes understanding the limitations, potential biases, and the contexts in which the AI operates effectively. Transparency ensures that individuals are aware of the risks and benefits, leading to more responsible use of AI. + +4. **Bias and Fairness**: Transparency helps in detecting and mitigating biases in AI systems. By understanding the data and algorithms used, it becomes possible to identify and correct unfair or discriminatory practices, promoting fairness and equality in society. + +5. **Ethical Considerations**: Transparent AI practices align with ethical guidelines and principles. 
They ensure that AI development and deployment respect human rights, privacy, and other ethical standards, thus minimizing negative societal impacts. + +6. **Regulation and Compliance**: Transparency aids regulatory bodies in assessing compliance with laws and standards. It facilitates the creation and enforcement of regulations that ensure AI systems are safe, fair, and beneficial to society. + +In summary, digital content transparency is essential for ensuring that AI systems are used responsibly and ethically, thereby minimizing their potential negative impacts on society and enhancing their positive contributions.","The significance of digital content transparency in relation to the societal impacts of AI lies in providing input for training materials about the capabilities and limitations of GAI systems. This transparency is crucial for AI actors, professionals, and the public to understand the societal impacts of AI and the role of diverse and inclusive content generation.",0.0,0.9761458087568448,0.0,0.0,0.869086463804969 +What is the purpose of engaging in threat modeling for GAI systems?,[],"The purpose of engaging in threat modeling for General Artificial Intelligence (GAI) systems is to proactively identify, understand, and mitigate potential risks and vulnerabilities that these systems might pose. This process helps in: + +1. **Risk Identification**: Detecting possible threats and vulnerabilities that could be exploited by malicious actors or could arise due to unintended behaviors of the GAI systems. +2. **Risk Assessment**: Evaluating the likelihood and impact of these threats to prioritize which risks require more immediate and robust mitigation strategies. +3. **Mitigation Planning**: Developing strategies and controls to prevent, detect, and respond to identified threats, thereby reducing the potential negative impacts on safety, security, and ethical considerations. +4. **Continuous Improvement**: Updating and refining threat models as GAI systems evolve and as new threats emerge, ensuring ongoing vigilance and adaptation. + +Overall, threat modeling is a critical process in ensuring the safe and responsible development, deployment, and operation of GAI systems, safeguarding both users and broader society from potential harms.",Engaging in threat modeling for GAI systems is intended to anticipate potential risks from these systems.,1.0,0.9408849557689556,0.0,0.0,0.6087705359186453 +What role do GAI systems play in augmenting cybersecurity attacks?,"['GAI-based systems present two primary information security risks: GAI could potentially discover or enable new cybersecurity risks by lowering the barriers for or easing automated exercise of oļ¬€ensive capabilities; simultaneously, it expands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data poisoning. \nOļ¬€ensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as hacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some vulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat actors might further these risks by developing GAI-powered security co-pilots for use in several parts of']","GAI (General Artificial Intelligence) systems can play a significant role in augmenting cybersecurity attacks in several ways: + +1. **Discovering Vulnerabilities**: GAI-based systems have the capability to discover vulnerabilities in hardware, software, and data systems. 
This means that they can identify potential weak points that could be exploited by cyber attackers. + +2. **Writing Exploit Code**: Once vulnerabilities are discovered, GAI systems can write code to exploit these vulnerabilities. This can significantly lower the barrier for conducting cyber attacks, making it easier for attackers to develop and deploy sophisticated malware or hacking tools. + +3. **Automating Offensive Capabilities**: GAI can automate the process of exercising offensive cyber capabilities. This includes automating tasks such as hacking, creating and distributing malware, and conducting phishing attacks. This automation can lead to more efficient and widespread cyber attacks. + +4. **GAI-Powered Security Co-Pilots**: Sophisticated threat actors might develop GAI-powered tools that act as security co-pilots. These tools can assist in various parts of a cyber attack, providing real-time guidance and enhancing the attacker's ability to breach security measures. + +Overall, GAI systems can significantly enhance the capabilities of cyber attackers, making attacks more effective, efficient, and difficult to defend against.","GAI systems may augment cybersecurity attacks by advancing offensive cyber capabilities such as hacking, malware, and phishing. Reports indicate that large language models (LLMs) can discover vulnerabilities in systems and write code to exploit them. Sophisticated threat actors might develop GAI-powered security co-pilots to inform attackers on how to evade threat detection and escalate privileges after gaining system access.",0.8461538461538461,0.9780766230624804,0.6666666666666666,0.9999999999,0.9350928402925377 +What role does user consent play in the collection and use of personal data?,"['Additional protections would assure the American public that the automated systems they use are not monitoring their activities, collecting information on their lives, or otherwise surveilling them without context-specific consent or legal authority.' + 'Provide the public with mechanisms for appropriate and meaningful consent, access, and \ncontrol over their data \nUse-specific consent. Consent practices should not allow for abusive surveillance practices. Where data \ncollectors or automated systems seek consent, they should seek it for specific, narrow use contexts, for specif\xad\nic time durations, and for use by specific entities. Consent should not extend if any of these conditions change; \nconsent should be re-acquired before using data if the use case changes, a time limit elapses, or data is trans\xad\nferred to another entity (including being shared or sold). Consent requested should be limited in scope and \nshould not request consent beyond what is required. Refusal to provide consent should be allowed, without \nadverse effects, to the greatest extent possible based on the needs of the use case. \nBrief and direct consent requests. When seeking consent from users short, plain language consent' + 'where it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable \nin plain language, and give you agency over data collection and the specific context of use; current hard-to\xad\nunderstand notice-and-choice practices for broad uses of data should be changed.']","User consent plays a critical role in the collection and use of personal data, ensuring that individuals have control over how their information is used. Here's a summary of the key aspects: + +1. 
**Context-Specific Consent**: Consent should be obtained for specific, narrow use contexts. This means users should be informed about exactly how their data will be used, for what purpose, and for what duration. + +2. **Limited Scope**: Consent requests should be limited in scope, asking only for what is necessary for the intended use. This prevents overreach and ensures users aren't providing blanket permissions for unspecified future uses. + +3. **Re-acquisition of Consent**: If there are changes in the use case, the time duration, or if the data is transferred to another entity, consent should be re-acquired. This ensures that users are always aware and in control of how their data is being used. + +4. **Plain Language and Brevity**: Consent requests should be brief and understandable, using plain language. This helps users make informed decisions without being overwhelmed by complex legal jargon or excessively long documents. + +5. **Right to Refuse**: Users should have the right to refuse consent without facing adverse effects. This respects user autonomy and ensures that consent is given freely, not under duress or misleading circumstances. + +Overall, these practices aim to protect users from abusive surveillance and ensure their data is handled transparently and responsibly.","User consent plays a crucial role in the collection and use of personal data, as it should only be used to justify data collection in cases where it can be appropriately and meaningfully given. Consent requests should be brief, understandable in plain language, and provide individuals with agency over data collection and its specific context of use.",0.6818181818181818,0.9278610080642681,1.0,0.9999999999666667,0.5217334688264142 +What role do algorithmic impact assessments play in the expectations for automated systems?,"['evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections.']","Algorithmic impact assessments play a crucial role in the expectations for automated systems by providing a structured way to evaluate and report on the impacts of these systems. Specifically, they include: + +1. **Evaluation and Reporting**: They help in assessing the potential effects of an automated system, including its benefits and risks. +2. **Disparity Testing**: They involve testing for disparities to ensure that the system does not unfairly affect certain groups. +3. **Mitigation Information**: They provide information on how any identified risks or disparities will be mitigated. +4. **Transparency**: Making the results of these assessments public whenever possible helps confirm that protections are in place and that the automated system is operating fairly and responsibly. + +Overall, these assessments are vital for ensuring that automated systems are used in ways that are ethical, transparent, and equitable.",The answer to given question is not present in context,0.2727272727272727,0.9999992239712276,0.0,0.0,0.9224482700597978 +What is the purpose of establishing transparency policies for GAI applications?,['Establish transparent acceptable use policies for GAI that address illegal use or applications of GAI.'],"The purpose of establishing transparency policies for General Artificial Intelligence (GAI) applications is to ensure that the use of such technologies is clear, understandable, and accountable to all stakeholders. 
This can help address and mitigate potential risks and concerns associated with GAI, including illegal use or misuse. Transparency policies help in: + +1. **Building Trust:** By clearly outlining how GAI will be used and for what purposes, stakeholders, including users, developers, and regulators, can have greater confidence in the technology. + +2. **Ensuring Accountability:** Transparent policies make it easier to hold parties accountable for their actions, as there are clear guidelines and expectations for behavior. + +3. **Mitigating Risks:** By addressing potential illegal uses and other harmful applications upfront, transparency policies can help prevent misuse and reduce the likelihood of unintended consequences. + +4. **Facilitating Compliance:** Clear policies can help ensure that GAI applications comply with relevant laws and regulations, thus avoiding legal complications. + +5. **Promoting Ethical Use:** Transparency in policies can promote ethical practices in the development and deployment of GAI, ensuring that the technology is used in ways that are beneficial and not harmful to society. + +Overall, transparency in acceptable use policies is crucial for the responsible development and deployment of GAI.","The purpose of establishing transparency policies for GAI applications is to document the origin and history of training data and generated data, which advances digital content transparency while balancing the proprietary nature of training approaches.",0.5263157894736842,0.9282563712725939,0.0,0.0,0.9050168682422227 +What is the purpose of the NIST AI Risk Management Framework?,"['The National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy,' + 'The AI RMF was released in January 2023, and is intended for voluntary use and to improve the ability of organizations to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems.']","The purpose of the NIST AI Risk Management Framework (AI RMF) is to help manage and mitigate the risks posed to individuals, organizations, and society by Artificial Intelligence (AI). It aims to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems. This framework is intended for voluntary use and is developed through a consensus-driven, open, transparent, and collaborative process. It focuses on fostering innovative approaches to address characteristics of trustworthiness, including accuracy, explainability and interpretability, reliability, and privacy.","The purpose of the NIST AI Risk Management Framework is to help manage risks posed to individuals, organizations, and society by AI. 
It aims to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems.",1.0,1.0000000000000007,1.0,0.99999999995,0.9939459027184782 +What issues related to bias and discrimination are associated with the use of automated systems in decision-making?,"['The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.']","The use of automated systems in decision-making can raise several issues related to bias and discrimination: + +1. **Bias in Data**: Automated systems often rely on large datasets to make decisions. If these datasets contain historical biases or are not representative of the target population, the system can perpetuate or even exacerbate these biases. + +2. **Disparity in Treatment**: Automated decisions may result in different treatment for various demographic groups. Disparities can occur if the system's performance varies across different races, genders, ages, or other protected classifications. + +3. **Lack of Transparency**: It can be challenging to understand how an automated system arrives at a particular decision, making it difficult to identify and address biases or discriminatory practices. + +4. **Calibration and Fairness**: Ensuring that the automated system is calibrated to treat all demographic groups fairly is crucial. Without proper calibration, the system might favor one group over another unjustly. + +5. **Privacy Concerns**: Collecting and using demographic data to assess disparities raises privacy issues. This data must be handled separately from the data used by the automated system, and adequate privacy protections must be in place. + +6. **Documentation and Justification**: When an automated system results in disparate impacts on identified groups, it is essential to document these disparities and provide a justification for continuing to use the system. This transparency helps in addressing and mitigating discriminatory effects. 
+ +By addressing these issues through inclusive demographic assessments, parity evaluations, and privacy protections, organizations can work towards reducing bias and discrimination in automated decision-making systems.","Automated systems in decision-making have been associated with issues such as reflecting and reproducing existing unwanted inequities, embedding new harmful bias and discrimination, and being unsafe or ineffective in areas like patient care, hiring, and credit decisions.",0.8095238095238095,0.9836433730310136,0.6666666666666666,0.9999999999,0.8826290102471099 +What is the importance of pre-deployment testing in the AI lifecycle?,[],"Pre-deployment testing is a critical phase in the AI lifecycle because it ensures that the AI system operates safely, reliably, and as intended before it is released into the real world. Here are some key reasons why pre-deployment testing is important: + +1. **Safety Assurance**: Pre-deployment testing helps identify and mitigate potential safety risks, ensuring that the AI system does not cause harm to users or the environment. + +2. **Performance Validation**: It ensures that the AI model performs well under various conditions and meets the performance criteria set during the design phase. + +3. **Bias and Fairness**: This phase allows for the detection and correction of biases in the AI model, promoting fairness and preventing discriminatory outcomes. + +4. **Robustness**: Testing under different scenarios and stress conditions helps ensure that the AI system is robust and can handle unexpected inputs or situations without failing. + +5. **Regulatory Compliance**: Many industries have regulatory requirements that mandate rigorous testing of AI systems before deployment to ensure they meet legal and ethical standards. + +6. **User Trust**: Thorough testing builds trust with users by demonstrating that the AI system is reliable and behaves as expected. + +7. **Error Identification**: It helps in identifying and fixing bugs or errors in the system, which could otherwise lead to failures or unintended behaviors. + +In summary, pre-deployment testing is essential for ensuring that an AI system is safe, reliable, fair, and compliant with regulatory standards, thereby fostering trust and acceptance among users and stakeholders.","The importance of pre-deployment testing in the AI lifecycle lies in its ability to complicate risk mapping and pre-deployment measurement efforts due to the diverse ways and contexts in which GAI systems may be developed, used, and repurposed. Robust test, evaluation, validation, and verification (TEVV) processes can be iteratively applied and documented in the early stages of the AI lifecycle, ensuring that the systems are properly assessed before deployment.",1.0,0.987383201615656,0.0,0.0,0.8831965397841518 +What role do civil liberties play in the context of surveillance systems?,[],"Civil liberties play a critical role in the context of surveillance systems, as they are fundamental rights and freedoms that need to be protected to maintain a democratic and just society. Here are a few key points on how civil liberties intersect with surveillance systems: + +1. **Privacy**: One of the core civil liberties affected by surveillance systems is the right to privacy. Surveillance systems, especially those that are pervasive and intrusive, can infringe on individuals' privacy by monitoring their activities, communications, and behaviors without their consent. + +2. 
**Freedom of Expression**: Surveillance can have a chilling effect on freedom of expression. When people know they are being watched, they may self-censor and refrain from expressing their opinions freely, particularly if those opinions are dissenting or controversial. + +3. **Due Process and Legal Protections**: Surveillance systems must operate within the bounds of the law, respecting due process and legal protections. Unauthorized or warrantless surveillance can undermine legal safeguards designed to protect individuals from arbitrary or unjust actions by the state. + +4. **Transparency and Accountability**: For surveillance systems to align with civil liberties, there must be transparency about how these systems are used and accountability mechanisms in place. This includes clear regulations, oversight by independent bodies, and avenues for redress if rights are violated. + +5. **Proportionality and Necessity**: Surveillance measures should be proportionate to the threats they aim to mitigate and necessary for achieving legitimate objectives. Overreaching surveillance can lead to disproportionate restrictions on civil liberties without corresponding benefits to security. + +In summary, the implementation and operation of surveillance systems must carefully balance security interests with the protection of civil liberties to ensure that fundamental rights and freedoms are not compromised.","Civil liberties play a crucial role in the context of surveillance systems by ensuring that civil rights are not limited by the threat of surveillance or harassment facilitated by automated systems. Surveillance systems should not monitor the exercise of democratic rights, such as voting, privacy, peaceful assembly, speech, or association, in a way that restricts these civil liberties. Additionally, information related to identity should be carefully limited to avoid algorithmic discrimination, and continuous surveillance should not be used in ways that suppress the exercise of rights.",0.0,1.0,0.0,0.0,0.8609639563783513 +What measures are suggested to assess the environmental impact of AI model training and management activities?,[],"Assessing the environmental impact of AI model training and management activities involves several measures: + +1. **Energy Consumption Monitoring:** + - Track and report the energy usage during the training and deployment phases of AI models. + - Use energy-efficient hardware and optimize software to reduce energy consumption. + +2. **Carbon Footprint Estimation:** + - Calculate the carbon emissions associated with the energy used in AI processes. + - Use tools and platforms that provide carbon footprint estimates for computational tasks. + +3. **Sustainable Data Centers:** + - Utilize data centers that are powered by renewable energy sources. + - Implement cooling and power management systems that minimize environmental impact. + +4. **Optimized Model Design:** + - Design AI models that require less computational power without compromising performance. + - Employ techniques such as model pruning, quantization, and efficient neural architecture search. + +5. **Lifecycle Assessment:** + - Conduct a full lifecycle assessment (LCA) of AI models, from development to deployment and decommissioning. + - Consider the environmental impacts of hardware manufacturing, software development, and end-of-life disposal. + +6. 
**Scalable and Distributed Training:** + - Use distributed computing resources that can scale efficiently, reducing the need for extensive physical infrastructure. + - Implement federated learning to distribute the training process, minimizing centralized energy use. + +7. **Policy and Regulation Compliance:** + - Adhere to environmental regulations and policies aimed at reducing carbon emissions and promoting sustainability. + - Engage in industry-wide initiatives to set standards for the environmental impact of AI technologies. + +By implementing these measures, organizations can better understand and mitigate the environmental impacts associated with AI model training and management activities.","The suggested measures to assess the environmental impact of AI model training and management activities include: 1) Assessing safety to physical environments when deploying GAI systems, 2) Documenting anticipated environmental impacts of model development, maintenance, and deployment in product design decisions, 3) Measuring or estimating environmental impacts such as energy and water consumption for training, fine-tuning, and deploying models, and verifying trade-offs between resources used at inference time versus additional resources required at training time, and 4) Verifying the effectiveness of carbon capture or offset programs for GAI training and applications, while addressing green-washing concerns.",1.0,0.979109766582706,0.0,0.0,0.2350394986697477 +What should designers and developers provide to ensure clear understanding of system functioning in automated systems?,"['Designers, developers, and deployers of automat\xaded systems should provide generally accessible plain language docu\xadmentation including clear descriptions of the overall system func\xadtioning and the role automation plays, notice that such systems are in use, the individual or organization responsible for the system, and ex\xadplanations of outcomes that are clear, timely, and accessible.']","Designers and developers should provide generally accessible plain language documentation that includes: + +1. Clear descriptions of the overall system functioning and the role automation plays. +2. Notice that such systems are in use. +3. Information on the individual or organization responsible for the system. +4. Explanations of outcomes that are clear, timely, and accessible. + +This ensures a clear understanding of system functioning in automated systems.","Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation that includes clear descriptions of the overall system functioning and the role automation plays.",0.8333333333333334,0.940193849529106,1.0,0.9999999999,0.6178367384939542 +What is the role of the National Institute of Standards and Technology in advancing artificial intelligence?,"['About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulļ¬ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. 
AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI.']","The National Institute of Standards and Technology (NIST) plays a crucial role in advancing artificial intelligence (AI) by developing measurements, technology, tools, and standards to ensure AI systems are reliable, safe, transparent, explainable, privacy-enhanced, and fair. Their goal is to enable AI to achieve its full commercial and societal benefits without causing harm to people or the planet. + +NIST has been involved in both fundamental and applied AI research for over a decade. Furthermore, they are key in implementing the 2023 Executive Order on Safe, Secure, and Trustworthy AI by establishing the U.S. AI Safety Institute and the AI Safety Institute Consortium. These entities are dedicated to building the scientific foundation necessary for the safe, secure, and trustworthy development and use of AI.","The National Institute of Standards and Technology (NIST) develops measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) to realize its full commercial and societal benefits without harm to people or the planet. NIST has conducted both fundamental and applied work on AI for more than a decade and is helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",1.0,0.9837227173936314,1.0,0.9999999999,0.9965779778098691 +What is the purpose of using structured feedback mechanisms in relation to AI-generated content?,['Evaluate potential biases and stereotypes that could emerge from the AI-generated content using appropriate methodologies including computational testing methods as well as evaluating structured feedback input.'],"The purpose of using structured feedback mechanisms in relation to AI-generated content is to systematically identify, evaluate, and address potential biases and stereotypes that may emerge from the AI's outputs. These mechanisms provide a consistent and organized way to collect insights from users and experts about the performance and impact of the AI system. Here are some specific benefits of structured feedback mechanisms: + +1. **Bias Detection**: They help in identifying biases in the AI-generated content by collecting detailed feedback from diverse users who might experience or observe different manifestations of bias. + +2. **Quality Improvement**: Structured feedback allows developers to understand specific areas where the AI's performance can be improved, leading to more accurate and fair outputs. + +3. **Accountability and Transparency**: These mechanisms promote transparency by documenting feedback and the AI's responses to it, thereby holding the system accountable for its outputs. + +4. **User Trust**: By actively soliciting and incorporating user feedback, developers can build greater trust in the AI system among users, as they see their concerns being addressed. + +5. **Ethical Considerations**: Structured feedback ensures that ethical considerations are systematically reviewed and integrated into the AI's development and deployment, reducing the risk of harm caused by biased or inappropriate content. + +6. **Iterative Refinement**: Feedback is essential for the iterative refinement of AI models, enabling continuous improvement based on real-world use and experiences. 
+ +In summary, structured feedback mechanisms are crucial for maintaining the integrity, fairness, and effectiveness of AI systems by providing a clear pathway for ongoing evaluation and improvement.",The purpose of using structured feedback mechanisms in relation to AI-generated content is to solicit and capture user input about the content to detect subtle shifts in quality or alignment with community and societal values.,0.3333333333333333,1.000000000000001,0.0,0.9999999999,0.902771012519338 +What measures are suggested to ensure information integrity in the deployment of GAI systems?,"['MS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic data.' + 'ā€¢ Auditing and assessment \nā€¢ Data provenance' + 'Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.' + 'Assess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of \nevaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs).']","To ensure information integrity in the deployment of Generative AI (GAI) systems, the following measures are suggested: + +1. **Auditing and Assessment**: Regular audits and assessments of the AI system can help ensure that it operates as expected and adheres to defined standards and policies. + +2. **Data Provenance**: Tracking the origin and history of data used in training and deploying GAI systems ensures that the data is reliable and its integrity can be verified. + +3. **Documentation Practices**: + - **Logging and Recording**: Keeping detailed logs and records of GAI incidents aids in the analysis and resolution of issues. + - **Analyzing GAI Incidents**: Analyzing incidents helps in understanding the causes and implementing measures to prevent future occurrences. + - **Change Management Records**: Keeping thorough records of any changes made to the system ensures traceability and accountability. + - **Version History and Metadata**: Maintaining version history and metadata enables better tracking of changes and the context in which they were made. + +4. **Information Sharing**: Regular sharing of information, including audit findings and incident reports, with relevant AI actors ensures transparency and collective oversight. + +5. **Evaluation of GAI Output**: + - **Comparing to Ground Truth Data**: Comparing the outputs of the GAI system to a set of known ground truth data helps in assessing its accuracy and reliability. + - **Evaluation Methods**: Utilizing a variety of evaluation methods, such as human oversight, automated evaluation, proven cryptographic techniques, and reviewing content inputs, can help in verifying the quality and authenticity of the GAI outputs. + +By implementing these measures, the integrity of information in GAI systems can be better maintained, ensuring trustworthy and reliable AI deployments.","Suggested measures to ensure information integrity in the deployment of GAI systems include verifying GAI system training data and TEVV data provenance, and ensuring that fine-tuning or retrieval-augmented generation data is grounded. 
Additionally, it is recommended to review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities.",0.6,0.9421384040537645,0.5,0.999999999975,0.2837761468748547 +What steps should automated systems take to avoid bias and support equity for marginalized groups?,"['The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.']","To avoid bias and support equity for marginalized groups, automated systems should take the following steps: + +1. **Inclusive Demographics**: Ensure the demographics of the assessed groups are as inclusive as possible, covering race, color, ethnicity, sex (including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual orientation), religion, age, national origin, disability, veteran status, genetic information, or any other classification protected by law. + +2. **Broad Measures Assessment**: Assess a broad set of measures, including demographic performance measures, overall and subgroup parity assessment, and calibration to ensure fairness and equity. + +3. **Separate Demographic Data**: Demographic data collected for disparity assessment should be separated from data used by the automated system to protect privacy. + +4. **Privacy Protections**: Implement privacy protections for the demographic data collected. In some cases, it may be appropriate to perform disparity assessments using a data sample to further protect privacy. + +5. **Document Disparities**: For every instance where the deployed automated system leads to different treatment or impacts disfavoring identified groups, document the disparity and provide a justification for any continued use of the system. This documentation should be comprehensive and transparent. + +By taking these steps, automated systems can minimize bias and promote equity for marginalized groups.","Automated systems should take several steps to avoid bias and support equity for marginalized groups, including conducting proactive equity assessments during the design phase to identify potential discrimination, using representative and robust data that reflects local communities, and guarding against the use of demographic proxies that could lead to algorithmic discrimination. 
These steps should be integrated throughout the design, development, and deployment processes to ensure ongoing protection against algorithmic discrimination.",0.7692307692307693,1.0000000000000002,0.25,0.9999999999,0.653725718297961 +Why is user consent important for protecting personal data?,"['You should be protected from abusive data practices via built-in protections and you should have agency over how data about you is used. You should be protected from violations of privacy through design choices that ensure such protections are included by default, including ensuring that data collection conforms to reasonable expectations and that only data strictly necessary for the specific context is collected. Designers, developers, and deployers of automated systems should seek your permission and respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be used. Systems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive.' + 'Provide the public with mechanisms for appropriate and meaningful consent, access, and \ncontrol over their data \nUse-specific consent. Consent practices should not allow for abusive surveillance practices. Where data \ncollectors or automated systems seek consent, they should seek it for specific, narrow use contexts, for specif\xad\nic time durations, and for use by specific entities. Consent should not extend if any of these conditions change; \nconsent should be re-acquired before using data if the use case changes, a time limit elapses, or data is trans\xad\nferred to another entity (including being shared or sold). Consent requested should be limited in scope and \nshould not request consent beyond what is required. Refusal to provide consent should be allowed, without \nadverse effects, to the greatest extent possible based on the needs of the use case. \nBrief and direct consent requests. When seeking consent from users short, plain language consent' + 'Additional protec\xad tions would assure the American public that the automated systems they use are not monitoring their activities, collecting information on their lives, or otherwise surveilling them without context-specific consent or legal authori\xad ty.' + 'You should be protected from abusive data practices via built-in protections and you \nshould have agency over how data about you is used. You should be protected from violations of \nprivacy through design choices that ensure such protections are included by default, including ensuring that \ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \nusers with defaults that are privacy invasive.' + 'better notice and consent procedures that ensure privacy based on context and use case']","User consent is crucial for protecting personal data for several reasons: + +1. 
**Autonomy and Control**: Consent ensures that users have agency over how their data is collected, used, accessed, transferred, and deleted. This empowers users to make informed decisions about their personal information. + +2. **Privacy Protection**: By seeking consent, it ensures that data collection and processing conform to reasonable expectations of privacy. Users can agree to specific, narrow use contexts, thereby limiting unnecessary or invasive data collection. + +3. **Transparency and Trust**: Brief and direct consent requests in plain language help build transparency and trust between users and the entities collecting their data. When users are aware of and agree to how their data will be used, it fosters a more trustworthy relationship. + +4. **Preventing Abusive Practices**: Requiring consent for specific time durations, use contexts, and entities prevents abusive surveillance practices. It ensures that users are not unknowingly monitored or have their data misused. + +5. **Legal Compliance**: Consent mechanisms help ensure that data collection and use comply with legal and regulatory requirements. This safeguards both the users' rights and the organizations from potential legal repercussions. + +6. **Context-Specific Protections**: Consent practices tailored to specific contexts and use cases ensure that data is handled appropriately within the given context, avoiding overreach and protecting user privacy. + +Overall, user consent is a fundamental element in safeguarding personal data and maintaining the integrity and trustworthiness of automated systems and data practices.","User consent is important for protecting personal data because it ensures that data collection is justified only in cases where consent can be appropriately and meaningfully given. Consent requests should be brief, understandable in plain language, and provide individuals with agency over their data collection and its specific context of use. This approach helps to prevent abusive data practices and ensures that individuals have control over how their data is used.",1.0,0.9999999999999999,1.0,0.99999999998,0.9370850721231714 +"What steps, like bias testing, ensure fair automated systems?","['ā€¢ Auditing and assessment' + 'the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems;']","Ensuring fair automated systems involves several steps, including but not limited to the following: + +1. **Bias Testing**: Regularly test the system for biases across different demographic groups. This involves assessing the system's accuracy and error rates to identify any differential impacts on various groups. + +2. **Performance Testing**: Conduct comprehensive performance assessments to evaluate the system's overall accuracy and effectiveness. This includes testing the system's performance for different demographic groups to ensure it is equitable. + +3. **Result Auditing**: Regularly audit the results of performance testing. This involves analyzing accuracy, differential demographic impact, resulting error rates (both overall and per demographic group), and comparing these results to previously deployed systems to identify any areas of improvement. + +4. **Transparency and Documentation**: Maintain detailed documentation of the system's development, including data sources, model choices, and any decisions made to address biases. 
This transparency helps in understanding and addressing potential biases. + +5. **Regular Updates and Monitoring**: Continuously monitor the system's performance after deployment and update it as necessary to ensure it remains fair and unbiased over time. + +6. **Stakeholder Involvement**: Involve diverse stakeholders in the development and assessment process to provide different perspectives and insights that can help in identifying and mitigating biases. + +By implementing these steps, developers and users of automated systems can work towards ensuring their systems are fair and equitable.","Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to products being changed or not launched to prevent harm. Federal government agencies are developing standards and guidance for the use of automated systems to help prevent bias. Non-profits and companies have developed best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.",0.47619047619047616,0.958680346106955,0.0,0.9999999999,0.47082068388510284 +How does the NAACP impact civil rights in tech governance?,[],"The National Association for the Advancement of Colored People (NAACP) primarily focuses on ensuring the political, educational, social, and economic equality of rights of all persons and eliminating race-based discrimination. While the NAACP is not specifically a tech-focused organization, its impact on civil rights can extend into the realm of tech governance in several ways: + +1. **Advocacy for Inclusive Policies:** The NAACP advocates for policies that promote diversity and inclusion within the tech industry. This includes pushing for equitable hiring practices, fair wages, and inclusive workplace environments. + +2. **Addressing Algorithmic Bias:** The NAACP may also engage in efforts to address and mitigate algorithmic biases that disproportionately affect marginalized communities. This could involve collaborating with tech companies to develop fairer algorithms or advocating for regulations that ensure equality. + +3. **Digital Equity:** Ensuring equal access to technology and the internet is another area where the NAACP impacts tech governance. Digital equity is critical for providing educational and economic opportunities to underserved communities. + +4. **Data Privacy and Security:** The NAACP may also advocate for stronger data privacy and security protections, particularly for communities of color who might be disproportionately impacted by data breaches and misuse of personal information. + +5. **Public Awareness and Education:** The organization plays a role in raising public awareness about the intersection of civil rights and technology, helping to educate the public and policymakers about the importance of equitable tech governance. 
+ +Through these efforts, the NAACP contributes to shaping a tech landscape that is more inclusive and just, ensuring that advancements in technology benefit all members of society equally.",The answer to given question is not present in context,0.0,0.9721635445605016,0.0,0.0,0.1724352148696494 +How does DARPA's XAI tackle opaque AI decision-making challenges?,[],"DARPA's Explainable Artificial Intelligence (XAI) program addresses the challenge of opaque AI decision-making by developing new machine learning techniques that produce more interpretable models, while maintaining high levels of learning performance. The goal of XAI is to create AI systems whose actions can be understood and trusted by human users. This involves: + +1. **Creating Explainable Models**: Developing machine learning models that are inherently more understandable without sacrificing accuracy. This includes techniques that make the internal decision-making process of AI systems more transparent. + +2. **Human-Computer Interaction Research**: Studying how to best present explanations to users so that they can understand and effectively trust the AIā€™s decisions. This involves designing user interfaces and explanation techniques that make complex models more comprehensible. + +3. **Empirical Evaluation**: Testing these models with human users to ensure that the explanations are useful and genuinely enhance the user's understanding and trust. + +By focusing on these areas, DARPA's XAI program aims to make AI systems more accessible and reliable, ultimately fostering greater trust and adoption of AI technologies in critical applications.",The answer to given question is not present in context,0.8333333333333334,0.9136827390211785,0.0,0.0,0.17905135740366185 +What concerns did panelists raise about AI in policing and its impact on safety and democracy?,"[""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed.""]","Panelists raised several concerns about the use of AI in policing and its impact on safety and democracy: + +1. **Validity of Systems**: Concerns about the accuracy and reliability of AI systems used in policing. +2. **Adverse or Irrelevant Data**: The potential for AI to replicate unjust outcomes due to biased or irrelevant data. +3. **Confirmation Bias**: The tendency of people to defer to potentially inaccurate automated systems. +4. **Lack of Individualization**: AI systems often lack personalization and do not account for the belief that people can change for the better. +5. **Severe Impact on Individuals and Communities**: The use of AI in policing can lead to significant negative consequences, such as loss of jobs, loss of custody of children, and surveillance that results in chilling effects for communities. +6. 
**Negative Signals**: Surveillance and other AI-driven policing methods can send negative signals to community members about how they are viewed by authorities. + +These concerns highlight the potential for AI in policing to undermine safety and democratic principles if not carefully managed.","Panelists raised concerns about the validity of AI systems used in policing, noting that adverse or irrelevant data can lead to a replication of unjust outcomes. They highlighted issues such as confirmation bias and the tendency to defer to potentially inaccurate automated systems. The impact of these systems on individuals and communities is seen as potentially severe, with concerns that they lack individualization, undermine the belief in people's ability to change for the better, and can lead to job loss and custody issues. Additionally, surveillance technologies can create chilling effects in communities and send negative signals about how community members are viewed. Panelists emphasized that while transparency is important, it is not sufficient for achieving accountability.",0.7777777777777778,0.9689915846343693,0.8,0.9999999999,0.8604918583315954 +What role does the OSTP play in the AI Bill of Rights regarding public input and civil liberties?,[],"The Office of Science and Technology Policy (OSTP) plays a pivotal role in the AI Bill of Rights, particularly in ensuring that public input is considered and that civil liberties are protected. The OSTP is responsible for guiding the development and implementation of policies that ensure AI technologies are designed and used in ways that respect and uphold civil liberties. This involves actively seeking and incorporating feedback from the public and various stakeholders to shape policies that reflect the needs and concerns of society. The OSTP's involvement helps to ensure that AI systems are developed ethically, transparently, and with respect for individuals' rights and freedoms.","The Office of Science and Technology Policy (OSTP) plays a crucial role in the AI Bill of Rights by leading a year-long process to seek and distill input from various stakeholders, including impacted communities, industry stakeholders, technology developers, and policymakers. This engagement informs the development of policies and practices that protect civil rights and promote democratic values in the governance of automated systems.",0.0,0.9118113520854066,0.0,0.0,0.6041608042567927 +How do Model Cards enhance AI transparency and accountability amid privacy issues?,[],"Model Cards are a tool designed to enhance AI transparency and accountability by providing detailed documentation about machine learning models. They can play a significant role in addressing privacy issues in several ways: + +1. **Clear Documentation**: Model Cards offer comprehensive details about a model, including its intended use, performance metrics, and limitations. This transparency helps stakeholders understand the context in which the model should be used and recognize any potential privacy concerns related to its deployment. + +2. **Bias and Fairness**: By documenting the data sources and the methodologies used in training the model, Model Cards can highlight any biases present in the data. This awareness allows developers and users to take steps to mitigate these biases, which is crucial for protecting individuals' privacy and ensuring fair treatment. + +3. 
**Data Privacy Considerations**: Model Cards can include specific sections that address how data privacy is handled throughout the model development process. This might cover aspects like data anonymization techniques, compliance with data protection regulations (such as GDPR), and the steps taken to protect sensitive information. + +4. **Usage Guidelines**: By specifying the appropriate and inappropriate uses of the model, Model Cards help prevent misuse that could lead to privacy violations. For example, a Model Card might state that a model is not suitable for processing personal health data, thereby guiding users to respect privacy boundaries. + +5. **Accountability Mechanism**: Including information about the model's developers, the version history, and the contact details for reporting issues provides an accountability mechanism. Stakeholders can reach out to the responsible parties if they identify any privacy-related concerns or if the model is being used inappropriately. + +Overall, Model Cards promote a culture of transparency and accountability in AI development, making it easier to address privacy issues proactively and responsibly.",The answer to given question is not present in context,1.0,0.9955960998449336,0.0,0.0,0.9220679392303183 +What does the AI Bill of Rights suggest for protecting civil rights in tech?,[],"The AI Bill of Rights, as introduced by the White House Office of Science and Technology Policy in October 2022, outlines several principles to protect civil rights in the context of technology and artificial intelligence. Here are some of the key suggestions: + +1. **Safe and Effective Systems**: Ensuring that AI systems are safe and effective before they are used, and that they are monitored continuously to mitigate any risks or potential harms. + +2. **Algorithmic Discrimination Protections**: Protecting individuals from discrimination by algorithms and ensuring that AI systems are designed and used in ways that are fair and equitable. + +3. **Data Privacy**: Safeguarding personal data and ensuring that individuals have control over how their data is used and shared. + +4. **Notice and Explanation**: Ensuring that people are informed when an AI system is being used and that they understand how it impacts them. This includes providing clear explanations about AI decisions. + +5. **Human Alternatives, Consideration, and Fallback**: Ensuring that individuals can opt-out of AI decisions and seek human alternatives, particularly in critical situations such as legal, medical, and financial decisions. + +These principles aim to establish a framework to ensure that AI technologies are developed and deployed in ways that uphold civil rights, promote equity, and protect the public from potential harms.","The AI Bill of Rights suggests guiding the design, use, and deployment of automated systems to protect the American public, ensuring that these technologies reinforce civil rights and democratic values. It emphasizes the need to root out inequity, embed fairness in decision-making processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.",0.0,0.907941664157978,0.0,0.0,0.852822373647813 +What steps are taken to ensure fair use of automated systems?,"['to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. 
You should be \nprotected from inappropriate or irrelevant data use in the design, development, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.' + 'Some companies have put in place safeguards that have prevented harm from occurring by ensuring that key development decisions are vetted by an ethics review; others have identified and mitigated harms found through pre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consultation processes that may be applied when considering the use of new automated systems, and existing product development and testing practices already protect the American public from many potential harms.' + 'justification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws.' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality changes. You should know how and why an outcome impacting you was determined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who']","Ensuring fair use of automated systems involves several steps aimed at protecting individuals from harm and ensuring transparency and accountability. Here are some key steps based on the provided context: + +1. **Proactive Protection from Harm**: Automated systems should be designed, developed, and deployed in a way that proactively protects users from foreseeable harms, including inappropriate or irrelevant data use and compounded harm from data reuse. + +2. **Independent Evaluation and Reporting**: Independent evaluations should be conducted to confirm that the automated system is safe and effective. This includes reporting steps taken to mitigate potential harms and making the results public whenever possible. + +3. **Ethics Review and Safeguards**: Companies should implement safeguards such as ethics reviews to vet key development decisions. This helps in identifying and mitigating potential harms through pre-deployment testing and ongoing monitoring processes. + +4. **Public Consultation Processes**: Governments at all levels may apply existing public consultation processes when considering the use of new automated systems. This ensures that public input is considered in the decision-making process. + +5. 
**Documentation and Justification of Data Use**: For each data attribute and source used by the automated system, a justification should be documented to explain why it is appropriate and how it complies with applicable laws. + +6. **Transparency and Notice**: Users should be informed that an automated system is being used and how it contributes to outcomes that impact them. Designers, developers, and deployers should provide accessible and clear documentation about the system's functioning, the role of automation, and the responsible parties. + +7. **Explanation of Outcomes**: Automated systems should offer explanations that are technically valid, meaningful, and useful. Users should understand how and why an outcome impacting them was determined, including when the automated system is not the sole input determining the outcome. Notifications should be updated to reflect significant changes in use cases or key functionalities. + +By following these steps, organizations can ensure that automated systems are used fairly and responsibly, minimizing potential harms and enhancing transparency and accountability.","Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to changes or prevented harmful product launches. Federal agencies are developing standards and guidance for the use of automated systems to help prevent bias. Non-profits and companies have developed best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.",1.0,0.982421722097167,1.0,0.99999999995,0.3629559741999778 +What are the AI ethics for Intel and their alignment with NIST standards?,[],"Intel has developed a comprehensive set of AI ethics principles that guide their development and use of artificial intelligence technologies. These principles are crafted to ensure that AI is developed responsibly and ethically. While I don't have specific details from Intel's latest documents, I can outline some common themes that are likely part of their AI ethics framework, which also align with the National Institute of Standards and Technology (NIST) standards. + +1. **Transparency**: Intel emphasizes the importance of transparency in AI systems, ensuring that AI's decision-making processes are understandable and traceable. This aligns with NIST's standards that advocate for explainability and auditability in AI systems. + +2. **Fairness and Non-Discrimination**: Intel is committed to ensuring that AI technologies do not perpetuate or amplify biases, promoting fairness and equity. NIST also prioritizes fairness, emphasizing the need to address and mitigate bias in AI systems. + +3. **Privacy and Security**: Protecting user data and ensuring the security of AI systems are critical components of Intel's ethics. Similarly, NIST standards underscore the importance of safeguarding data privacy and securing AI systems from malicious attacks. + +4. **Accountability**: Intel holds itself accountable for the impacts of its AI technologies, ensuring there are mechanisms in place to address any negative consequences. NIST also stresses the need for accountability in AI, recommending clear responsibilities and governance structures. + +5. 
**Human-Centric AI**: Intel focuses on creating AI that augments human abilities and respects human rights and autonomy. This is in line with NIST's emphasis on human-centered approaches in AI development. + +6. **Safety and Robustness**: Ensuring that AI systems are safe, reliable, and robust is a key principle for Intel. NIST also highlights the importance of robustness, resilience, and trustworthiness in AI applications. + +While these principles provide a broad overview, for specific and detailed information about Intel's AI ethics and alignment with NIST standards, it would be best to refer to Intel's published AI ethics guidelines and NIST's official documentation on AI standards.",The answer to given question is not present in context,0.0,0.0,1.0,0.0,0.9226271556467167 +What's the role of incident response plans in assessing GAI performance and AI Actor communication during incidents?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","Incident response plans are crucial in assessing Generative AI (GAI) performance and enhancing communication among AI Actors during incidents. These plans typically include detailed procedures for logging, recording, and analyzing GAI incidents, which facilitate the smooth sharing of information. By maintaining records such as change management logs, version histories, and metadata, AI Actors can effectively respond to and manage incidents. This structured approach ensures that all relevant parties are informed and can collaborate efficiently, leading to quicker resolution and continuous improvement in GAI performance.","Incident response plans play a crucial role in assessing GAI performance by providing structured procedures for addressing the generation of inappropriate or harmful content. They ensure that incidents are communicated to relevant AI Actors, including affected communities, and that processes for tracking, responding to, and recovering from incidents are followed and documented. This structured approach helps in understanding the root causes of incidents and implementing preventive measures, thereby enhancing overall AI Actor communication during such events.",0.3333333333333333,0.9037632184974921,0.0,0.9999999999,0.694688666551931 +How do GAI incident docs help AI Actors assess and manage system performance?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","GAI (General Artificial Intelligence) incident documentation helps AI Actors assess and manage system performance in several ways: + +1. **Information Sharing**: Detailed incident reports provide comprehensive insights into the nature, cause, and impact of incidents. This facilitates a smoother exchange of information among AI Actors, ensuring everyone has a clear understanding of what happened and why. + +2. **Change Management Records**: Documentation of changes made to the AI system, including updates, patches, and configuration changes, helps AI Actors trace issues back to specific modifications. 
This is crucial for identifying the root cause of incidents and preventing future occurrences. + +3. **Version History**: Maintaining a version history allows AI Actors to compare different iterations of the AI system. By analyzing the differences between versions, they can pinpoint which changes may have contributed to an incident, aiding in performance assessment and improvement. + +4. **Metadata Analysis**: Metadata, such as timestamps, user interactions, and system responses, provides additional context for incidents. Analyzing this metadata can help AI Actors understand the conditions under which incidents occur, leading to more effective management strategies. + +5. **Incident Analysis**: Recording and analyzing incidents enable AI Actors to identify patterns and trends. This proactive approach helps in predicting potential issues and implementing preventive measures to enhance the overall system performance. + +By leveraging these documentation practices, AI Actors can make informed decisions, improve system resilience, and enhance the performance and reliability of AI systems.","GAI incident documentation helps AI Actors assess and manage system performance by facilitating smoother sharing of information regarding incidents, which includes logging, recording, and analyzing GAI incidents. This documentation allows AI Actors to trace impacts to their source, understand previous incidents, and implement measures to prevent similar occurrences in the future. Additionally, regular information sharing and maintaining change management records empower AI Actors in responding to and managing AI incidents effectively.",0.08,0.9837099590411337,0.6666666666666666,0.9999999999,0.4624430746751972 +"What principles did the White House OSTP set for civil rights in automated systems, and how was public input involved?",[],"The White House Office of Science and Technology Policy (OSTP) released a ""Blueprint for an AI Bill of Rights"" that outlined principles intended to protect civil rights in automated systems. The blueprint emphasized ensuring that AI technologies are designed, developed, and deployed in ways that respect the rights and freedoms of individuals. Key principles include: + +1. **Safe and Effective Systems**: Ensuring that automated systems are safe and effective, and that they are designed to proactively protect individuals from errors, failures, and harmful outcomes. +2. **Algorithmic Discrimination Protections**: Safeguarding individuals against discrimination by ensuring that AI systems do not perpetuate or amplify biases. +3. **Data Privacy**: Protecting individuals' data privacy by ensuring that AI systems handle personal data responsibly and transparently. +4. **Notice and Explanation**: Providing individuals with clear and accessible information about how AI systems are used and how decisions are made. +5. **Human Alternatives, Consideration, and Fallback**: Offering options for individuals to opt out of AI-driven decisions and ensuring that human oversight is available when needed. + +Public input played a significant role in the development of these principles. The OSTP engaged with a wide range of stakeholders, including experts, advocates, industry leaders, and the general public, to gather diverse perspectives and insights. 
This collaborative approach aimed to ensure that the guidelines reflect the needs and concerns of various communities and promote fairness, accountability, and transparency in AI technologies.","The Blueprint for an AI Bill of Rights includes five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. It was developed through extensive consultation with the American public, which involved a year-long process of seeking and distilling input from impacted communities, industry stakeholders, technology developers, and policymakers. This public engagement included panel discussions, public listening sessions, and a formal request for information, allowing various voices to shape the principles aimed at preventing algorithmic and data-driven harms.",1.0,0.8704773768538888,0.0,0.0,0.7849803928451129 +How do training and feedback improve understanding of digital content transparency in GAI systems?,[],"Training and feedback play crucial roles in enhancing the understanding of digital content transparency in General Artificial Intelligence (GAI) systems. Here's how they contribute: + +1. **Training**: + - **Data Quality and Diversity**: Training GAI systems on diverse and high-quality datasets helps ensure that the models learn to recognize and generate content that is accurate and reliable. This reduces the likelihood of producing misleading or biased content. + - **Transparency-Oriented Objectives**: By incorporating specific objectives during training that prioritize transparency, such as clear and explainable decision-making processes, GAI systems can be better aligned with transparency goals. + - **Iterative Learning**: Continuous training and updating of models based on new data and scenarios help in refining the system's ability to handle various types of content with greater transparency. + +2. **Feedback**: + - **User Feedback**: Collecting feedback from users about the clarity, accuracy, and reliability of the content generated by GAI systems allows developers to identify areas where the system may fall short in terms of transparency. This feedback can then be used to fine-tune the models. + - **Expert Review**: Regular audits and reviews by experts can provide critical insights into the system's performance. Experts can identify transparency issues that may not be evident through automated metrics alone and suggest improvements. + - **Automated Monitoring**: Implementing automated systems to monitor the output of GAI models can help detect and flag instances where the content may lack transparency. This continuous monitoring can inform adjustments and improvements in real-time. + +By integrating robust training methodologies and leveraging feedback mechanisms, developers can significantly improve the transparency of digital content generated by GAI systems, leading to more trustworthy and understandable outputs.","Training and feedback improve understanding of digital content transparency in GAI systems by providing input for training materials about the capabilities and limitations of GAI systems related to digital content transparency. 
This includes actively seeking feedback on generated content quality and potential biases, as well as assessing the general awareness among end users and impacted communities about the availability of feedback channels.",1.0,0.9377700133623103,0.0,0.0,0.5787567075860336 +"What leads to model collapse in AI, especially with synthetic data and biases?",[],"Model collapse in AI can occur due to several reasons, especially when synthetic data and biases are involved. Here are some key factors: + +1. **Synthetic Data Quality**: If the synthetic data used to train the model is of poor quality or not representative of the real-world data, the model may learn incorrect patterns. This can lead to poor generalization and performance on actual tasks, causing a collapse in its effectiveness. + +2. **Bias Propagation**: When synthetic data is generated from biased sources or contains inherent biases, the model trained on this data can perpetuate and even amplify these biases. This can result in skewed predictions and unfair outcomes, leading to a collapse in the model's reliability and fairness. + +3. **Data Homogeneity**: If the synthetic data lacks diversity and variety, the model may overfit to the limited patterns present in the data. This overfitting makes the model less robust and more likely to perform poorly when faced with new, unseen data, leading to a collapse in its predictive accuracy. + +4. **Feedback Loops**: In scenarios where models are retrained on their own predictions (self-training loops), any initial biases or errors can be reinforced and magnified over time. This feedback loop can cause the model's performance to degrade progressively, leading to collapse. + +5. **Lack of Real-World Validation**: Relying solely on synthetic data without validating the model against real-world data can prevent the detection of issues that might lead to collapse. Regularly testing the model with real-world data can help identify and mitigate potential problems. + +6. **Insufficient Data Volume**: Synthetic data might sometimes lack the volume necessary to capture the complexity of the task at hand. Insufficient data can cause the model to underperform, contributing to a collapse. + +By addressing these factors, one can mitigate the risk of model collapse, ensuring that the AI system remains robust, fair, and effective.","Model collapse in AI can occur when model training over-relies on synthetic data, resulting in data points disappearing from the distribution of the new model's outputs. This phenomenon threatens the robustness of the model overall and can lead to homogenized outputs, amplifying any homogenization from the model used to generate the synthetic training data.",1.0,0.9335519631270341,0.0,0.0,0.9279817010109818 +What are Idaho's rules on pretrial risk assessment transparency and their alignment with federal ethical AI standards?,[],"I don't have specific information on Idaho's rules on pretrial risk assessment transparency or their alignment with federal ethical AI standards. However, pretrial risk assessments generally involve evaluating the likelihood that a defendant will appear in court or commit a crime if released before trial. Transparency in these assessments is crucial for ensuring fairness and accountability. + +Federal ethical AI standards typically emphasize principles such as transparency, accountability, fairness, and non-discrimination. 
Ideally, Idaho's rules would align with these principles by providing clear information on how risk assessments are conducted, ensuring that the algorithms used are free from bias, and allowing for independent audits and reviews. + +For precise details on Idaho's specific rules and their alignment with federal standards, you may need to consult legal resources or official Idaho state publications.","Idaho's rules on pretrial risk assessment transparency require that any pretrial risk assessment be shown to be free of bias against any class of individuals protected from discrimination by state or federal law. Additionally, any locality using a pretrial risk assessment must formally validate the claim of it being free of bias, and all documents, records, and information used to build or validate the risk assessment must be open to public inspection. However, the context does not provide specific information on how these rules align with federal ethical AI standards.",0.75,0.0,0.0,0.0,0.6432713249309618 +What strategies help engage AI Actors to assess GAI impacts while maintaining AI content integrity?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","To engage AI Actors in assessing General AI (GAI) impacts while maintaining AI content integrity, several strategies can be employed: + +1. **Documentation Practices**: Implement thorough documentation practices such as logging, recording, and analyzing AI incidents. This ensures that all relevant information about AI functionality and behavior is captured and can be reviewed by AI Actors. + +2. **Information Sharing**: Promote regular sharing of information among AI Actors. This includes sharing change management records, version history, and metadata. This transparency helps AI Actors understand how the AI system has evolved and what changes have been made over time. + +3. **Change Management Records**: Maintain detailed change management records. These records allow AI Actors to track modifications and updates to the AI system, helping them assess the impacts of those changes accurately. + +4. **Version History**: Keep comprehensive version histories of AI models and systems. This allows AI Actors to compare different versions and understand the evolution of the AI system, which is crucial for assessing impacts and maintaining content integrity. + +5. **Metadata**: Ensure that metadata is consistently recorded and updated. Metadata provides context about the data and the AI system, making it easier for AI Actors to assess impacts and maintain content integrity. 
+ +By implementing these strategies, AI Actors can effectively assess the impacts of GAI while ensuring that the integrity of AI content is maintained.","Strategies to engage AI Actors to assess GAI impacts while maintaining AI content integrity include determining context-based measures to identify new impacts, planning regular engagements with AI Actors responsible for inputs to GAI systems, employing methods to trace the origin and modifications of digital content, integrating tools to analyze content provenance, and using structured feedback mechanisms to capture user input about AI-generated content.",0.1,0.9735649925594467,0.0,0.9999999999,0.38941583454791384 +What strategies are best for managing GAI systems and their lifecycle risks?,[],"Managing General Artificial Intelligence (GAI) systems and their lifecycle risks involves several comprehensive strategies to ensure safety, ethical alignment, and control. Here are some of the best strategies: + +1. **Ethical and Safety Design Principles:** + - **Value Alignment:** Ensure that the GAI systems' goals and behaviors align with human values and ethical principles. + - **Robustness:** Develop GAI systems that can handle unexpected situations safely and continue to function correctly under various conditions. + - **Transparency:** Make the decision-making processes of GAI systems understandable and explainable to humans. + +2. **Rigorous Testing and Validation:** + - **Simulations:** Extensively test GAI systems in simulated environments to observe their behavior in diverse scenarios. + - **Adversarial Testing:** Challenge the GAI with difficult, unexpected, and adversarial scenarios to identify potential risks and vulnerabilities. + +3. **Continuous Monitoring and Auditing:** + - **Real-time Monitoring:** Implement continuous monitoring to detect and mitigate unexpected behaviors promptly. + - **Regular Audits:** Conduct regular audits of the GAI systems to assess their performance, safety, and alignment with ethical standards. + +4. **Controlled Deployment:** + - **Incremental Rollout:** Deploy GAI systems gradually, allowing time to assess their impact and make necessary adjustments. + - **Containment Measures:** Use containment strategies to limit the scope and influence of GAI systems during early deployment stages. + +5. **Legal and Regulatory Frameworks:** + - **Compliance:** Ensure that GAI systems comply with existing legal and regulatory standards. + - **Policy Development:** Advocate for and contribute to the development of new policies and regulations that address the unique challenges posed by GAI. + +6. **Stakeholder Involvement:** + - **Multi-Disciplinary Collaboration:** Engage experts from various fields (e.g., ethics, law, engineering, social sciences) to provide diverse perspectives on GAI development and deployment. + - **Public Engagement:** Involve the public in discussions about GAI to understand societal concerns and expectations. + +7. **Fail-Safe Mechanisms:** + - **Kill Switches:** Implement mechanisms to shut down GAI systems safely if they exhibit dangerous or undesirable behaviors. + - **Fallback Systems:** Develop fallback or redundant systems to take over in case of GAI failure. + +8. **Post-Deployment Surveillance:** + - **Impact Assessment:** Continuously assess the societal and environmental impacts of GAI systems post-deployment. + - **Feedback Loops:** Establish feedback mechanisms to learn from real-world deployments and improve future iterations of GAI systems. 
+ +By integrating these strategies, stakeholders can better manage the lifecycle risks associated with General Artificial Intelligence, ensuring that such systems are safe, ethical, and beneficial to society.",The context does not provide specific strategies for managing GAI systems and their lifecycle risks.,1.0,0.9343142222362433,1.0,0.0,0.20712512298310057 +What confabulation might mislead users about CBRN info or capabilities?,[],"Confabulation in the context of AI refers to the generation of false or misleading information that the AI believes to be true due to incorrect or incomplete training data. When it comes to CBRN (Chemical, Biological, Radiological, and Nuclear) information or capabilities, confabulation could mislead users in several critical ways: + +1. **Misinformation about Threats**: The AI might generate incorrect information about the presence, severity, or type of CBRN threats. For example, it might wrongly assert the existence of a chemical threat in an area where there is none. + +2. **Incorrect Safety Procedures**: The AI could provide misleading or incorrect safety protocols or countermeasures for dealing with CBRN threats. This might include recommending ineffective or dangerous actions. + +3. **False Capabilities**: It might exaggerate or inaccurately describe the capabilities of certain CBRN detection or mitigation technologies, leading users to overestimate their safety or preparedness. + +4. **Erroneous Response Strategies**: The AI could suggest inappropriate or ineffective emergency response strategies, potentially putting lives at risk if followed during an actual CBRN event. + +5. **Misleading Historical Data**: Providing false historical data about past CBRN incidents, which could skew risk assessments and preparedness plans. + +To mitigate these risks, it's crucial to ensure that AI systems dealing with CBRN information are trained on accurate, verified data and continuously updated with the latest validated information. Regular audits and validation by subject matter experts are also critical to prevent and correct any instances of confabulation.",Confabulation in the context of CBRN information or capabilities refers to the production of confidently stated but erroneous or false content that may mislead or deceive users regarding the access to or synthesis of nefarious information or design capabilities related to CBRN weapons or other dangerous materials.,1.0,0.9238708245237026,0.0,0.0,0.9132156793429416 +"What insights did OSTP seek from the biometric tech RFI, and who provided feedback?","['• OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The purpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below listed organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union']","The Office of Science and Technology Policy (OSTP) issued a Request For Information (RFI) to gain insights into several aspects of biometric technologies. 
Specifically, they aimed to understand: + +1. The extent and variety of biometric technologies in past, current, or planned use. +2. The domains in which these technologies are being used. +3. The entities making use of biometric technologies. +4. Current principles, practices, or policies governing the use of these technologies. +5. The stakeholders that are, or may be, impacted by the use or regulation of biometric technologies. + +The feedback for this RFI was provided by 130 respondents, which included a variety of organizations and individuals such as: + +- Accenture +- Access Now +- ACT | The App Association +- AHIP +- AIethicist.org +- Airlines for America +- Alliance for Automotive Innovation +- Amelia Winger-Bearskin +- American Civil Liberties Union + +These responses are available in full online for further review.","OSTP sought insights on the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. Feedback was provided by 130 organizations and individuals, including Accenture, ACLU, Google, Microsoft Corporation, and many others.",1.0,0.9132416259566449,0.5,0.9999999999,0.677222440803751 +"What causes model collapse in AI, especially with synthetic data and biases?",[],"Model collapse in AI can occur due to several factors, particularly when dealing with synthetic data and biases. Here are some primary causes: + +1. **Over-reliance on Synthetic Data**: If a model is trained primarily on synthetic data, it might not generalize well to real-world scenarios. Synthetic data might lack the variability and complexity of real-world data, leading to overfitting where the model performs well on synthetic data but poorly on actual data. + +2. **Biases in Data**: If the synthetic data contains biases, these biases can get ingrained in the model. Bias in data can stem from several sources, such as biased sampling methods, historical prejudices, or even the algorithms used to generate synthetic data. When a model learns from biased data, it can perpetuate and even amplify these biases, resulting in unfair or skewed outcomes. + +3. **Feedback Loop of Bias**: When a biased model generates synthetic data, and this data is then used to further train the model, a dangerous feedback loop can be created. This iterative process can cause the biases to become more pronounced over time, leading to what is known as model collapse. Essentially, the model's predictions become increasingly detached from reality and more aligned with the inherent biases in the data. + +4. **Lack of Diversity in Training Data**: Diversity in training data is crucial for a model to learn a wide range of features and scenarios. If the synthetic data is not diverse enough, the model may only learn to handle a narrow set of situations, making it brittle and prone to failure when faced with unforeseen circumstances. + +5. **Quality of Synthetic Data**: The quality of the synthetic data plays a significant role. Poorly generated synthetic data that does not accurately represent the real-world distributions can lead to erroneous learning patterns in the model. This can cause the model to develop incorrect assumptions and perform poorly on actual tasks. + +6. 
**Insufficient Validation**: Another cause can be the lack of proper validation mechanisms to ensure that the model performs well on real-world data. Without rigorous testing and validation using real-world data, it is challenging to identify and correct for model collapse. + +Addressing these issues requires careful design of training protocols, ensuring diversity and quality in synthetic data, and implementing robust validation processes to mitigate biases and prevent model collapse.","Model collapse in AI can occur when model training over-relies on synthetic data, leading to data points disappearing from the distribution of the new model's outputs. This threatens the robustness of the model overall and can result in homogenized outputs, amplifying any homogenization from the model used to generate the synthetic training data.",1.0,0.9418735247760051,0.0,0.0,0.9308849049684553 +"What standards should automated systems follow for safety and fairness, and how to assess them?",[],"Ensuring the safety and fairness of automated systems is crucial for their responsible development and deployment. Here are some key standards and assessment methods that should be followed: + +### Safety Standards + +1. **Reliability and Robustness**: + - **Standard**: Systems should be reliable and perform consistently under various conditions, including unexpected situations. + - **Assessment**: Conduct rigorous testing, including stress tests, to ensure the system performs well under different scenarios and edge cases. + +2. **Security**: + - **Standard**: Systems should be secure against cyber threats and unauthorized access. + - **Assessment**: Perform regular security audits and penetration testing to identify and mitigate vulnerabilities. + +3. **Transparency and Explainability**: + - **Standard**: The decision-making processes of automated systems should be transparent and explainable to users. + - **Assessment**: Implement explainable AI (XAI) techniques and document the decision-making process clearly. + +4. **Accountability**: + - **Standard**: There should be clear accountability mechanisms in place for the actions and decisions made by automated systems. + - **Assessment**: Establish clear guidelines and responsibilities for monitoring and responding to system performance and errors. + +### Fairness Standards + +1. **Bias and Discrimination**: + - **Standard**: Systems should be designed to minimize and mitigate biases and discrimination. + - **Assessment**: Use fairness-aware machine learning techniques and conduct bias audits to identify and address potential biases in data and algorithms. + +2. **Inclusivity**: + - **Standard**: Automated systems should consider the needs of diverse user groups and avoid exclusion. + - **Assessment**: Engage with diverse stakeholders during the design and testing phases to ensure inclusivity. + +3. **Ethical Considerations**: + - **Standard**: Systems should adhere to ethical guidelines and respect user rights and freedoms. + - **Assessment**: Implement ethical review boards and conduct regular ethical impact assessments. + +### Assessment Methods + +1. **Testing and Validation**: + - **Unit Testing**: Test individual components of the system. + - **Integration Testing**: Ensure that components work together as expected. + - **User Testing**: Collect feedback from real users to identify potential issues. + +2. **Audits and Reviews**: + - **Internal Audits**: Regularly review the system internally for compliance with safety and fairness standards. 
+ - **Third-party Audits**: Engage independent auditors to provide an unbiased assessment. + +3. **Monitoring and Reporting**: + - **Continuous Monitoring**: Implement monitoring systems to track the performance and behavior of automated systems in real-time. + - **Incident Reporting**: Establish a clear process for reporting and addressing incidents related to safety and fairness. + +4. **Regulatory Compliance**: + - **Adherence to Regulations**: Ensure compliance with relevant laws and regulations, such as GDPR for data protection and privacy. + - **Certification**: Obtain certifications from recognized standards organizations, such as ISO/IEC 27001 for information security management. + +By following these standards and assessment methods, organizations can develop and deploy automated systems that are both safe and fair, thereby gaining trust and ensuring ethical use of AI technologies.","Automated systems should follow standards that include independent evaluation, regular reporting, and protections against algorithmic discrimination. They should be designed to allow independent evaluators access to assess safety and effectiveness, with regular updates on system performance, data usage, risk management, and independent evaluations. Additionally, entities should conduct algorithmic impact assessments to evaluate potential discrimination and ensure transparency in reporting these assessments.",0.0,0.962656847868895,0.0,0.0,0.42707503199867314 +What strategies help with privacy and IP risks in AI content?,[],"To mitigate privacy and intellectual property (IP) risks in AI-generated content, several strategies can be employed: + +1. **Data Anonymization**: Ensure that any personal data used in training AI models is anonymized to protect individual privacy. Techniques like data masking, pseudonymization, and generalization can help. + +2. **Access Controls**: Implement strict access controls to limit who can access sensitive data and AI models. Use role-based access controls (RBAC) and ensure that only authorized personnel have access to critical information. + +3. **Encryption**: Encrypt data both in transit and at rest to protect it from unauthorized access or breaches. Use robust encryption standards to safeguard sensitive information. + +4. **Ethical AI Use Policies**: Establish clear policies regarding the ethical use of AI, including guidelines on how to handle data and respect for IP rights. Ensure all stakeholders are aware of these policies. + +5. **Regular Audits**: Conduct regular audits and assessments of AI systems to identify and mitigate potential privacy and IP risks. This can help in maintaining compliance with regulations and standards. + +6. **Data Minimization**: Collect and use only the data necessary for the specific AI application. Reducing the amount of data processed can lower the risk of privacy breaches and IP theft. + +7. **IP Management**: Implement robust IP management practices, including securing patents, trademarks, and copyrights for AI-generated content. This can help protect proprietary information and innovations. + +8. **Transparency and Explainability**: Make AI processes as transparent and explainable as possible. This helps in understanding how data is used and ensures compliance with privacy regulations. + +9. **Legal Compliance**: Stay informed about and comply with relevant data protection laws and regulations, such as GDPR, CCPA, and others. This can help avoid legal repercussions and enhance trust. + +10. 
**Third-Party Agreements**: Ensure that third-party vendors and partners comply with your privacy and IP protection standards. Use comprehensive agreements to enforce these requirements. + +Implementing these strategies can significantly reduce privacy and IP risks associated with AI-generated content.","Strategies to help with privacy and intellectual property (IP) risks in AI content include conducting periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, documenting training data curation policies, establishing policies for collection and retention of data, and conducting appropriate diligence on training data use to assess intellectual property and privacy risks.",1.0,0.969496560603174,0.0,0.0,0.31710628224756876 +"How does risk documentation aid compliance and governance in GAI systems, especially with external feedback?","['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'the results of public consultation such as concerns raised and any decisions made due to these concerns; risk identification and management assessments and any steps taken to mitigate potential harms;']","Risk documentation plays a critical role in compliance and governance for General Artificial Intelligence (GAI) systems, particularly when incorporating external feedback. Here's how: + +1. **Transparency and Accountability:** By documenting risks, including concerns raised during public consultations and the steps taken to mitigate potential harms, GAI system developers can demonstrate transparency in their processes. This helps build trust with stakeholders and regulatory bodies. + +2. **Informed Decision-Making:** Risk documentation allows for a structured approach to identifying, assessing, and managing potential risks associated with GAI systems. This systematic approach aids in making informed decisions that prioritize safety and ethical considerations. + +3. **Change Management:** Maintaining thorough records, including change management logs and version histories, ensures that any modifications to the GAI system are tracked. This is crucial for understanding the impact of changes and maintaining the integrity of the system over time. + +4. **Incident Response:** Effective logging and recording of GAI incidents provide a foundation for analyzing and responding to issues promptly. Sharing this information with relevant AI actors can lead to more coordinated and effective incident management and resolution. + +5. **Regulatory Compliance:** Detailed risk documentation helps in complying with regulatory requirements by providing evidence of due diligence in identifying and mitigating risks. It ensures that the GAI system adheres to legal and ethical standards. + +6. **Feedback Integration:** Documenting external feedback, such as concerns from public consultations, allows for continuous improvement of GAI systems. It ensures that the system evolves in a way that aligns with societal values and addresses public concerns. 
+ +In summary, risk documentation enhances compliance and governance by promoting transparency, enabling informed decision-making, ensuring effective change management, facilitating incident response, ensuring regulatory compliance, and integrating external feedback.","The context does not provide specific information on how risk documentation aids compliance and governance in GAI systems, particularly regarding external feedback.",0.40540540540540543,0.9234380953751838,1.0,0.0,0.4729754463156192 +"How does testing ensure the safety of automated systems before deployment, especially regarding community input and risk?","['Governments at all levels have existing public consultation processes that may be applied when considering the use of new automated systems, and existing product development and testing practices already protect the American public from many potential harms.' + 'the results of public consultation such as concerns raised and any decisions made due to these concerns; risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.']","Testing ensures the safety of automated systems before deployment through several key processes, particularly by incorporating community input and risk management strategies. Here's a detailed explanation: + +1. **Public Consultation**: + - **Community Input**: Governments and organizations often engage in public consultation processes to gather concerns and feedback from the community regarding the deployment of new automated systems. This input helps identify potential issues and ensures that the system aligns with public expectations and needs. + - **Decision-Making**: The concerns raised during public consultations are used to make informed decisions about the system's deployment. This process ensures that the community's viewpoints are considered, potentially leading to revisions or enhancements to the system based on the feedback received. + +2. **Risk Identification and Management**: + - **Assessments**: Comprehensive risk assessments are conducted to identify potential harms that could arise from the use of the automated system. These assessments evaluate various risk factors and potential impacts on different demographic groups. + - **Mitigation Steps**: Based on the risk identification, specific steps are taken to mitigate potential harms. This could involve modifying the system, implementing additional safeguards, or setting up robust monitoring mechanisms to ensure ongoing safety. + +3. **Performance Testing**: + - **Accuracy and Error Rates**: The system undergoes rigorous performance testing to measure its accuracy and identify error rates, both overall and per demographic group. This helps ensure that the system performs reliably and does not disproportionately affect certain groups. 
+ - **Comparative Analysis**: The new system's performance is compared to previously deployed systems to ensure it offers improvements or at least meets the existing safety and efficacy standards. + +4. **Independent Evaluation and Reporting**: + - **Verification**: Independent evaluations are conducted to confirm that the system is safe and effective. This external review adds an additional layer of scrutiny and objectivity to the testing process. + - **Transparency**: The results of these evaluations, including any steps taken to mitigate identified harms, are made public whenever possible. This transparency helps build trust in the system and ensures accountability. + +5. **Ongoing Monitoring**: + - **Regular Performance Reports**: After deployment, the system is subject to ongoing monitoring procedures and regular performance testing. This continuous oversight helps detect and address any emerging issues promptly. + - **Monitoring Frequency**: The frequency of these monitoring activities ensures that any deviations from expected performance are quickly identified and corrected, maintaining the system's safety over time. + +By integrating these processes, the testing phase ensures that automated systems are thoroughly vetted for safety and efficacy, taking into account community input and potential risks before they are deployed.","Testing ensures the safety of automated systems before deployment by requiring extensive testing that follows domain-specific best practices, taking into account the specific technology used and the roles of human operators. This testing should mirror real-world conditions and include both automated and human-led testing. Additionally, community input is gathered through consultation during the design and implementation phases, allowing for the identification and mitigation of potential risks that may impact rights and access, particularly for affected communities. Concerns raised during this consultation should be documented and considered in the development process, ensuring that the system is safe and effective based on community feedback.",0.5151515151515151,0.9666118802913207,0.5,0.9999999999666667,0.8509670474889497 +"What standards should automated systems follow for safety and fairness, and how to assess them?",[],"Ensuring safety and fairness in automated systems is crucial for their responsible deployment. Here are some standards and methods to assess them: + +1. **Transparency and Explainability**: + - **Standard**: Automated systems should be transparent about how they make decisions and provide explanations that are understandable to users. + - **Assessment**: Evaluate the system's documentation and user interface to ensure explanations are clear. Conduct user studies to check if users understand the decisions made by the system. + +2. **Accountability**: + - **Standard**: There should be mechanisms in place to hold developers and operators accountable for the systemā€™s actions. + - **Assessment**: Check if there are logs and audit trails for decisions made by the system. Ensure there are processes for reporting issues and that there are clear responsibilities assigned. + +3. **Bias and Fairness**: + - **Standard**: Automated systems should be designed to minimize and monitor bias to ensure fair treatment of all users. + - **Assessment**: Conduct regular audits of the system's outputs to identify and mitigate biases. Use fairness metrics like demographic parity or equal opportunity to assess the system's performance across different groups. 
+ +4. **Security and Privacy**: + - **Standard**: Systems should be secure against malicious attacks and protect user privacy. + - **Assessment**: Perform security audits and penetration testing. Ensure compliance with privacy regulations like GDPR or CCPA. Regularly review data handling practices and access controls. + +5. **Reliability and Robustness**: + - **Standard**: Automated systems should operate reliably under expected conditions and handle unexpected conditions gracefully. + - **Assessment**: Test the system extensively under various scenarios, including edge cases. Use techniques like fault injection to assess robustness. + +6. **Human Oversight**: + - **Standard**: There should be meaningful human oversight in critical decision-making processes. + - **Assessment**: Evaluate the design of the human-in-the-loop processes. Ensure there are clear interfaces and protocols for human intervention. + +7. **Ethical Use**: + - **Standard**: The deployment and use of automated systems should align with ethical guidelines and societal values. + - **Assessment**: Review the system's use cases against ethical standards and guidelines. Engage with stakeholders to understand and address ethical concerns. + +By adhering to these standards and regularly assessing automated systems against them, developers and operators can help ensure that these systems are safe, fair, and beneficial for all users.","Automated systems should follow standards that include independent evaluation to ensure safety and effectiveness, regular reporting on system performance and data usage, and protections against algorithmic discrimination. Assessments should involve algorithmic impact assessments that detail consultation results, equity assessments, and any disparities, with findings made public whenever possible.",1.0,0.9741821324363441,0.0,0.0,0.30428608033228005 +What federal steps are being taken to tackle algorithmic bias in mortgage lending for communities of color?,['Federal government agencies have been developing standards and guidance for the use of automated systems in order to help prevent bias.'],"The federal government has been actively working to develop standards and guidance to address and prevent bias in automated systems, including those used in mortgage lending. These efforts are aimed at ensuring that automated systems do not unfairly disadvantage communities of color. Here are some key steps being taken: + +1. **Development of Standards and Guidance:** Federal agencies are creating and refining standards and guidelines to ensure that automated systems are fair, transparent, and accountable. These standards aim to mitigate bias and ensure equitable treatment for all applicants, regardless of race or ethnicity. + +2. **Fair Lending Laws:** Agencies such as the Consumer Financial Protection Bureau (CFPB) and the Department of Housing and Urban Development (HUD) enforce fair lending laws that prohibit discrimination based on race, color, national origin, religion, sex, familial status, or disability. These laws include the Equal Credit Opportunity Act (ECOA) and the Fair Housing Act (FHA). + +3. **Algorithmic Audits and Impact Assessments:** Conducting regular audits and impact assessments of mortgage lending algorithms can help identify and rectify biases that may disproportionately affect communities of color. This involves analyzing the data and outcomes produced by these systems to ensure they are fair and just. + +4. 
**Transparency and Accountability Requirements:** Federal agencies are advocating for greater transparency in how mortgage lending algorithms are designed and used. This includes requiring lenders to provide clear explanations of how their algorithms work and how decisions are made. + +5. **Collaboration and Research:** Federal agencies are collaborating with researchers, civil rights organizations, and industry stakeholders to study and address the impacts of algorithmic bias. This collaborative approach helps ensure that a wide range of perspectives and expertise inform the development of fair and equitable automated systems. + +By implementing these steps, the federal government aims to reduce algorithmic bias in mortgage lending and ensure that communities of color are treated fairly and have equal access to credit.","The federal government is working to combat discrimination in mortgage lending through initiatives such as the Department of Justice's nationwide initiative to combat redlining. This includes reviewing how lenders may be avoiding serving communities of color and conducting targeted marketing and advertising. Additionally, the Action Plan to Advance Property Appraisal and Valuation Equity includes a commitment from agencies overseeing mortgage lending to include a nondiscrimination standard in proposed rules for Automated Valuation Models.",0.19230769230769232,0.9498155528513254,0.0,0.9999999999,0.9092470234145235 +What strategies can reduce bias in GAI while maintaining data accuracy?,[],"Reducing bias in General Artificial Intelligence (GAI) while maintaining data accuracy involves several strategies: + +1. **Diverse and Representative Data**: Ensure the training data is diverse and representative of all relevant groups. This helps the AI learn from a wide range of examples, reducing the risk of bias towards any specific group. + +2. **Preprocessing Techniques**: Implement preprocessing techniques such as data augmentation, re-sampling, or re-weighting to balance the dataset. This can help mitigate biases that stem from imbalanced data. + +3. **Bias Detection Tools**: Use bias detection tools to identify and quantify biases in the dataset and the AI models. Tools like Fairness Indicators or AI Fairness 360 can help in detecting and addressing biases. + +4. **Fair Algorithms**: Integrate fairness constraints and bias mitigation algorithms during the model training phase. Techniques like adversarial debiasing, re-weighting loss functions, or using fairness-aware machine learning algorithms can help in this regard. + +5. **Human-in-the-Loop**: Incorporate human oversight in the AI development process to identify and correct biases that automated systems might miss. Diverse teams can provide insights into potential biases and their mitigation. + +6. **Transparent Reporting**: Maintain transparency in the AI development process by documenting data sources, model choices, and decisions made during development. This can help in identifying and addressing biases and ensuring that the AI's decision-making process is understandable. + +7. **Ongoing Monitoring and Evaluation**: Continuously monitor and evaluate the AI system post-deployment to detect any emerging biases. This involves regular audits and updates to the model as new data becomes available. 
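+
+As a concrete illustration of the bias-detection and monitoring steps above, a common audit metric is demographic parity. The minimal sketch below (plain Python; `predictions` and `groups` are hypothetical toy inputs, not outputs of any tool named above) computes the gap in positive-prediction rates between groups:
+
+```python
+# Minimal sketch: demographic parity difference for a binary classifier.
+# `predictions` holds 0/1 model outputs; `groups` holds one group label
+# per prediction. Both are illustrative stand-ins.
+def demographic_parity_difference(predictions, groups):
+    # Positive-prediction rate per group; a gap of 0.0 means parity here.
+    rates = {}
+    for group in set(groups):
+        selected = [p for p, g in zip(predictions, groups) if g == group]
+        rates[group] = sum(selected) / len(selected)
+    ordered = sorted(rates.values())
+    return ordered[-1] - ordered[0]
+
+preds = [1, 0, 1, 1, 0, 1, 0, 0]
+grps = ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b']
+print(demographic_parity_difference(preds, grps))  # 0.5 on this toy data
+```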
+ +By implementing these strategies, it is possible to reduce bias in GAI while maintaining the accuracy and integrity of the data used.",The answer to given question is not present in context,0.0,0.9431887187906636,0.0,0.0,0.9233834974831243 +What strategies help manage IP risks in GAI while ensuring transparency?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.']","Managing intellectual property (IP) risks in General Artificial Intelligence (GAI) while ensuring transparency involves several strategies: + +1. **Documentation Practices**: Thorough logging, recording, and analyzing of GAI incidents can help manage IP risks. By maintaining detailed records, organizations can track the development and usage of AI systems, ensuring that IP concerns are addressed promptly and transparently. + +2. **Information Sharing**: Regular sharing of information with relevant AI actors can mitigate IP risks. This includes sharing change management records, version history, and metadata, which can help in tracing the origins and modifications of AI systems, making it easier to resolve IP disputes. + +3. **Change Management and Version Control**: Keeping meticulous change management records and version histories ensures that every modification is documented. This transparency helps in identifying the contribution of different parties and protecting their IP rights. + +4. **Metadata Management**: Proper management of metadata related to AI models and datasets can aid in maintaining transparency. Metadata provides detailed information about the data and models, which is essential for IP management and ensuring that all contributions are correctly attributed. + +By implementing these strategies, organizations can effectively manage IP risks while maintaining the transparency needed for responsible AI development and deployment.",The context does not provide specific strategies for managing IP risks in GAI while ensuring transparency.,0.26666666666666666,0.9195987241752617,1.0,0.0,0.9130638557738306 +How do automated customer service systems meet complex needs with human oversight?,[],"Automated customer service systems typically meet complex needs by combining advanced technologies with human oversight in a few key ways: + +1. **Escalation Protocols**: Automated systems handle routine inquiries and tasks, but they are programmed to recognize when an issue is too complex or falls outside of predefined parameters. In such cases, the system escalates the issue to a human agent who can provide more nuanced assistance. + +2. **AI and Machine Learning**: These systems use AI and machine learning to continually improve their understanding of customer inquiries. They can analyze large sets of data to identify patterns and improve their responses over time. However, when these systems encounter ambiguous or complex queries that they cannot resolve, human oversight ensures that the customer still receives accurate and appropriate support. + +3. **Human-in-the-Loop (HITL)**: This approach integrates human judgment into the automation process. For more complex tasks, human agents can review and override the automated responses to ensure accuracy and relevance. This helps in maintaining a high standard of service quality. + +4. 
**Regular Training and Updates**: Human supervisors and AI trainers regularly update the automated systems with new information, trends, and insights from human interactions. This continuous learning process helps the system to better handle complex needs in the future. + +5. **Feedback Mechanisms**: Automated customer service systems often include mechanisms for collecting customer feedback on their performance. This feedback is reviewed by human agents who can make necessary adjustments to the system, ensuring it meets customer needs more effectively over time. + +By leveraging these strategies, automated customer service systems can address a wide range of customer concerns while ensuring that complex issues are managed with the necessary human oversight.","Automated customer service systems meet complex needs with human oversight by integrating automated services such as chat-bots and AI-driven call response systems, which can escalate issues to a human support team when necessary. This allows companies to provide faster customer care while ensuring that human agents are available to handle complicated requests.",1.0,0.9695459514981483,0.0,0.0,0.4332676029107196 +How does diverse data help prevent algorithmic bias in automated systems?,[],"Diverse data helps prevent algorithmic bias in automated systems by ensuring that the models trained on this data can recognize and appropriately handle a wide range of scenarios and characteristics that exist in the real world. Here are some specific ways diverse data contributes to reducing bias: + +1. **Representative Sampling**: When data includes various demographic groups, it ensures that the model is exposed to different patterns and behaviors, reducing the risk of the model favoring one group over another. + +2. **Mitigating Underrepresentation**: Diverse data helps prevent the issue of underrepresentation of certain groups. If a particular group is underrepresented in the training data, the model might perform poorly for that group, leading to biased outcomes. + +3. **Improving Generalization**: Models trained on diverse datasets are better at generalizing their predictions to new, unseen data. This reduces the risk of making biased decisions based on limited or skewed data. + +4. **Identifying and Correcting Bias**: Diverse datasets can help identify existing biases in the data. By recognizing these biases during the training phase, steps can be taken to correct them, leading to fairer and more equitable models. + +5. **Preventing Overfitting**: Exposure to a variety of data points helps prevent overfitting, where a model performs well on training data but fails to generalize to new data. Overfitting can exacerbate biases if the training data is not representative. + +In summary, using diverse data in training automated systems ensures more comprehensive and fairer models, which are crucial for reducing algorithmic bias and improving the reliability and equity of AI systems.","Diverse data helps prevent algorithmic bias in automated systems by ensuring that any data used in system development or assessment is representative of local communities based on the planned deployment setting. 
This data should be reviewed for bias considering the historical and societal context, and it should be sufficiently robust to identify and mitigate biases and potential harms.",1.0,1.000000000000001,0.0,0.0,0.38703773265199304
+"What factors should be considered for assessing GAI systems' socio-cultural impacts and data integrity?","['• Data provenance'
+ 'a description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted, a summary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the results of public consultation such as concerns raised and any decisions made due to these concerns; risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']","When assessing the socio-cultural impacts and data integrity of General AI (GAI) systems, the following factors should be considered:
+
+**Socio-Cultural Impacts:**
+
+1. **Bias and Fairness:**
+ - Evaluate the system for biases that might affect different demographic groups unequally.
+ - Implement fairness assessments to ensure equitable treatment across all segments of society.
+
+2. **Public Perception and Trust:**
+ - Conduct public consultations to gather concerns and feedback from diverse stakeholders.
+ - Transparency in how decisions are made and how data is used to foster trust.
+
+3. **Ethical Considerations:**
+ - Ensure the AI aligns with societal norms and ethical standards.
+ - Address issues related to privacy, autonomy, and the potential for misuse.
+
+4. **Impact on Employment and Economy:**
+ - Assess how the deployment of GAI may affect job markets and economic stability.
+ - Consider strategies for workforce reskilling and transitioning.
+
+5. **Cultural Sensitivity:**
+ - Ensure the AI respects and understands cultural nuances and diversity.
+ - Avoid cultural appropriation or insensitivity in AI behaviors and outputs.
+
+**Data Integrity:**
+
+1. **Data Provenance:**
+ - Maintain a comprehensive description of the data sources used for training.
+ - Document how data was processed and interpreted, including any transformations applied.
+
+2. **Data Quality:**
+ - Identify and address any missing, incomplete, or erroneous data.
+ - Justify the relevancy of the data used in the context of the AI's intended application.
+
+3. **Risk Identification and Management:**
+ - Conduct risk assessments to identify potential harms arising from data usage.
+ - Implement steps to mitigate these risks, such as data anonymization or bias correction techniques.
+
+4. **Performance Testing and Monitoring:**
+ - Regularly test the AI system's performance, including accuracy and error rates.
+ - Monitor differential impacts on various demographic groups to ensure fairness.
+ - Compare performance with previously deployed systems to identify improvements or regressions.
+
+5. **Ongoing Monitoring and Reporting:**
+ - Establish ongoing procedures for performance monitoring.
+ - Generate regular reports on monitoring results and any actions taken in response to issues identified.
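+
+To make the provenance and monitoring documentation above concrete, here is a minimal sketch of a machine-readable provenance record (plain Python; the field names and values are illustrative assumptions, not a standard schema):
+
+```python
+# Minimal sketch of a data-provenance record covering the points above:
+# sources, processing steps, known gaps, and a relevancy justification.
+from dataclasses import asdict, dataclass, field
+import json
+
+@dataclass
+class ProvenanceRecord:
+    source: str                            # where the data came from
+    processing_steps: list = field(default_factory=list)
+    known_gaps: str = ''                   # missing/incomplete/erroneous data
+    relevancy_justification: str = ''
+
+record = ProvenanceRecord(
+    source='agency_pdf_corpus_v1',         # hypothetical dataset name
+    processing_steps=['pdf_extraction', 'chunking', 'deduplication'],
+    known_gaps='tables and figures dropped during PDF extraction',
+    relevancy_justification='covers AI governance questions in scope',
+)
+print(json.dumps(asdict(record), indent=2))
+```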
+
+By carefully considering these factors, developers and stakeholders can better understand and mitigate the potential socio-cultural impacts and ensure the integrity of the data used in GAI systems.","Factors to consider for assessing GAI systems' socio-cultural impacts include assumptions and limitations, direct value to the organization, intended operational environment, observed usage patterns, potential positive and negative impacts to individuals and communities, and social norms and expectations. For data integrity, factors include evaluating the quality and integrity of data used in training, the provenance of AI-generated content, and ensuring that data or benchmarks used in risk measurement are representative of diverse in-context user populations.",0.5652173913043478,0.9460105832293703,0.0,0.49999999995,0.8534320221867975
+"What risks come from human use of generative AI, both emotionally and socially?","['Conversely, due to the complexity and increasing reliability of GAI technology, over time, humans may over-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that produced by other sources. This phenomenon is an example of automation bias, or excessive deference to automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation or risks of bias or homogenization. There may also be concerns about emotional entanglement between humans and GAI systems, which could lead to negative psychological impacts.']","Human use of generative AI (GAI) can pose several risks both emotionally and socially:
+
+1. **Automation Bias:** As humans increasingly rely on GAI systems, there is a risk of automation bias, where people may unjustifiably perceive GAI content as being of higher quality or more reliable than that produced by other sources. This can lead to over-reliance on these systems and a lack of critical evaluation of the information they provide.
+
+2. **Confabulation:** GAI systems can sometimes generate incorrect or misleading information. If users overly trust these systems, they might accept and act on false information, leading to potentially harmful decisions.
+
+3. **Bias and Homogenization:** GAI systems can perpetuate and exacerbate existing biases if they are trained on biased datasets. This can result in the homogenization of content, where diverse perspectives are underrepresented or ignored, reinforcing stereotypes and limiting the richness of discourse.
+
+4. **Emotional Entanglement:** There is a concern about emotional entanglement between humans and GAI systems. Users might form emotional attachments to these systems, which could lead to negative psychological impacts, such as increased loneliness or dependency on non-human interactions for emotional support.
+ +These risks highlight the importance of critical engagement with GAI systems and the need for awareness of their limitations and potential biases.","The risks that come from human use of generative AI (GAI) include immediate and prolonged emotional harm, potential risks to physical safety due to the distribution of harmful deepfake images, and the long-term effect of disinformation on societal trust in public institutions.",1.0,0.9690994182098133,0.3333333333333333,0.9999999999,0.303451799456877 +"What problems does AI nudification tech address, and how do they connect to wider concerns about automated harm?",[],"AI nudification technology primarily addresses the issue of transforming clothed images into nude ones, often without the consent of the individuals depicted. This type of technology can lead to significant privacy violations and emotional distress for the victims. Here are some key problems it addresses and how they connect to wider concerns about automated harm: + +1. **Privacy Invasion:** AI nudification can be used to create non-consensual, explicit images, invading the privacy of individuals. This is a direct harm to personal dignity and can have serious emotional and social consequences. + +2. **Harassment and Exploitation:** Such technology can be weaponized for harassment, blackmail, and exploitation. It can be used to create fake explicit content for the purpose of bullying or coercing individuals. + +3. **Misinformation and Defamation:** AI nudification can contribute to the spread of misinformation. Fake nude images can be used to defame or discredit individuals, leading to reputational damage and potentially severe personal and professional consequences. + +4. **Impact on Trust in Media:** The ability to easily manipulate images can undermine trust in visual media. If people cannot trust that images they see are real, it can erode trust in media sources and information in general. + +These problems connect to wider concerns about automated harm in several ways: + +- **Ethical Use of AI:** The potential for harm with AI nudification tech highlights the need for ethical guidelines and regulations around AI development and deployment. Ensuring AI is used responsibly is a major concern in the field of AI safety. + +- **Consent and Control:** Automated technologies that can manipulate personal data without consent raise important questions about control and autonomy. Protecting individuals' rights to control their own image and data is crucial. + +- **Societal Impact:** The broader societal impact of such technologies includes exacerbating issues like cyberbullying, sexual harassment, and digital abuse. It calls for a societal response in terms of legal frameworks and protective measures. + +- **Trust and Security:** Ensuring that AI technologies do not undermine trust in digital interactions is essential. This includes developing technologies that can detect and prevent misuse of AI for harmful purposes. + +Addressing these problems requires a multifaceted approach, including technological safeguards, legal regulations, ethical standards, and public awareness to mitigate the risks associated with AI nudification and similar technologies.","AI nudification technology addresses the problem of creating non-consensual intimate images that can lead to image-based abuse, particularly impacting women. 
This technology raises wider concerns about automated harm as it exemplifies how advanced tools can be misused, leading to devastating effects on victims' personal and professional lives, as well as their mental and physical health. Additionally, the reliance on automated systems can result in unintended consequences, such as incorrect penalization of drivers or biased decision-making based on flawed historical data, highlighting the need for safeguards and ethical reviews in technology deployment.",1.0,0.9459072862930135,0.0,0.0,0.4956264919112619 +"What strategies ensure safe, fair automated systems for underserved communities?",[],"Ensuring safe and fair automated systems for underserved communities involves several strategies: + +1. **Inclusive Design and Development**: + - Engage community members and stakeholders in the design and development process to understand their needs and challenges. + - Ensure diverse representation in the development team to avoid biases and blind spots. + +2. **Bias Mitigation**: + - Use diverse and representative data sets to train AI models. + - Implement bias detection and mitigation techniques throughout the development process. + - Regularly audit AI systems for biases and unfair outcomes. + +3. **Transparency and Accountability**: + - Make AI systems and their decision-making processes transparent to users. + - Establish clear accountability mechanisms for decisions made by automated systems. + - Provide users with clear explanations of how decisions are made and the factors considered. + +4. **Community Engagement and Education**: + - Educate community members about AI technologies and their potential impacts. + - Facilitate ongoing dialogue between developers and the community to address concerns and gather feedback. + +5. **Ethical Frameworks and Standards**: + - Adopt ethical guidelines and standards for AI development and deployment. + - Ensure adherence to legal and regulatory requirements related to fairness, non-discrimination, and data protection. + +6. **Continuous Monitoring and Evaluation**: + - Implement continuous monitoring of AI systems to ensure they operate as intended and do not introduce new biases. + - Conduct regular impact assessments to evaluate the effects of AI systems on underserved communities. + +7. **Accessibility and Usability**: + - Design AI systems to be accessible and user-friendly for all community members, regardless of their technical proficiency. + - Provide support and resources to help users understand and effectively interact with AI systems. + +By integrating these strategies, developers can create AI systems that are not only safe and fair but also beneficial and empowering for underserved communities.","Strategies to ensure safe and fair automated systems for underserved communities include conducting proactive equity assessments during the design phase, using representative and robust data, guarding against proxies that may lead to algorithmic discrimination, and implementing ongoing monitoring and evaluation to confirm protections against algorithmic discrimination. 
These strategies aim to identify potential discrimination and effects on equity, ensuring that the systems are designed and deployed in an equitable manner.",1.0,0.9926039562040477,0.0,0.0,0.886277705253313 +What's the role of oversight and feedback in managing GAI risks and communicating their societal effects?,[],"Oversight and feedback play crucial roles in managing the risks associated with General Artificial Intelligence (GAI) and in effectively communicating their societal effects: + +1. **Risk Management**: + - **Continuous Monitoring**: Regular oversight helps in continuously monitoring the development and deployment of GAI systems to ensure they align with established safety protocols and ethical guidelines. + - **Early Detection**: Feedback mechanisms can help in the early detection of unintended behaviors or potential risks by collecting data from various stakeholders, including users, developers, and external auditors. + - **Adaptive Strategies**: Incorporating feedback allows for adaptive risk management strategies. As new risks are identified, systems and protocols can be updated to mitigate these risks effectively. + - **Accountability**: Oversight ensures that there is accountability in the development and deployment of GAI systems. It helps in assigning responsibility and ensuring that any negative consequences are addressed promptly. + +2. **Communication of Societal Effects**: + - **Transparency**: Effective oversight ensures transparency in how GAI systems are developed and used. This transparency is crucial for gaining public trust and for making informed decisions about the adoption of such technologies. + - **Public Engagement**: Feedback from the public and other stakeholders can be used to understand societal concerns and expectations. This engagement is essential for aligning GAI systems with societal values and needs. + - **Policy Development**: Feedback helps policymakers understand the societal impacts of GAI, which in turn aids in the development of regulations and policies that promote beneficial uses while mitigating risks. + - **Education and Awareness**: Communicating the feedback and oversight findings to the public helps in increasing awareness about the potential benefits and risks of GAI. This can lead to more informed public discourse and better preparedness for societal changes brought about by GAI. + +In summary, oversight and feedback mechanisms are integral to ensuring that GAI systems are safe, accountable, and aligned with societal values, while also facilitating transparent and informed communication about their impacts.","Oversight and feedback play a crucial role in managing GAI risks by ensuring that organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from external sources regarding the potential individual and societal impacts related to AI risks. This includes establishing oversight functions across the GAI lifecycle and documenting the risks and potential impacts of the AI technology, which facilitates broader communication about these impacts.",0.0,0.9079664300146382,0.0,0.0,0.7629150913969963 +Which framework aims to boost AI trustworthiness while upholding civil rights and privacy laws?,[],The AI Risk Management Framework (AI RMF) aims to boost AI trustworthiness while upholding civil rights and privacy laws. This framework was developed by the U.S. National Institute of Standards and Technology (NIST) and is designed to manage the risks associated with AI systems effectively. 
The AI RMF provides a structured approach to enhance the reliability and trustworthiness of AI technologies while ensuring compliance with relevant legal and ethical standards.,The NIST AI Risk Management Framework aims to boost AI trustworthiness while upholding civil rights and privacy laws.,0.0,0.8295859256408605,0.0,0.0,0.48449581876453596
+What factors ensure effective oversight in automated systems for critical fields like justice and healthcare?,"['• Auditing and assessment'
+ 'receiving community input into the design and use of technologies, public reporting on crucial elements of these systems, better notice and consent procedures that ensure privacy based on context and use case, ability to opt-out of using these systems and receive a fallback to a human process, providing explanations of decisions and how these systems work, the need for governance including training in using these systems, ensuring the technological use cases are genuinely related to the goal task and are locally validated to work, and the need for institution and protection of third party audits to ensure systems continue to be accountable and valid.']","Effective oversight in automated systems for critical fields like justice and healthcare can be ensured through several key factors:
+
+1. **Community Input**: Engaging the community in the design and use of these technologies helps ensure that the systems are meeting the needs and values of those they serve.
+
+2. **Public Reporting**: Transparent reporting on the critical elements of these systems helps build trust and allows for public scrutiny.
+
+3. **Notice and Consent Procedures**: Ensuring that privacy protocols are based on the context and use case, and implementing procedures that allow individuals to understand and consent to the use of these systems, is crucial.
+
+4. **Opt-Out Options**: Providing the ability for individuals to opt-out of using automated systems and revert to human processes ensures that those who are uncomfortable with or distrustful of the technology are not forced into its use.
+
+5. **Explanations of Decisions**: Providing clear explanations of how decisions are made by these systems helps individuals understand and trust the outcomes.
+
+6. **Governance Training**: Training those who use and oversee these systems ensures that they are used appropriately and ethically.
+
+7. **Goal-Related Use Cases**: Ensuring that the technological use cases are genuinely related to the goal task and are locally validated to work ensures that the systems are effective and relevant.
+
+8. **Third-Party Audits**: Instituting and protecting third-party audits ensures ongoing accountability and validity of the systems.
+
+By incorporating these factors, oversight can be effectively maintained, ensuring that automated systems in critical fields like justice and healthcare are used responsibly and ethically.","Effective oversight in automated systems for critical fields like justice and healthcare is ensured by tailoring the systems to their intended purpose, providing meaningful access for oversight, including training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions. Additionally, reporting on human governance processes and assessing their timeliness, accessibility, outcomes, and effectiveness should be made public whenever possible.",0.5294117647058824,0.991484029532184,0.5,0.49999999995,0.35948820747078203
+"What impact do automated systems have on rights, and how are transparency needs met by current laws?","[""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed.""
+ 'Among the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent.']","Automated systems can have significant impacts on individual rights and freedoms, presenting both opportunities and challenges. Here are some key points based on the provided context:
+
+### Impact on Rights:
+1. **Limiting Opportunities:**
+ - Automated systems can restrict access to critical resources or services, often perpetuating existing inequities or introducing new forms of bias and discrimination. For example, algorithms used in hiring and credit decisions may reflect and reproduce societal biases, limiting opportunities for certain groups.
+
+2. **Threat to Privacy:**
+ - Unchecked data collection by social media and other platforms can undermine privacy, tracking individuals' activities often without their knowledge or consent. This pervasive surveillance can have chilling effects on communities, influencing how people behave and express themselves.
+
+3. **Community Impact:**
+ - The use of automated systems can send negative signals to community members about how they are viewed, potentially leading to social alienation. Moreover, the lack of individualization in these systems can work against the belief that people can change for the better, impacting their social and economic mobility.
+
+4. **Loss of Jobs and Custody:**
+ - Automated systems used in employment and social services can contribute to job losses and affect custody decisions, with severe personal and social consequences.
+ +### Transparency Needs and Current Laws: +The context does not provide specific details about how current laws meet the transparency needs for automated systems. However, it is implied that there are significant concerns regarding the transparency and accountability of these systems. Here are some general considerations: + +1. **Transparency Requirements:** + - Effective transparency would require that individuals and communities understand how automated decisions are made, what data is being used, and how potential biases are being addressed. This includes making algorithms and decision-making processes accessible and understandable to the public. + +2. **Legal Frameworks:** + - Current laws may address some aspects of transparency and accountability, but there are often gaps. For example, there may be requirements for data protection and privacy, but they might not fully cover the need for algorithmic transparency or the ability for individuals to challenge automated decisions. + +3. **Ethical and Regulatory Standards:** + - There is a growing call for robust ethical and regulatory standards to ensure that automated systems are used fairly and transparently. This includes developing guidelines for the ethical use of AI, regular audits of algorithms, and mechanisms for individuals to seek redress if adversely affected by automated decisions. + +Overall, while automated systems offer significant benefits, their use must be carefully regulated to protect individual rights and ensure transparency. Current laws may provide a starting point, but there is a need for more comprehensive frameworks to address the challenges posed by these technologies.","Automated systems significantly impact rights by determining opportunities in various areas such as employment and credit, often without individuals being aware of the algorithms influencing decisions. Current laws, such as the Biometric Information Privacy Act in Illinois, require written notice when biometric information is used, and federal laws like the Fair Credit Reporting Act mandate that consumers receive adverse action notices when credit is denied. These laws aim to ensure transparency and provide individuals with the knowledge necessary to contest decisions made by automated systems.",0.8620689655172413,0.941017375049192,0.3333333333333333,0.99999999995,0.33795046623191566 +What criteria ensure human subject protection in AI evaluations?,"['>>>\nWhen implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.\n>>>']","To ensure human subject protection in AI evaluations, organizations should adhere to several key criteria, including: + +1. **Informed Consent**: Participants should be fully informed about the nature of the study, its purpose, procedures, risks, and benefits. They should voluntarily agree to participate without any form of coercion. + +2. **Subject Compensation**: Participants should be fairly compensated for their time and involvement in the study, ensuring that compensation is not so high as to be coercive. + +3. **Privacy and Confidentiality**: Participants' data should be collected, stored, and processed in a manner that ensures their privacy and confidentiality. This includes anonymizing data where possible and implementing robust data security measures. + +4. **Minimization of Risk**: The study should be designed to minimize potential risks to participants. 
This includes assessing and mitigating any physical, psychological, or social risks that may arise from participation.
+
+5. **Ethical Approval**: The study should be reviewed and approved by an Institutional Review Board (IRB) or an equivalent ethics committee to ensure that it meets ethical standards and regulatory requirements.
+
+6. **Transparency and Accountability**: The organization conducting the study should be transparent about the research process and be accountable for upholding ethical standards throughout the study.
+
+By following these criteria, organizations can help protect the rights and well-being of human subjects involved in AI evaluations.","Human subject protection in AI evaluations is ensured through several criteria, including: 1) evaluations involving human subjects must meet applicable requirements and be representative of the relevant population; 2) options must be provided for human subjects to withdraw participation or revoke consent for the use of their data; 3) techniques such as anonymization and differential privacy should be used to minimize risks associated with linking AI-generated content back to individual human subjects; 4) documentation of how content provenance data is tracked and how it interacts with privacy and security is necessary, including the removal of personally identifiable information (PII).",0.7777777777777778,0.9740153250201121,0.0,0.9999999999,0.2354870955126728
+"How does data provenance support ethical AI governance, especially for human protection and bias?","['a description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted, a summary of what data might be missing, incomplete, or erroneous, and data relevancy justifications;']","Data provenance, which refers to the detailed history of the data used to train machine learning models, plays a crucial role in supporting ethical AI governance, particularly in terms of human protection and bias mitigation. Here's how:
+
+1. **Transparency and Accountability**: By documenting the origins and transformations of data, data provenance ensures transparency in AI development. This transparency allows stakeholders to trace back decisions to their data sources, holding developers accountable for the quality and ethical implications of the data used.
+
+2. **Bias Detection and Mitigation**: Understanding the provenance of data helps in identifying potential biases embedded in the datasets. If certain demographic groups are underrepresented or misrepresented in the data, it can lead to biased outcomes. Provenance information allows developers to detect these issues early and take corrective actions, such as rebalancing the dataset or applying fairness-adjusting techniques.
+
+3. **Data Quality and Integrity**: Provenance provides insights into how data was collected, processed, and interpreted. This helps in ensuring the quality and integrity of the data, which is essential for building reliable and trustworthy AI systems. High-quality data reduces the risk of erroneous or harmful AI behaviors.
+
+4. **Compliance with Ethical Standards and Regulations**: Many ethical guidelines and regulations require documentation of data sources and processing methods. Data provenance helps organizations comply with these requirements, ensuring that AI systems are developed and deployed in line with legal and ethical standards.
+
+5. **Informed Consent and Privacy Protection**: Provenance data can include information about how consent was obtained from individuals whose data is being used. This ensures that AI development respects individuals' privacy and consent, which is a fundamental aspect of ethical AI.
+
+Overall, data provenance is a foundational element in building ethical AI systems that protect human rights and minimize biases, ultimately fostering trust and ensuring the responsible use of AI technologies.","The context does not explicitly mention how data provenance supports ethical AI governance, particularly regarding human protection and bias.",0.0,0.9597059766658592,0.0,0.0,0.4481204269861282
+"What factors on data privacy and content integrity should be considered for a GAI system, especially regarding user feedback and transparency?","['receiving community input into the design and use of technologies, public reporting on crucial elements of these systems, better notice and consent procedures that ensure privacy based on context and use case, ability to opt-out of using these systems and receive a fallback to a human process, providing explanations of decisions and how these systems work, the need for governance including training in using these systems, ensuring the technological use cases are genuinely related to the goal task and are locally validated to work, and the need for institution and protection of third party audits to ensure systems continue to be accountable and valid.']","When considering data privacy and content integrity for a General AI (GAI) system, especially in the context of user feedback and transparency, several factors should be taken into account:
+
+1. **Community Input and Transparency**:
+ - **Receiving Community Input**: Actively involve the community in the design and use of the GAI system. This ensures that the system aligns with user expectations and societal norms.
+ - **Public Reporting**: Regularly report on crucial elements of the system, including data handling practices, decision-making processes, and updates. This fosters trust and accountability.
+
+2. **Data Privacy**:
+ - **Notice and Consent Procedures**: Implement robust notice and consent procedures that are clear and context-specific. Users should understand what data is being collected, how it will be used, and have the ability to consent or decline.
+ - **Opt-Out Options**: Provide users with the ability to opt-out of using the GAI system and offer a fallback to a human process. This respects user autonomy and privacy preferences.
+
+3. **Explanations and Transparency**:
+ - **Providing Explanations**: Offer clear explanations of the decisions made by the GAI system and how it operates. This helps users understand the reasoning behind automated decisions and reduces the opacity of AI processes.
+
+4. **Governance and Accountability**:
+ - **Training and Governance**: Ensure that there is proper governance in place, including training for those using and managing the GAI system. This includes understanding data privacy laws and ethical guidelines.
+ - **Third-Party Audits**: Allow for third-party audits to ensure the GAI system remains accountable and valid. Audits can verify that the system adheres to privacy standards and maintains content integrity.
+
+5. **Technological and Use Case Validation**:
+ - **Local Validation**: Ensure that the GAI system's use cases are genuinely related to the goal task and have been locally validated to work effectively.
This minimizes the risk of misuse and unintended consequences. + +By addressing these factors, a GAI system can better handle data privacy concerns and maintain content integrity, thereby fostering a trustworthy and transparent relationship with its users.","Factors on data privacy and content integrity for a GAI system include documenting the extent to which human domain knowledge is employed to improve GAI system performance, reviewing and verifying sources and citations in GAI system outputs, tracking instances of anthropomorphization in GAI system interfaces, verifying GAI system training data and TEVV data provenance, and regularly reviewing security and safety guardrails. Additionally, structured feedback about content provenance should be recorded and integrated from operators, users, and impacted communities, and there should be an emphasis on digital content transparency regarding the societal impacts of AI and the role of diverse and inclusive content generation.",0.5357142857142857,0.9266504397900003,0.14285714285714285,0.9999999999,0.8491766528293083 +What challenges did panelists see at the tech-healthcare equity intersection?,"['delivery, various panelists pointed to a number of concerns including access to and expense of broadband \nservice, the privacy concerns associated with telehealth systems, the expense associated with health \nmonitoring devices, and how this can exacerbate equity issues. On the issue of technology enhanced care, \nsome panelists spoke extensively about the way in which racial biases and the use of race in medicine \nperpetuate harms and embed prior discrimination, and the importance of ensuring that the technologies used \nin medical care were accountable to the relevant stakeholders. Various panelists emphasized the importance \nof having the voices of those subjected to these technologies be heard.' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed.""]","Panelists identified several challenges at the intersection of technology and healthcare equity: + +1. **Access and Expense of Broadband Service**: The lack of affordable and accessible broadband service can limit access to telehealth systems. + +2. **Privacy Concerns**: There are significant privacy concerns associated with telehealth systems that need to be addressed. + +3. **Cost of Health Monitoring Devices**: The high expense of health monitoring devices can exacerbate equity issues, making it difficult for underserved populations to benefit from these technologies. + +4. **Racial Biases in Medicine**: The use of race in medical practices can perpetuate harms and embed prior discrimination, raising concerns about the accountability of these technologies to relevant stakeholders. + +5. 
**Stakeholder Accountability**: There is a need to ensure that technologies used in medical care are accountable to the relevant stakeholders, particularly those subjected to these technologies. + +6. **Voice and Representation**: It is important to hear the voices of those subjected to these technologies to ensure that their needs and concerns are addressed. + +These challenges highlight the need for careful consideration and inclusive practices to ensure that technology enhances, rather than hinders, equity in healthcare.","Panelists identified several challenges at the tech-healthcare equity intersection, including access to and expense of broadband service, privacy concerns associated with telehealth systems, and the expense of health monitoring devices, which can exacerbate equity issues. Additionally, they discussed how racial biases and the use of race in medicine perpetuate harms and embed prior discrimination, emphasizing the need for accountability of the technologies used in medical care and the importance of hearing the voices of those subjected to these technologies.",0.7142857142857143,0.9522629898050273,1.0,0.99999999995,0.9109156066636536 +What strategies can help reduce IP and privacy risks in AI training data?,[],"To reduce IP (intellectual property) and privacy risks in AI training data, several strategies can be employed: + +1. **Data Anonymization:** Remove or alter personal identifiers in the data to protect individual privacy. Techniques such as k-anonymity, l-diversity, and differential privacy can help ensure that individuals cannot be re-identified. + +2. **Data Minimization:** Collect and use only the data that is strictly necessary for the purpose of training the AI model. This reduces the exposure of sensitive information. + +3. **Secure Data Storage and Transmission:** Encrypt data both at rest and in transit to protect it from unauthorized access. Implement robust access controls and audit logs to monitor who accesses the data. + +4. **Synthetic Data:** Use synthetic data generated from models that mimic the statistical properties of the real data without exposing actual sensitive information. This can help in reducing privacy risks. + +5. **Federated Learning:** Train models across multiple decentralized devices or servers holding local data samples, without exchanging them. This keeps the data localized and reduces the risk of data breaches. + +6. **Access Control and Data Governance:** Implement strict access control policies to ensure that only authorized personnel can access sensitive data. Regularly review and update these policies. + +7. **Regular Audits and Compliance Checks:** Conduct regular audits to ensure compliance with data protection regulations and to identify potential vulnerabilities in data handling practices. + +8. **Legal Agreements and Compliance:** Ensure that all data usage complies with relevant laws and regulations such as GDPR, CCPA, and others. Use legal agreements to formalize data sharing and usage terms with third parties. + +9. **Data Deletion Policies:** Implement clear data deletion policies to ensure that data is not retained longer than necessary and is securely disposed of when no longer needed. 
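+
+As a small illustration of the differential-privacy idea in strategy 1, the sketch below releases an aggregate count with calibrated Laplace noise (numpy is assumed to be available; the epsilon value and toy query are illustrative, not tuned recommendations):
+
+```python
+# Minimal sketch: differentially private count via the Laplace mechanism.
+# A counting query has sensitivity 1, so the noise scale is 1 / epsilon.
+import numpy as np
+
+def dp_count(records, epsilon=1.0):
+    noise = np.random.laplace(loc=0.0, scale=1.0 / epsilon)
+    return len(records) + noise
+
+training_rows = list(range(1000))            # stand-in for user records
+print(dp_count(training_rows, epsilon=0.5))  # roughly 1000, plus noise
+```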
+ +By implementing these strategies, organizations can significantly mitigate the IP and privacy risks associated with AI training data.","Strategies to reduce IP and privacy risks in AI training data include conducting periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, documenting training data curation policies, establishing policies for collection and retention of data, and conducting appropriate diligence on training data use to assess intellectual property and privacy risks.",1.0,0.9967141874323785,0.0,0.0,0.2809495664037599 +"What goals does the NIST AI Risk Management Framework pursue for safe, equitable AI, especially in transparency and ethics?","['The \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems.' + 'About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet.']","The NIST AI Risk Management Framework pursues several goals to ensure the development and deployment of safe and equitable AI systems. Key goals include: + +1. **Transparency**: Ensuring that AI systems are clear and understandable to users and stakeholders. This includes making the decision-making processes and data sources used by AI systems more visible and interpretable. + +2. **Accountability**: Establishing mechanisms to hold AI developers and deployers responsible for the impacts of their systems. This involves creating processes for monitoring, evaluating, and addressing any negative consequences that may arise from the use of AI. + +3. **Fairness**: Promoting equitable treatment and outcomes by minimizing biases and ensuring that AI systems do not unfairly disadvantage any group. This includes careful consideration of data used in training AI models to avoid perpetuating existing biases. + +These principles are integrated into every stage of the AI lifecycle, including pre-design, design and development, deployment, use, and testing and evaluation. By adhering to these principles, the NIST framework aims to advance AI technologies that are reliable, safe, transparent, explainable, privacy-enhanced, and fair, ultimately maximizing their benefits while minimizing harm to people and the environment.","The NIST AI Risk Management Framework aims to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) to realize its full commercial and societal benefits without harm to people or the planet. It also supports the development of safe, secure, and trustworthy AI, emphasizing transparency and ethical considerations in its implementation.",0.47619047619047616,0.9310630341390488,0.5,0.99999999995,0.6021394304244616 +How do real-time auditing tools help with AI content authenticity and system monitoring?,[],"Real-time auditing tools play a crucial role in ensuring AI content authenticity and effective system monitoring. Here's how they help: + +1. 
**Content Authenticity**: + - **Verification**: Real-time auditing tools can verify the authenticity of content generated by AI systems by cross-referencing it with trusted sources and databases. This helps in detecting and mitigating the spread of misinformation or fake content. + - **Traceability**: These tools can trace the origin and modifications of content, ensuring that any changes are logged and can be reviewed. This is important for maintaining the integrity of the content and ensuring it has not been tampered with. + +2. **System Monitoring**: + - **Anomaly Detection**: Real-time auditing tools continuously monitor the AI system for unusual activities or anomalies that could indicate potential security threats or malfunctions. This helps in early detection and response to issues that could compromise the system's performance or safety. + - **Compliance**: They ensure that the AI system adheres to regulatory and compliance requirements by monitoring its operations and generating reports that can be reviewed by auditors. This helps in maintaining transparency and accountability. + - **Performance Metrics**: These tools track various performance metrics of the AI system in real-time, helping in identifying areas that need improvement and ensuring the system is operating efficiently. + +In summary, real-time auditing tools enhance the reliability, security, and accountability of AI systems by providing continuous monitoring, verification, and traceability of both content and system operations.","Real-time auditing tools aid in the tracking and validation of the lineage and authenticity of AI-generated data, which is essential for ensuring the integrity and reliability of the content produced by AI systems.",0.0,0.9860748598239605,0.0,0.0,0.5875770183226401 +What key processes and stakeholder interactions ensure automated systems' safety and effectiveness?,"['• Auditing and assessment' + 'the results of public consultation such as concerns raised and any decisions made due to these concerns; risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'receiving community input into the design and use of technologies, public reporting on crucial elements of these systems, better notice and consent procedures that ensure privacy based on context and use case, ability to opt-out of using these systems and receive a fallback to a human process, providing explanations of decisions and how these systems work, the need for governance including training in using these systems, ensuring the technological use cases are genuinely related to the goal task and are locally validated to work, and the need for institution and protection of third party audits to ensure systems continue to be accountable and valid.' + 'Independent evaluation and report\xad ing that confirms that the system is safe and effective, including re\xad porting of steps taken to mitigate potential harms, should be per\xad formed and the results made public whenever possible.'
+ 'Some companies have put in place safeguards that have prevented harm from occurring by ensuring that key development decisions are vetted by an ethics review; others have identified and mitigated harms found through pre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad tion processes that may be applied when considering the use of new automated systems, and existing product develop\xad ment and testing practices already protect the American public from many potential harms.']","Ensuring the safety and effectiveness of automated systems involves several key processes and stakeholder interactions: + +1. **Public Consultation and Community Input:** + - **Receiving Community Input:** Engaging with the community to gather input on the design and use of technologies. + - **Public Reporting:** Providing transparency by publicly reporting on crucial elements of these systems. + - **Notice and Consent Procedures:** Ensuring privacy through better notice and consent procedures based on context and use case. + - **Opt-Out Options:** Allowing individuals to opt-out of using these systems and offering a fallback to a human process. + +2. **Auditing and Assessment:** + - **Ethics Review:** Key development decisions should be vetted by an ethics review to prevent harm. + - **Risk Identification and Management:** Identifying potential risks and taking steps to mitigate them. + - **Performance Testing:** Conducting thorough performance testing, including accuracy checks, differential demographic impact analysis, and error rate evaluations. + - **Ongoing Monitoring:** Establishing ongoing monitoring procedures and regular performance testing reports to ensure continued effectiveness and safety. + +3. **Independent Evaluation and Reporting:** + - **External Audits:** Instituting third-party audits to ensure systems remain accountable and valid. + - **Public Availability of Results:** Making the results of independent evaluations and steps taken to mitigate potential harms publicly available whenever possible. + +4. **Governance and Training:** + - **Training:** Providing training for those using these systems to ensure they understand how to operate them safely and effectively. + - **Local Validation:** Ensuring that technological use cases are genuinely related to the goal task and are locally validated to work. + +These processes and interactions collectively contribute to the development, deployment, and maintenance of safe and effective automated systems.","Key processes and stakeholder interactions that ensure automated systems' safety and effectiveness include ongoing monitoring procedures, clear organizational oversight, consultation with the public during various phases of development, extensive testing before deployment, and proactive risk identification and mitigation. These processes involve continuous evaluation of performance metrics, involvement of organizational stakeholders, engagement with diverse impacted communities, and adherence to domain-specific best practices for testing.",0.9444444444444444,0.9575967573781989,0.6666666666666666,0.6791666666496875,0.5039574403723173 +What are the effects of bias and uniformity in GAI on data accuracy and user feedback?,[],"Bias and uniformity in General Artificial Intelligence (GAI) can have significant effects on data accuracy and user feedback: + +1. 
**Data Accuracy**: + - **Reduced Accuracy**: If a GAI system is biased, it may produce outputs that reflect those biases, leading to inaccurate or skewed data. For example, if the training data of the GAI contains biases against certain groups, the system might generate results that unfairly disadvantage those groups. + - **Loss of Nuance**: Uniformity in GAI can lead to a lack of diversity in the system's responses or predictions. This can result in oversimplified or generalized data that does not accurately capture the complexity of real-world scenarios. + - **Systematic Errors**: Biases in GAI can create systematic errors where certain types of mistakes are consistently made. This can degrade the overall quality and reliability of the data produced by the GAI. + +2. **User Feedback**: + - **Trust and Credibility**: Users are likely to lose trust in a GAI system that consistently produces biased results. If users perceive the system as unfair or discriminatory, they may be less likely to use it or provide honest feedback. + - **Diverse Perspectives**: A lack of diversity in GAI outputs can alienate users with different backgrounds or perspectives. This can lead to a feedback loop where only a narrow range of user experiences are considered, further entrenching bias. + - **Quality of Feedback**: If users recognize that the GAI is biased, they might provide feedback that is aimed at correcting perceived biases rather than honest input about their experiences or needs. This can distort the feedback loop and make it harder to improve the system. + +Overall, addressing bias and promoting diversity in GAI is crucial for maintaining data accuracy and fostering positive, constructive user feedback.","The effects of bias and uniformity in GAI on data accuracy and user feedback are related to harmful bias and homogenization, which can compromise the representativeness and relevance of data used in AI systems. This can lead to inaccuracies in the information generated and may affect the quality of user feedback, as it may not accurately reflect diverse perspectives or experiences.",1.0,0.9542159162244933,0.0,0.0,0.8963312213927193 +Which NSF projects align with federal ethics for automated systems?,[],"I don't have specific information on NSF projects that align with federal ethics for automated systems. However, the National Science Foundation (NSF) does fund a variety of projects that focus on ethical AI and automated systems, often ensuring they align with federal guidelines and ethical standards. These projects typically emphasize transparency, fairness, accountability, and the mitigation of bias in AI systems. + +For detailed information on specific NSF projects, you may want to visit the NSF website or look through their publicly available project databases and reports. They often provide summaries and outcomes of funded projects that include their alignment with ethical standards.","The National Science Foundation (NSF) funds extensive research to help foster the development of automated systems that adhere to and advance their safety, security, and effectiveness. 
Multiple NSF programs support research that directly addresses many of these principles, including the National AI Research Institutes, the Cyber Physical Systems program, the Secure and Trustworthy Cyberspace program, the Formal Methods in the Field program, and the Designing Accountable Software Systems program.",0.0,0.0,0.0,0.0,0.7465460702739077 +What concerns do panelists raise about AI in criminal justice and its effects on communities and democracy?,"[""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed.""]","Panelists raise several concerns regarding the use of AI in criminal justice and its effects on communities and democracy: + +1. **Validity of AI Systems**: There are doubts about the accuracy and reliability of these systems, which may lead to unjust outcomes. + +2. **Bias and Data Issues**: Adverse or irrelevant data can lead to the replication of existing biases and unjust outcomes, impacting fairness in decision-making. + +3. **Confirmation Bias and Over-Reliance**: People may tend to defer to potentially inaccurate automated systems, leading to confirmation bias and reduced critical thinking. + +4. **Lack of Individualization**: AI systems often lack the ability to consider individual circumstances, which goes against the belief that people can change for the better. + +5. **Severe Impact on Individuals**: The use of AI can result in significant personal consequences, such as job loss and loss of custody of children. + +6. **Surveillance and Chilling Effects**: Increased surveillance can create a chilling effect, discouraging free expression and participation in community activities. It also sends negative signals to community members about how they are perceived by authorities. + +These concerns highlight the potential for AI systems to adversely affect both individual lives and the broader democratic principles of fairness, privacy, and community trust.","Panelists raised concerns about the validity of AI systems used in the criminal justice system, noting that adverse or irrelevant data can lead to a replication of unjust outcomes. They highlighted issues such as confirmation bias and the tendency to defer to potentially inaccurate automated systems. The impact of these systems on individuals and communities is seen as potentially severe, with concerns that they lack individualization, work against the belief that people can change for the better, and can lead to job loss and custody issues. Additionally, surveillance can create chilling effects for communities and send negative signals about how they are viewed. 
Panelists emphasized that while transparency is important, it is not sufficient for achieving accountability, and they discussed the need for regulation that includes limits on the type and cost of such technologies.",0.9,0.9695053228263691,0.8,0.9999999999,0.8895698576285149 +"What must lenders disclose to consumers about adverse actions from automated decisions, and how does this tie into the need for transparency in algorithms affecting rights?","['Lenders are required by federal law to notify consumers about certain decisions made about \nthem. Both the Fair Credit Reporting Act and the Equal Credit Opportunity Act require in certain circumstances \nthat consumers who are denied credit receive ""adverse action"" notices. Anyone who relies on the information in a \ncredit report to deny a consumer credit must, under the Fair Credit Reporting Act, provide an ""adverse action""']","Lenders are required by federal law to notify consumers about adverse actions taken based on automated decisions. Specifically, under the Fair Credit Reporting Act (FCRA) and the Equal Credit Opportunity Act (ECOA), lenders must provide ""adverse action"" notices to consumers if they are denied credit based on information in a credit report. + +These adverse action notices must include: +1. The specific reasons for the denial or other adverse action. +2. The credit reporting agency that provided the information used in the decision. +3. An explanation of the consumer's right to obtain a free copy of their credit report from the reporting agency. +4. A disclosure of the consumer's right to dispute the accuracy or completeness of any information in the report. + +This requirement for transparency ensures that consumers are informed about the basis for decisions that significantly affect their rights and opportunities. It also allows consumers to understand and potentially challenge the data or decisions made by automated systems. This transparency is crucial in maintaining fairness and accountability in the use of algorithms, as it helps prevent discriminatory practices and ensures that consumers can take steps to correct any errors that may have impacted the automated decision-making process.","Lenders are required by federal law to notify consumers about certain decisions made about them, specifically through ""adverse action"" notices. This includes providing notice of the reasons a creditor took adverse action on a credit application or existing credit account. This requirement ties into the need for transparency in algorithms affecting rights, as it ensures that consumers are informed about the automated decisions impacting their credit, allowing them to understand and contest those decisions.",0.16666666666666666,0.9385415112961075,0.3333333333333333,0.9999999999,0.9893119239652767 +What steps are taken to inform the public about automated decision-making and their rights?,[],"Informing the public about automated decision-making and their rights typically involves several key steps: + +1. **Transparency Initiatives**: Organizations and governments often engage in transparency initiatives to disclose the use of automated decision-making systems. This includes publishing reports, guidelines, and the methodologies behind these systems. + +2. **Public Awareness Campaigns**: These campaigns aim to educate the public through various media channels about the presence of automated decision-making systems, how they work, and the potential impacts on individuals and society. + +3. 
**User Notifications**: When individuals are directly affected by automated decisions, they may receive notifications explaining that an automated system was used, along with information on how to seek further details or contest the decision. + +4. **Educational Programs and Workshops**: Offering educational programs, workshops, and seminars to help the public understand the basics of AI and their rights related to automated decision-making. + +5. **Online Resources**: Providing accessible online resources, such as FAQs, articles, and videos, to help the public understand automated decision-making processes and their rights. + +6. **Regulatory Requirements**: Governments may enforce regulations that require organizations to inform individuals about the use of automated decision-making and provide information on how to exercise their rights, such as accessing, correcting, or contesting decisions. + +7. **Collaboration with Advocacy Groups**: Partnering with consumer protection and digital rights organizations to spread awareness and provide support to individuals in understanding and exercising their rights. + +These steps aim to ensure that the public is well-informed and empowered to engage with automated decision-making systems in a knowledgeable manner.","Steps taken to inform the public about automated decision-making and their rights include written notice provided by private entities in Illinois regarding the use of biometric information, federal laws requiring lenders to notify consumers about adverse actions related to credit decisions, and California laws mandating that warehouse employees receive written descriptions of quotas. Additionally, major technology companies are developing frameworks for transparency in machine learning systems, and federal agencies are conducting research on explainable AI systems to ensure that the public understands how automated systems impact their rights and opportunities.",0.0,0.986870650048728,0.0,0.0,0.9096368622757971 +How does community assessment help reduce algorithmic bias in the AI Bill of Rights?,[],"Community assessment helps reduce algorithmic bias in the AI Bill of Rights by actively involving the communities that are most impacted by AI systems in the evaluation and development process. By incorporating the perspectives and experiences of diverse community members, developers and policymakers can identify potential biases and harmful effects that may not be apparent from a purely technical standpoint. This inclusive approach ensures that the AI systems are designed and implemented in a way that respects and protects the rights of all individuals, leading to fairer and more equitable outcomes.",The answer to given question is not present in context,0.0,0.9999999999999997,0.0,0.0,0.9282427720067112 +What role does NIST play in AI safety and risk management?,"['About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O.
to build the science necessary for safe, secure, and \ntrustworthy development and use of AI.' + 'National Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.']","The National Institute of Standards and Technology (NIST) plays a crucial role in AI safety and risk management. NIST develops measurements, technology, tools, and standards to ensure that artificial intelligence (AI) is reliable, safe, transparent, explainable, privacy-enhanced, and fair. Their goal is to maximize the commercial and societal benefits of AI while minimizing potential harm to people and the planet. + +NIST's involvement in AI safety and risk management includes: + +1. **Development of Standards and Frameworks**: NIST has created the AI Risk Management Framework (AI RMF), which provides guidelines and best practices for managing AI-related risks. This framework covers various aspects such as AI risks, trustworthiness, and profiles for different AI applications. + +2. **Research and Innovation**: NIST has been conducting both fundamental and applied research on AI for more than a decade, contributing to advancements in the field and understanding of AI safety. + +3. **U.S. AI Safety Institute**: NIST established the U.S. AI Safety Institute and the AI Safety Institute Consortium to further the efforts set in motion by the 2023 Executive Order on Safe, Secure, and Trustworthy AI. This institute focuses on building the scientific foundation needed for the safe, secure, and trustworthy development and use of AI. + +4. **Compliance with Executive Orders**: NIST is actively involved in fulfilling the requirements of the 2023 Executive Order on Safe, Secure, and Trustworthy AI, which mandates the development of measures to ensure AI technologies are developed and used responsibly. + +Through these efforts, NIST aims to advance the safe and beneficial use of AI, addressing the risks and ensuring the trustworthiness of AI systems.","NIST plays a significant role in AI safety and risk management by developing measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence. They are also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI and have established the U.S.
AI Safety Institute and the AI Safety Institute Consortium to build the necessary science for the safe and trustworthy development and use of AI.",1.0,0.9821077055213853,1.0,0.99999999995,0.45635259075806295 +What governance aspects are key for public safety in automated systems?,"['receiving community input into the design and use of technologies, public reporting on crucial elements of these systems, better notice and consent procedures that ensure privacy based on context and use case, ability to opt-out of using these systems and receive a fallback to a human process, providing explanations of decisions and how these systems work, the need for governance including training in using these systems, ensuring the technological use cases are genuinely related to the goal task and are locally validated to work, and the need for institution and protection of third party audits to ensure systems continue to be accountable and valid.' + '• AI actor credentials and qualifications \n• Auditing and assessment \n• Change-management controls']","Key governance aspects for public safety in automated systems include: + +1. **Community Input**: Receiving community input into the design and use of technologies ensures that the systems align with public values and needs. + +2. **Public Reporting**: Transparent reporting on crucial elements of these systems builds trust and accountability. + +3. **Notice and Consent Procedures**: Implementing better notice and consent procedures that ensure privacy based on context and use case helps protect user rights. + +4. **Opt-Out Options**: Providing the ability to opt-out of using automated systems and offering a fallback to a human process ensures that individuals are not forced to rely solely on automated decisions. + +5. **Explanation of Decisions**: Offering clear explanations of decisions and how these systems work aids in understanding and trust. + +6. **Governance and Training**: Ensuring proper governance, including training for those using these systems, helps maintain effective and ethical use. + +7. **Technological Relevance**: Ensuring the technological use cases are genuinely related to the goal task and are locally validated to work ensures effectiveness and appropriateness. + +8. **Third-Party Audits**: Instituting and protecting third-party audits to ensure systems continue to be accountable and valid is crucial for ongoing trust and safety. + +9. **AI Actor Credentials and Qualifications**: Ensuring that individuals and organizations involved in the development and deployment of AI systems have the necessary credentials and qualifications. + +10. **Auditing and Assessment**: Regular auditing and assessment of the systems to ensure they are functioning as intended and meeting safety standards. + +11. **Change-Management Controls**: Implementing change-management controls to handle updates and modifications to the systems responsibly. + +These governance aspects help ensure that automated systems are used safely, ethically, and effectively, protecting public welfare.","Key governance aspects for public safety in automated systems include laying out clear governance structures and procedures, establishing responsibility for oversight, involving organizational stakeholders in governance procedures, and ensuring that those in charge are aware of potential impacts on people's rights and opportunities.
Additionally, it may be appropriate to conduct an independent ethics review before deployment.",0.5714285714285714,0.9920789318627037,0.0,0.99999999995,0.9158605789299302 +How do content provenance standards impact the performance and risks of third-party GAI systems regarding info integrity and IP?,"['Draft and maintain well-defined contracts and service level agreements (SLAs) that specify content ownership, usage rights, quality standards, security requirements, and content provenance expectations for GAI systems.']","Content provenance standards play a crucial role in ensuring the performance and mitigating risks associated with third-party Generative AI (GAI) systems, particularly in terms of information integrity and intellectual property (IP) management. Here's how they impact these areas: + +1. **Information Integrity**: + - **Source Verification**: Content provenance standards require the verification of the sources from which the AI systems generate content. This helps ensure that the information produced is accurate, reliable, and trustworthy. + - **Traceability**: By maintaining a clear record of the origins and transformations of the content, it becomes easier to trace back any errors or misinformation to their source, facilitating timely corrections and accountability. + - **Quality Assurance**: Provenance standards often include quality benchmarks that content must meet, thus ensuring that the output maintains a high level of integrity and accuracy. + +2. **Intellectual Property (IP) Management**: + - **Ownership Clarity**: Detailed provenance records help establish clear ownership of the content, reducing disputes over IP rights. This is crucial when content is generated using third-party systems where multiple parties might claim rights. + - **Usage Rights**: Provenance standards specify the usage rights associated with the content, ensuring that it is used in compliance with legal and contractual obligations. This reduces the risk of unauthorized use or distribution. + - **Compliance and Legal Protection**: Adhering to provenance standards helps organizations comply with legal requirements and protect themselves against potential IP infringement claims. This is especially important when using third-party systems, where the provenance of the data may not be inherently clear. + +Overall, content provenance standards enhance the reliability, accountability, and legal safety of using third-party GAI systems, thereby improving their performance and reducing associated risks.",The answer to given question is not present in context,0.78125,0.908368185934339,1.0,0.0,0.922749531458428 +What goals does the U.S. AI Safety Institute have for NIST's AI risk standards?,[],"The U.S. AI Safety Institute, as part of the National Institute of Standards and Technology (NIST), aims to establish comprehensive AI risk standards that ensure the safe and ethical use of artificial intelligence. These goals include: + +1. **Developing Best Practices**: Creating guidelines and best practices for the development, deployment, and management of AI systems to minimize risks and enhance safety. +2. **Promoting Transparency**: Ensuring that AI systems are transparent and their operations are understandable to users and stakeholders. +3. **Enhancing Security**: Addressing vulnerabilities in AI systems to protect against malicious use and unintended consequences. +4. **Ensuring Fairness**: Mitigating biases in AI algorithms to promote fairness and equity in AI applications. +5.
**Encouraging Accountability**: Establishing accountability mechanisms for AI developers and users to ensure responsible use of AI technologies. + +These goals are aimed at fostering trust in AI systems and ensuring that their deployment benefits society while minimizing potential harms.",The answer to given question is not present in context,0.0,0.9438856420553713,0.0,0.0,0.1693638395957735 +"What org strategies help with AI testing, incident reporting, and risk communication?","['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","Organizational strategies that can help with AI testing, incident reporting, and risk communication include: + +1. **Documentation Practices**: Implement comprehensive logging, recording, and analyzing of AI incidents. This ensures that relevant information is captured and can be shared effectively with AI actors who need to respond to and manage incidents. + +2. **Regular Information Sharing**: Establish routines for regular information sharing among AI stakeholders. This can include updates on testing results, known risks, and incident reports. + +3. **Change Management Records**: Maintain detailed change management records, including version history and metadata. This helps in tracking changes, understanding their impacts, and managing incidents more effectively. + +4. **Version History**: Keep a meticulous version history of AI models and systems. This helps in tracing back changes that might have led to incidents and understanding the evolution of the system over time. + +5. **Metadata**: Collect and manage metadata related to AI systems. Metadata can provide context for incident analysis and aid in risk communication by offering additional insights into system behavior. + +By integrating these strategies, organizations can create a robust framework for ensuring AI safety and effectively managing and communicating risks associated with AI systems.","Organizational strategies that help with AI testing, incident reporting, and risk communication include establishing policies for measuring the effectiveness of content provenance methodologies, identifying the minimum set of criteria necessary for GAI system incident reporting, and verifying information sharing and feedback mechanisms regarding any negative impact from GAI systems.",0.8421052631578947,0.9802974723129108,0.0,0.9999999999,0.4144073002222601 +"What insights did OSTP seek from the biometric tech RFI, and who provided feedback?","['• OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The purpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation.
The 130 responses to this RFI are available in full online114 and were submitted by the below listed organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union']","The Office of Science and Technology Policy (OSTP) issued a Request For Information (RFI) to gather insights on the use and governance of biometric technologies. The purpose of this RFI was to understand several key aspects: + +1. The extent and variety of biometric technologies in past, current, or planned use. +2. The domains in which these technologies are being utilized. +3. The entities making use of these technologies. +4. Current principles, practices, or policies governing the use of biometric technologies. +5. The stakeholders that are, or may be, impacted by the use or regulation of biometric technologies. + +The feedback was provided by a diverse group of organizations and individuals, which included: + +- Accenture +- Access Now +- ACT | The App Association +- AHIP +- AIethicist.org +- Airlines for America +- Alliance for Automotive Innovation +- Amelia Winger-Bearskin +- American Civil Liberties Union + +These responses are available in full online and provide a comprehensive view of the various perspectives on the use and governance of biometric technologies.","OSTP sought insights on the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. Feedback was provided by 130 organizations and individuals, including Accenture, ACLU, Google, Microsoft Corporation, and many others.",0.8888888888888888,0.9132416259566449,0.5,0.9999999999,0.6922434809678822 +What consent practices and design principles can help balance user privacy and surveillance risks in automated systems?,"['You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, de\xad\nvelopers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and de\xad\nletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfus\xad\ncate user choice or burden users with defaults that are privacy invasive.']","Balancing user privacy and surveillance risks in automated systems can be achieved through several consent practices and design principles: + +1. **Informed Consent**: Seek clear and explicit permission from users before collecting, using, or sharing their data. Ensure that users fully understand what data is being collected, how it will be used, who it will be shared with, and the potential risks involved. + +2. **Transparency and Clarity**: Provide transparent information about data practices in a user-friendly manner. 
Avoid legal jargon and present information in a way that is easily understandable to the average user. + +3. **Minimal Data Collection**: Collect only the data that is strictly necessary for the specific context or purpose. Avoid gathering excessive or irrelevant information that could increase privacy risks. + +4. **Privacy by Design**: Integrate privacy protections into the design and architecture of systems from the outset. Ensure that default settings are privacy-protective and that users have control over their data. + +5. **User Control and Agency**: Give users the ability to manage their data preferences easily. This includes options to access, modify, transfer, and delete their data. Respect users' decisions regarding their data. + +6. **Reasonable Expectations**: Align data collection practices with users' reasonable expectations. Do not engage in data practices that are surprising or unexpected to users without their explicit consent. + +7. **Alternative Safeguards**: Where obtaining explicit consent is not possible, implement alternative privacy safeguards to protect user data. This may include anonymization, aggregation, and other technical measures to mitigate privacy risks. + +8. **Avoiding Dark Patterns**: Ensure that user experience and design choices do not obfuscate user options or manipulate users into making privacy-invasive decisions. Avoid design practices that unduly burden users with privacy-unfriendly defaults. + +By adhering to these principles and practices, designers, developers, and deployers of automated systems can help ensure that user privacy is respected and surveillance risks are minimized.","Consent practices that can help balance user privacy and surveillance risks in automated systems include use-specific consent, where consent is sought for specific, narrow use contexts and time durations, and should be re-acquired if conditions change. Additionally, brief and direct consent requests should be used, employing short, plain language to ensure users understand the context and duration of data use. User experience research should be conducted to ensure these requests are accessible and comprehensible, avoiding manipulative design choices. Furthermore, privacy should be protected by design and by default, with privacy risks assessed throughout the development life cycle and data collection minimized to only what is necessary for identified goals.",0.8076923076923077,0.9605373420365382,0.5,0.9999999999,0.5709578222686931 +What are the effects of GAI evaluations on fair content and community input?,[],"The effects of General Artificial Intelligence (GAI) evaluations on fair content and community input can be multifaceted. Here are some key considerations: + +1. **Bias Mitigation**: Evaluations of GAI can help identify and mitigate biases in AI systems, ensuring that the content generated or moderated by these systems is fair and equitable. This includes assessing the training data, algorithms, and output to ensure they do not disproportionately favor or disadvantage any group. + +2. **Transparency and Accountability**: Regular evaluations can promote transparency and accountability, as they can provide insights into how AI systems make decisions. This can build trust within the community and ensure that the AI behaves in a manner consistent with community values and standards. + +3. **Community Feedback**: Incorporating community input into GAI evaluations ensures that the perspectives and concerns of diverse groups are considered. 
This can help in refining the AI to better serve the needs of all community members and address any issues that may arise from its deployment. + +4. **Ethical Considerations**: Evaluations can include ethical considerations, ensuring that the AI's actions align with ethical norms and do not harm individuals or groups. This is crucial for maintaining the social license to operate and the overall acceptance of AI technologies. + +5. **Improving Fairness**: Through continuous evaluation and community input, GAI systems can be iteratively improved to enhance fairness in content creation, moderation, and decision-making processes. This can lead to a more inclusive and fair digital environment. + +6. **Adaptation to Community Needs**: As communities evolve, their needs and values may change. Regular evaluations that include community input can help GAI systems adapt to these changes, ensuring they remain relevant and effective over time. + +In summary, GAI evaluations can have significant positive effects on ensuring fair content and incorporating community input, ultimately leading to more reliable, transparent, and equitable AI systems.",The answer to given question is not present in context,1.0,0.9458220983026614,0.0,0.0,0.17297242745121863 +"What risks come from easier access to violent content, especially regarding CBRN knowledge and misinformation?","['harm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems' connection or access to relevant data and tools.']","Easier access to violent content, particularly concerning Chemical, Biological, Radiological, and Nuclear (CBRN) knowledge, poses significant risks, including: + +1. **Ideation and Design of Harmful Agents**: Individuals or groups could harness AI tools to ideate and design novel harmful chemical or biological agents. This could lead to the creation and dissemination of dangerous substances that might be used in attacks or other malicious activities. + +2. **Facilitation of CBRN Weapons Planning**: AI systems with access to relevant data and tools could potentially be used to facilitate the planning and execution of CBRN weapon attacks. This includes the strategic use of such weapons, which could have catastrophic consequences. + +3. **Spread of Misinformation**: Misleading or false information about CBRN threats could be disseminated more easily, leading to public panic, misguided responses, or even the unintentional spread of harmful practices. + +4. **Access to Sensitive Data**: If GAI systems are connected to or have access to sensitive data regarding CBRN materials and tactics, there is a substantial risk that this information could be exploited by malicious actors. + +Monitoring and controlling access to this type of content, as well as continually assessing the capabilities of AI tools in this domain, are crucial steps in mitigating these risks.","Eased access to violent content can lead to the production of and access to violent, inciting, radicalizing, or threatening content, as well as recommendations to carry out self-harm or conduct illegal activities. This includes difficulty controlling public exposure to hateful and disparaging or stereotyping content.
Additionally, the lowered barrier to generate and support the exchange of content may not distinguish fact from opinion or acknowledge uncertainties, which could be leveraged for large-scale dis- and mis-information campaigns, potentially impacting the operational likelihood of attacks involving CBRN knowledge.",0.5625,0.98102440210631,0.25,0.9999999999,0.7427731395467041 +"What factors on data privacy and content integrity should be considered for a GAI system, especially regarding user feedback and transparency?","['receiving community input into the design and use of technologies, public reporting on crucial elements of these systems, better notice and consent procedures that ensure privacy based on context and use case, ability to opt-out of using these systems and receive a fallback to a human process, providing explanations of decisions and how these systems work, the need for governance including training in using these systems, ensuring the technological use cases are genuinely related to the goal task and are locally validated to work, and the need for institution and protection of third party audits to ensure systems continue to be accountable and valid.']","When considering data privacy and content integrity for a General Artificial Intelligence (GAI) system, especially in relation to user feedback and transparency, several key factors should be taken into account: + +1. **Community Input and Transparency:** + - **Receiving Community Input:** Engage with the community to gather input on the design and use of the GAI system. This ensures that the system aligns with the values, needs, and concerns of the users. + - **Public Reporting:** Regularly publish reports on the crucial elements of the GAI system. This includes detailed information on how the system operates, what data it collects, and how it uses this data. + +2. **Privacy and Consent:** + - **Notice and Consent Procedures:** Implement robust notice and consent procedures that ensure users are fully informed about what data is being collected, how it will be used, and the context in which it will be used. This should be tailored to the specific use case. + - **Opt-out Options:** Provide users with the ability to opt out of using the GAI system and offer a fallback to a human process. This respects user autonomy and privacy preferences. + +3. **Explanations and Transparency:** + - **Explanations of Decisions:** Offer clear and understandable explanations of the decisions made by the GAI system. Users should be able to understand how the system works and why certain decisions were made. + - **Transparency in Operations:** Maintain transparency about the inner workings of the system, including algorithms, data sources, and decision-making processes. + +4. **Governance and Training:** + - **Governance Structures:** Establish governance structures to oversee the use of the GAI system. This includes training for those using the system to ensure they understand its capabilities and limitations. + - **Local Validation:** Ensure the technological use cases are genuinely related to the goal task and have been locally validated to work effectively in the intended context. + +5. **Accountability and Audits:** + - **Third-Party Audits:** Implement and protect the institution of third-party audits to ensure the GAI system remains accountable and its outputs valid over time. These audits help verify that the system continues to operate as intended and adheres to privacy and integrity standards. 
+ +By considering these factors, a GAI system can better respect user privacy, maintain content integrity, and operate transparently, fostering trust and accountability.","Factors on data privacy and content integrity for a GAI system include documenting the extent to which human domain knowledge is employed to improve GAI system performance, reviewing and verifying sources and citations in GAI system outputs, tracking instances of anthropomorphization in GAI system interfaces, verifying GAI system training data and TEVV data provenance, and regularly reviewing security and safety guardrails. Additionally, structured feedback about content provenance should be recorded and integrated from operators, users, and impacted communities, and there should be an emphasis on digital content transparency regarding the societal impacts of AI and the role of diverse and inclusive content generation.",0.7058823529411765,0.9202107347072964,0.14285714285714285,0.9999999999,0.8347691025311021 +What goals does PAVE have for racial equity and valuing marginalized communities?,[],"PAVE, which stands for Parents Amplifying Voices in Education, has clear goals for promoting racial equity and valuing marginalized communities. These goals include: + +1. **Enabling Black and Brown parents to advocate for equity in education**: PAVE aims to empower parents from marginalized communities to take an active role in advocating for fair and equitable educational opportunities for their children. + +2. **Supporting marginalized communities in fighting for safe, equitable school communities**: PAVE is committed to assisting these communities in their efforts to create safe and inclusive educational environments that cater to the needs and well-being of all students. + +By focusing on these goals, PAVE works towards ensuring that the voices of marginalized communities are heard and that their children receive the quality education they deserve.",The answer to given question is not present in context,0.0,0.9547134556180472,0.0,0.0,0.923836985064783 +What steps ensure automated systems reduce bias and promote equity?,[],"To ensure automated systems reduce bias and promote equity, several key steps can be taken: + +1. **Diverse Data Collection**: Ensure that the data used to train the system is representative of all relevant populations. This helps to avoid biases that can arise from over-representing or under-representing certain groups. + +2. **Bias Detection and Mitigation**: Implement methods to detect and correct biases in both the data and the algorithms. Techniques such as fairness-aware machine learning can help identify and reduce biases. + +3. **Regular Audits**: Conduct regular audits of the automated system to identify any biases that may have been introduced over time. This includes reviewing the systemā€™s decisions and outcomes. + +4. **Transparency**: Make the decision-making processes of the automated system transparent. This includes providing explanations for decisions, which can help identify and address bias. + +5. **Inclusive Design**: Involve a diverse group of stakeholders in the design and development process. This helps ensure that multiple perspectives are considered and that the system serves a broader range of users. + +6. **Continuous Monitoring and Feedback**: Establish mechanisms for continuous monitoring and feedback. Encourage users to report any biased outcomes and use this feedback to make necessary adjustments to the system. + +7. 
**Ethical Guidelines and Policies**: Develop and adhere to ethical guidelines and policies that prioritize fairness and equity in the development and deployment of automated systems. + +By following these steps, developers and organizations can work towards creating automated systems that are more fair and equitable.","To ensure automated systems reduce bias and promote equity, several steps should be taken: 1) Conduct proactive equity assessments during the design phase to identify potential discrimination and effects on equity; 2) Use representative and robust data that reflects local communities and is reviewed for bias; 3) Guard against proxies by avoiding the direct use of demographic information in system design and testing for correlations; 4) Allow independent evaluations of potential algorithmic discrimination; 5) Provide reporting of algorithmic impact assessments that detail consultations, equity assessments, and any disparities found, ensuring transparency and public accountability.",0.0,0.990049640211169,0.0,0.0,0.8778248553324092 +How does threat modeling help with GAI risk and org policies on transparency?,[],"Threat modeling is a crucial process in identifying, understanding, and mitigating potential risks associated with General Artificial Intelligence (GAI). Here's how it helps with GAI risk and organizational policies on transparency: + +### GAI Risk Mitigation: +1. **Identification of Risks**: + - **Scenario Analysis**: By simulating different scenarios where GAI might fail or be misused, organizations can identify potential threats. + - **Vulnerability Assessment**: Understanding where and how GAI systems might be vulnerable to attacks, misuse, or unintended consequences. + +2. **Risk Prioritization**: + - **Impact Assessment**: Evaluating the potential impact of different threats helps in prioritizing risks based on their severity. + - **Likelihood Analysis**: Assessing how likely different threats are to materialize can guide resource allocation for mitigation efforts. + +3. **Mitigation Strategies**: + - **Prevention Measures**: Developing and implementing measures to prevent identified risks from occurring. + - **Response Planning**: Creating response plans to quickly and effectively address threats if they materialize. + +### Organizational Policies on Transparency: +1. **Clear Communication**: + - **Disclosure of Risks**: Transparently communicating identified risks and mitigation strategies to stakeholders builds trust and accountability. + - **Operational Transparency**: Providing insights into how GAI systems make decisions and the safeguards in place to prevent misuse. + +2. **Compliance and Ethics**: + - **Regulatory Compliance**: Ensuring that threat modeling aligns with regulatory requirements for transparency and accountability. + - **Ethical Standards**: Adhering to ethical standards by being open about potential risks and the steps taken to mitigate them. + +3. **Stakeholder Engagement**: + - **Inclusive Feedback**: Engaging with diverse stakeholders to gather feedback on identified risks and transparency measures. + - **Collaborative Risk Management**: Working collaboratively with external experts, regulators, and the community to address and mitigate risks.
+ +By systematically identifying and addressing potential risks through threat modeling, organizations can not only safeguard against GAI-related threats but also enhance transparency and trust with their stakeholders.",The answer to given question is not present in context,0.0,0.8793691170250227,0.0,0.0,0.9222280466682282 +How does the AI Incident Database help with AI challenges in cybersecurity and mental health?,[],"The AI Incident Database (AIID) helps address AI challenges in cybersecurity and mental health by providing a repository of incidents where AI systems have caused harm or failed in some way. This database allows researchers, developers, and policymakers to: + +1. **Identify Patterns and Risks**: By analyzing past incidents, stakeholders can identify common patterns and potential risks associated with AI systems. This is crucial for both cybersecurity and mental health, as understanding these patterns can help in mitigating future risks. + +2. **Improve AI System Design**: Insights from the database can inform better design and implementation practices for AI systems, ensuring they are more robust, secure, and capable of handling sensitive tasks in cybersecurity and mental health contexts. + +3. **Develop Guidelines and Regulations**: The documented incidents can serve as case studies to develop more effective guidelines and regulatory frameworks that govern the use and deployment of AI in critical areas like cybersecurity and mental health. + +4. **Enhance Training and Awareness**: The AIID serves as an educational resource, helping train AI professionals and raising awareness about the potential pitfalls and ethical considerations when deploying AI in sensitive fields. + +By learning from past incidents, stakeholders can proactively address vulnerabilities and ethical concerns, thereby improving the overall safety and effectiveness of AI systems in cybersecurity and mental health.",The answer to given question is not present in context,0.0,0.9761827217785876,0.0,0.0,0.9226505877941145 +What steps ensure automated systems avoid bias and maintain safety?,"['• Auditing and assessment' + 'risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Independent evaluation and report\xad ing that confirms that the system is safe and effective, including re\xad porting of steps taken to mitigate potential harms, should be per\xad formed and the results made public whenever possible.']","To ensure that automated systems avoid bias and maintain safety, several steps should be taken, including: + +1. **Risk Identification and Management Assessments**: Conduct thorough assessments to identify and manage potential risks. This involves recognizing areas where bias could occur and understanding how these risks can impact different demographic groups. + +2. **Performance Testing**: Regularly test the system's performance, focusing on metrics such as accuracy, differential demographic impact, and error rates across different demographic groups. Compare these results to previously deployed systems to gauge improvements or regressions. + +3.
**Ongoing Monitoring and Reporting**: Implement ongoing monitoring procedures with regular performance testing reports to ensure the system continues to operate safely and effectively. The frequency of monitoring should be adequate to catch any emerging issues promptly. + +4. **Independent Evaluation**: Engage independent evaluators to assess the system and confirm its safety and effectiveness. They should report on steps taken to mitigate potential harms and make these findings public whenever possible. + +By following these steps, developers and operators can better ensure that automated systems are equitable, transparent, and safe for all users.","To ensure automated systems avoid bias and maintain safety, designers, developers, and deployers should take proactive and continuous measures, including conducting proactive equity assessments as part of system design, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing and mitigation, and maintaining clear organizational oversight. Additionally, independent evaluation and reporting should confirm that the system is safe and effective, including steps taken to mitigate potential harms.",0.7058823529411765,0.9857987105967108,0.5,0.5833333333041666,0.6408396567834427 +What methods work for evaluating biases in AI content with diverse user feedback?,['Evaluate potential biases and stereotypes that could emerge from the AI-generated content using appropriate methodologies including computational testing methods as well as evaluating structured feedback input.'],"Evaluating biases in AI-generated content using diverse user feedback can be approached using a combination of computational testing methods and structured feedback evaluation. Here are some effective methods: + +1. **Computational Testing Methods:** + - **Adversarial Testing:** Introduce inputs designed to reveal biases in the AI system. This includes edge cases or controversial topics that might trigger biased responses. + - **Algorithmic Audits:** Regularly audit the AI's outputs to check for biases across various demographic groups. This can involve statistical analysis of how different groups are represented in the outputs. + - **Bias Detection Algorithms:** Use specialized algorithms that can detect known biases in text, such as gender or racial biases. These algorithms can flag potentially biased content for further review. + +2. **Structured Feedback Evaluation:** + - **User Surveys and Feedback Forms:** Collect feedback from a diverse group of users about their experience with the AI system. Include questions that specifically address perceived biases or stereotypes in the content. + - **Focus Groups:** Conduct focus groups with participants from different backgrounds to discuss their experiences and perceptions of the AI-generated content. This qualitative data can provide deeper insights into potential biases. + - **Crowdsourced Feedback Platforms:** Use platforms like Mechanical Turk or similar services to gather feedback from a large and diverse set of users. Analyze this feedback to identify common themes and concerns regarding bias. + +3. **Intersectional Analysis:** + - Ensure that feedback is gathered from users representing various intersections of identity (e.g., race, gender, socioeconomic status) to understand how multiple factors might influence the perception of bias. + +4. 
**Continuous Monitoring and Iteration:** + - Implement a continuous feedback loop where user feedback is regularly incorporated into the model's training and evaluation process. This helps in addressing new biases that might emerge over time. + +Combining these methodologies provides a comprehensive approach to evaluating and mitigating biases in AI-generated content. It ensures that the AI system is more inclusive and fair in its outputs.","The context mentions evaluating potential biases and stereotypes that could emerge from AI-generated content using appropriate methodologies, including computational testing methods as well as evaluating structured feedback input. Additionally, it suggests recording and integrating structured feedback about content provenance from operators, users, and potentially impacted communities through methods such as user research studies, focus groups, or community forums.",1.0,0.9596124720690692,0.5,0.9999999999,0.4549628705240867 +What are the U.S. AI Safety Institute's goals for NIST's AI risk standards?,[],"I don't have information on the specific goals of the U.S. AI Safety Institute for NIST's AI risk standards. If you need detailed and up-to-date information on this topic, I recommend checking the official websites or recent publications from the U.S. AI Safety Institute and NIST (National Institute of Standards and Technology).",The answer to given question is not present in context,0.0,0.0,0.0,0.0,0.1820806986621076 +"How might AI tech reinforce inequities in education, housing, and jobs, and add burdens on those using social welfare?",[],"AI technology has the potential to reinforce existing inequities in various sectors, including education, housing, and employment, and can also add burdens on those using social welfare. Here are some ways this can happen: + +1. **Education**: + - **Bias in Learning Tools**: AI-driven educational tools and platforms may contain biases that disadvantage certain groups of students. For example, if an AI system is trained on data that predominantly reflects the experiences of a particular demographic, it may not cater effectively to the needs of students from different backgrounds. + - **Access to Technology**: Not all students have equal access to the necessary technology and internet connectivity required to benefit from AI-enhanced educational tools, leading to a digital divide. + +2. **Housing**: + - **Discriminatory Algorithms**: AI systems used in housing markets, such as those for tenant screening or mortgage approval, can perpetuate historical biases. If these systems are trained on biased data, they may unfairly disadvantage minority groups. + - **Predictive Policing**: AI systems used in predictive policing can disproportionately target minority communities, affecting their living conditions and perpetuating housing inequities. + +3. **Jobs**: + - **Hiring Bias**: AI in recruitment processes can reinforce existing biases in hiring if the training data reflects historical prejudices. This can result in discriminatory hiring practices that disadvantage certain groups. + - **Job Displacement**: Automation and AI can lead to job displacement, disproportionately affecting low-income workers and those in certain industries that are more susceptible to automation. + +4. **Social Welfare**: + - **Inequitable Resource Allocation**: AI systems used to determine eligibility for social welfare programs might not account for the complexities of individual circumstances, leading to unfair outcomes. 
+ - **Surveillance and Privacy**: AI-driven surveillance systems can add burdens on welfare recipients by subjecting them to increased scrutiny and privacy invasions, which can be both stressful and stigmatizing. + +Overall, the deployment of AI in these areas needs to be carefully managed to ensure that it does not exacerbate existing inequalities. This involves using diverse and representative data sets, implementing rigorous fairness and bias audits, and ensuring transparency and accountability in AI systems.","AI technology can reinforce inequities in education, housing, and jobs by being used to limit access to equal opportunities, such as through automated tenant background screening, discrimination in automated hiring screening, and remote proctoring systems. Additionally, these technologies can shift the burden of oversight from employers to workers, schools to students, and landlords to tenants, which diminishes equality of opportunity. In the context of social welfare, AI systems may reduce the burden for government agencies but increase the burden on individuals interacting with these technologies, potentially creating feedback loops that reinforce inequality.",1.0,0.9628036627404879,0.0,0.0,0.5879809668258318 +What role do algorithmic impact assessments play in clarifying accountability for automated systems?,"['evaluation and plain language reporting in the form of an algorithmic impact assessment, including disparity testing results and mitigation information, should be performed and made public whenever possible to confirm these protections.']","Algorithmic impact assessments (AIAs) play a crucial role in clarifying accountability for automated systems by providing a structured evaluation and transparent reporting mechanism. They help in: + +1. **Identifying Potential Risks:** AIAs systematically assess the potential risks and impacts of deploying an automated system, including ethical, social, and legal implications. + +2. **Disparity Testing:** They include disparity testing results to identify and evaluate any biases or unequal impacts the automated system may have on different groups. + +3. **Mitigation Information:** AIAs document the strategies and measures taken to mitigate identified risks, ensuring that any potential harms are addressed proactively. + +4. **Public Accountability:** By making the results and mitigation information public whenever possible, AIAs ensure that there is transparency and public scrutiny, which holds the developers and deployers of automated systems accountable for their actions and decisions. + +In summary, AIAs enhance accountability by ensuring that the impacts of automated systems are thoroughly evaluated, documented, and made transparent to the public.",The answer to given question is not present in context,0.15,0.9393274994894981,0.0,0.0,0.9227347609671405 +How does human input affect fairness and fallback in automated systems?,[],"Human input plays a critical role in determining both the fairness and the fallback mechanisms of automated systems. Here are some key ways in which human involvement impacts these aspects: + +1. **Data Quality and Bias Mitigation:** + - **Fairness:** Human input is crucial in curating and validating the data used to train automated systems. Biases present in the training data can lead to unfair outcomes, so careful selection and preprocessing of data by humans can help mitigate these biases and ensure a fairer system. 
+ - **Fallback:** Humans can identify and correct biases that the system may exhibit or learn from the data. This helps in creating fallback mechanisms that can address and rectify unfair behaviors when they are detected. + +2. **Algorithm Design and Ethical Considerations:** + - **Fairness:** Humans are responsible for designing the algorithms and models that power automated systems. By incorporating ethical considerations and fairness constraints during the design phase, humans can ensure that the system produces equitable outcomes. + - **Fallback:** Human oversight can provide guidelines for fallback procedures when the automated system fails or produces questionable results. This includes setting up manual review processes or developing alternative decision pathways. + +3. **Continuous Monitoring and Feedback:** + - **Fairness:** Ongoing human monitoring allows for the identification of any drift or degradation in system performance over time, particularly concerning fairness. Regular audits and feedback loops can help maintain and improve fairness throughout the system's lifecycle. + - **Fallback:** Human operators can be involved in real-time monitoring and intervention, providing immediate fallback options if the system encounters unexpected issues or fails to perform as intended. + +4. **Transparency and Explainability:** + - **Fairness:** Humans can help ensure that automated systems are transparent and their decision-making processes are explainable. This transparency is vital for assessing and ensuring fairness, as it allows stakeholders to understand, trust, and verify the decisions made by the system. + - **Fallback:** Explainability aids in diagnosing problems when a fallback is needed. Understanding why a system made a particular decision can help in designing effective fallback actions and improving the system to prevent future issues. + +In summary, human input is essential in shaping the fairness of automated systems and establishing robust fallback mechanisms. By actively participating in data preparation, algorithm design, continuous monitoring, and ensuring transparency, humans can significantly influence the equity and reliability of these systems.","The context does not explicitly address how human input affects fairness and fallback in automated systems. However, it mentions that human consideration and fallback mechanisms should be proportionate, accessible, equitable, timely, and effective, which implies that human input is crucial in ensuring these aspects are upheld. The effectiveness of human involvement is emphasized through training, assessment, and oversight to combat automation bias and ensure appropriate results.",1.0,0.9946093379349089,0.0,0.0,0.8947758613249892 +What factors to consider for human options in automated systems per the AI Bill of Rights?,[],"The AI Bill of Rights emphasizes ensuring human options and control in automated systems. Here are the key factors to consider: + +1. **Right to Explanation**: Users should have the right to understand how automated decisions are made and the logic behind them. This fosters transparency and trust in the system. + +2. **Right to Contest**: Users should have the ability to contest and seek redress for decisions made by automated systems. This ensures that users are not unfairly disadvantaged by automated decisions. + +3. 
**Human Oversight**: There should be opportunities for human oversight and intervention in automated processes, particularly in high-stakes situations where decisions can significantly impact individuals' lives. + +4. **Accessibility and Usability**: Automated systems should be designed to be accessible and usable by all individuals, including those with disabilities, ensuring that no one is excluded from the benefits of automation. + +5. **Informed Consent**: Users should be informed about the use of automated systems and provide consent, particularly when their data is being used to make decisions about them. + +6. **Data Privacy and Security**: Ensuring that users' data is protected and that their privacy is respected is crucial in maintaining trust and safeguarding individuals' rights. + +These factors aim to ensure that automated systems are used responsibly and ethically, with adequate provisions for human control and intervention.",The answer to given question is not present in context,0.0,0.9549684542160496,0.0,0.0,0.17753085651710138 +How does a document retention policy support GAI system integrity?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","A document retention policy supports Generative AI (GAI) system integrity by ensuring that critical information is systematically logged, recorded, and maintained over time. This practice allows for: + +1. **Incident Management**: Detailed records of incidents enable the identification and analysis of issues, allowing for more effective responses and mitigation strategies. +2. **Information Sharing**: Facilitates smoother and more efficient sharing of information with relevant AI actors, such as developers, users, and regulators, ensuring that all parties are informed and can collaborate effectively. +3. **Change Management**: Maintains comprehensive records of changes, version history, and metadata, helping track the evolution of the system and understand the context of any issues that arise. +4. **Audit and Compliance**: Ensures that the system complies with legal and regulatory requirements by providing a clear and organized trail of documentation. +5. **Learning and Improvement**: Analyzing past records can help identify patterns and areas for improvement, contributing to the ongoing enhancement of the GAI system's safety and performance. + +Overall, a robust document retention policy helps maintain transparency, accountability, and continuous improvement, which are crucial for the integrity and reliability of GAI systems.",The context does not provide specific information on how a document retention policy supports GAI system integrity.,0.53125,0.931039171850767,1.0,0.0,0.26117726769374294 +What challenges did panelists see at the tech-health equity intersection?,"['delivery, various panelists pointed to a number of concerns including access to and expense of broadband service, the privacy concerns associated with telehealth systems, the expense associated with health monitoring devices, and how this can exacerbate equity issues. 
On the issue of technology enhanced care, some panelists spoke extensively about the way in which racial biases and the use of race in medicine perpetuate harms and embed prior discrimination, and the importance of ensuring that the technologies used in medical care were accountable to the relevant stakeholders. Various panelists emphasized the importance of having the voices of those subjected to these technologies be heard.' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised concerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of unjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated systems. Throughout, many of the panelists individually emphasized that the impact of these systems on individuals and communities is potentially severe: the systems lack individualization and work against the belief that people can change for the better, system use can lead to the loss of jobs and custody of children, and surveillance can lead to chilling effects for communities and sends negative signals to community members about how they're viewed.""]","Panelists identified several challenges at the intersection of technology and health equity: + +1. **Access and Expense of Broadband Service**: Limited access to and the high cost of broadband services can prevent equitable access to telehealth technologies. + +2. **Privacy Concerns**: The use of telehealth systems raises significant privacy issues that need to be addressed to protect patient information. + +3. **Cost of Health Monitoring Devices**: The expense associated with health monitoring devices can exacerbate existing equity issues, making it harder for underserved populations to benefit from these technologies. + +4. **Racial Biases in Medicine**: There are concerns about racial biases and the use of race in medical practices, which can perpetuate harms and embed prior discrimination within technology-enhanced care systems. + +5. **Accountability of Technologies**: It's crucial to ensure that the technologies used in medical care are accountable to the relevant stakeholders and that the voices of those subjected to these technologies are heard. + +Overall, these challenges underscore the importance of addressing both the systemic and technological aspects to promote health equity.","Panelists discussed several challenges at the tech-health equity intersection, including access to and expense of broadband service, privacy concerns associated with telehealth systems, and the expense associated with health monitoring devices, which can exacerbate equity issues. They also highlighted the need for accountability in the technologies used in medical care, particularly regarding racial biases and the use of race in medicine, which perpetuate harms and embed prior discrimination.",0.8461538461538461,0.9511438486112066,1.0,0.99999999995,0.9945333361186519 +How do transparency policies help manage GAI risks and ensure compliance?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. 
Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","Transparency policies play a crucial role in managing GAI (Generative AI) risks and ensuring compliance in several ways: + +1. **Facilitating Accountability**: By maintaining clear documentation practices, such as logging, recording, and analyzing GAI incidents, organizations can hold relevant AI actors accountable. This ensures that any deviations from expected behavior are tracked and addressed systematically. + +2. **Enabling Information Sharing**: Regular information sharing and maintaining comprehensive change management records, version history, and metadata empower AI actors to respond to and manage AI incidents more effectively. Transparency in these areas helps in identifying and rectifying issues promptly. + +3. **Supporting Compliance**: Transparent documentation and communication practices help organizations demonstrate compliance with regulatory requirements. This includes providing evidence of how decisions were made, how incidents were handled, and how the AI systems were managed and updated. + +4. **Building Trust**: Transparency fosters trust among stakeholders, including users, regulators, and other AI actors. When stakeholders have access to detailed information about the GAI system's operations and incident responses, they are more likely to trust the system and the organization behind it. + +5. **Improving Incident Response**: Detailed records and transparent reporting make it easier for AI actors to understand the context and specifics of any incidents that occur. This understanding is crucial for developing effective responses and preventing future incidents. + +Overall, transparency policies create a structured framework for managing risks associated with GAI, ensuring that all relevant actors are informed, accountable, and compliant with regulatory standards.","Transparency policies help manage GAI risks by establishing processes for documenting the origin and history of training data and generated data for GAI applications. This promotes digital content transparency while balancing the proprietary nature of training approaches, thereby ensuring compliance with data privacy, information integrity, and intellectual property standards.",0.6428571428571429,0.9745964862125299,0.0,0.9999999999,0.31280964757769536 +How important are clear decision-making explanations in automated systems for risk assessment and user understanding?,[],"Clear decision-making explanations in automated systems are critically important for both risk assessment and user understanding. Here are a few key reasons why: + +1. **Transparency**: Users and stakeholders need to understand how decisions are made to trust the system. Transparency helps in building confidence in the automated processes. + +2. **Accountability**: Clear explanations allow for accountability by making it easier to trace back the decision-making process and identify any potential biases or errors. + +3. **Compliance**: Many regulatory frameworks require that automated systems, especially those used in sensitive areas like finance, healthcare, and criminal justice, provide clear explanations for their decisions. + +4. **User Empowerment**: When users understand how decisions are made, they are better equipped to challenge or appeal those decisions if necessary. 
This is particularly important in high-stakes scenarios where the outcome can significantly impact individuals' lives. + +5. **Improvement and Debugging**: Clear explanations can help developers and analysts identify areas for improvement within the system, facilitating continuous refinement and debugging. + +6. **Ethical Considerations**: Providing clear explanations aligns with ethical principles of fairness and respect for individuals affected by automated decisions. + +In summary, clear decision-making explanations enhance trust, accountability, compliance, user empowerment, system improvement, and ethical integrity in automated systems.","The context emphasizes the importance of clear decision-making explanations in automated systems, particularly for risk assessment and user understanding. It states that explanations should accurately reflect the factors influencing decisions and be tailored to the purpose and audience, ensuring they are understandable and accessible. This is crucial in settings with high consequences, where transparency and clarity are necessary for accountability and user trust.",0.0,0.9511840490917916,0.0,0.0,0.7521476419747697 +"What risks do prompt injection attacks pose to GAI, especially regarding misinformation and data poisoning?","['GAI-based systems present two primary information security risks: GAI could potentially discover or enable new cybersecurity risks by lowering the barriers for or easing automated exercise of offensive capabilities; simultaneously, it expands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data poisoning.']","Prompt injection attacks pose significant risks to Generative AI (GAI) systems, particularly concerning misinformation and data poisoning. Here are the key risks: + +1. **Misinformation:** + - **Manipulated Outputs:** Attackers can craft specific inputs (prompts) that cause the GAI to generate misleading or false information. This can be particularly harmful if the GAI is used in environments where accurate information is crucial, such as news generation, financial advice, or medical recommendations. + - **Erosion of Trust:** Consistent exposure to manipulated outputs can erode user trust in the GAI system, as users may become uncertain about the reliability of the information provided. + +2. **Data Poisoning:** + - **Corrupted Training Data:** Prompt injection can be used to insert malicious data into the training datasets of GAI systems. This corrupted data can then lead to the model learning and propagating false or harmful information. + - **Model Degradation:** Over time, data poisoning can degrade the overall performance and accuracy of the GAI, leading to broader negative impacts across all its applications. + +Overall, these risks highlight the importance of robust security measures to protect GAI systems from prompt injection attacks and ensure the integrity and reliability of the information they generate.","Prompt injection attacks pose significant risks to GAI by enabling attackers to modify inputs to the system, leading to unintended behaviors and potential misinformation. Direct prompt injections can result in malicious prompts being inputted, causing negative consequences for interconnected systems. Indirect prompt injection attacks exploit vulnerabilities in LLM-integrated applications, potentially leading to the theft of proprietary data or the execution of malicious code. 
Additionally, data poisoning is a risk where adversaries compromise training datasets, manipulating the outputs or operations of GAI systems, which can exacerbate misinformation and undermine the reliability of generated content.",0.5,0.9067188400518439,0.5,0.9999999999,0.8393763166756766 +What key processes and stakeholder interactions ensure automated systems' safety and effectiveness?,"['• Auditing and assessment • Change-management controls' + 'the results of public consultation such as concerns raised and any decisions made due to these concerns; risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'receiving community input into the design and use of technologies, public reporting on crucial elements of these systems, better notice and consent procedures that ensure privacy based on context and use case, ability to opt-out of using these systems and receive a fallback to a human process, providing explanations of decisions and how these systems work, the need for governance including training in using these systems, ensuring the technological use cases are genuinely related to the goal task and are locally validated to work, and the need for institution and protection of third party audits to ensure systems continue to be accountable and valid.' + 'Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.' + 'Some companies have put in place safeguards that have prevented harm from occurring by ensuring that key development decisions are vetted by an ethics review; others have identified and mitigated harms found through pre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consultation processes that may be applied when considering the use of new automated systems, and existing product development and testing practices already protect the American public from many potential harms.']","Ensuring the safety and effectiveness of automated systems involves several key processes and interactions with various stakeholders. Here are the main components: + +1. **Auditing and Assessment**: + - Independent evaluations and reporting should confirm that the system is safe and effective. + - Reporting of steps taken to mitigate potential harms should be performed and made public where possible. + +2. **Change-management Controls**: + - These controls are necessary to manage and oversee any modifications to the system to ensure continuous compliance with safety and effectiveness standards. + +3. **Public Consultation and Community Input**: + - Governments and organizations should engage in public consultation processes to gather community input on the design and use of technologies. + - Concerns raised during these consultations should be addressed, and any decisions made due to these concerns should be transparent. + +4. **Risk Identification and Management**: + - Conducting risk assessments to identify potential harms and outlining steps to mitigate these risks is crucial. 
+ - Performance testing results, including accuracy, differential demographic impact, and error rates, should be documented and compared to previously deployed systems. + +5. **Ongoing Monitoring and Reporting**: + - Implementing regular performance testing and ongoing monitoring procedures is essential. + - Monitoring frequency and reporting on performance should be maintained to ensure continuous oversight. + +6. **Public Reporting and Transparency**: + - Public reporting on key elements of the systems, such as the results of performance testing and steps taken to mitigate harms, should be encouraged. + - Providing explanations of decisions and how these systems work helps maintain transparency. + +7. **Governance and Training**: + - Governance structures, including ethics reviews, should be established to vet key development decisions. + - Training should be provided to ensure that users understand how to properly use these systems and that the systems are used for their intended purposes. + +8. **Third-Party Audits**: + - Independent third-party audits should be instituted and protected to ensure systems remain accountable and valid over time. + - These audits help verify that the systems operate as intended and do not present unforeseen risks. + +9. **Notice and Consent Procedures**: + - Ensuring privacy through better notice and consent procedures based on context and use case is important. + - Providing the ability to opt-out of using automated systems and offering a fallback to a human process when necessary. + +By integrating these processes and fostering interactions with relevant stakeholders, the safety and effectiveness of automated systems can be significantly enhanced.","Key processes and stakeholder interactions that ensure automated systems' safety and effectiveness include ongoing monitoring procedures, clear organizational oversight, consultation with the public during various phases of development, extensive testing before deployment, and proactive risk identification and mitigation. These processes involve continuous evaluation of performance metrics, involvement of organizational stakeholders, engagement with diverse impacted communities, and adherence to domain-specific best practices for testing.",0.9,0.9575967573781989,0.6666666666666666,0.6791666666496875,0.8867917382282611 +What strategies can help prevent algorithmic bias in automated systems for underserved communities?,[],"To prevent algorithmic bias in automated systems, especially for underserved communities, several strategies can be employed: + +1. **Diverse Data Collection**: Ensure that the data used to train algorithms is representative of all segments of the population, including underserved communities. This helps to reduce biases that arise from skewed or incomplete data sets. + +2. **Bias Audits and Testing**: Implement regular audits and bias testing throughout the development and deployment phases. This can help identify and mitigate biases before they affect real-world outcomes. + +3. **Inclusive Design and Development Teams**: Assemble diverse teams that include members from underserved communities to provide varied perspectives and insights during the design and development processes. + +4. **Transparent and Explainable AI**: Develop algorithms that are transparent and explainable, making it easier to understand how decisions are made and to identify potential biases. + +5. 
**Community Engagement**: Engage with the communities that are affected by the automated systems to understand their needs, concerns, and experiences. This engagement can inform better design and implementation practices. + +6. **Fairness Constraints and Metrics**: Integrate fairness constraints and metrics into the algorithmic design to ensure equitable outcomes across different groups. + +7. **Continuous Monitoring and Feedback Loops**: Establish mechanisms for continuous monitoring and feedback to detect and address biases as they arise over time. + +8. **Ethical Guidelines and Regulations**: Adhere to ethical guidelines and regulations that promote fairness and non-discrimination in AI systems. + +By incorporating these strategies, it is possible to create more equitable and fair automated systems that better serve all communities, including those that are underserved.","Strategies to prevent algorithmic bias in automated systems for underserved communities include conducting proactive equity assessments during the design phase, ensuring the use of representative and robust data, and guarding against the use of proxies that may lead to algorithmic discrimination. These strategies involve reviewing potential input data, historical context, and accessibility for people with disabilities, as well as testing for correlation between demographic information and attributes to identify and remove any proxies.",0.0,0.9712885477980597,0.0,0.0,0.6118886066790004 +What drives the choice of humans over automation in sensitive areas?,[],"The choice of humans over automation in sensitive areas is often driven by several key factors: + +1. **Ethical Considerations**: In areas such as healthcare, law enforcement, and legal decisions, the ethical implications of automated decisions can be significant. Human judgment is often preferred to ensure that ethical standards are upheld and that decisions are made with empathy and moral reasoning. + +2. **Complexity and Nuance**: Many sensitive tasks require a deep understanding of context and nuance that current AI systems may not possess. Human experts can interpret complex situations and make decisions based on a broad range of factors that go beyond what can be programmed into an algorithm. + +3. **Accountability**: When decisions have serious consequences, it is important to have a clear line of accountability. Humans can be held responsible for their actions, whereas it can be more difficult to attribute accountability in the case of automated systems. + +4. **Trust and Acceptance**: People may have greater trust in human decision-makers, especially in high-stakes situations. Human oversight can help ensure that automated systems are used appropriately and can provide a sense of security to those affected by the decisions. + +5. **Flexibility and Adaptability**: Humans can adapt to new information and changing circumstances more readily than automated systems. In dynamic environments, human judgment can be crucial for making timely and appropriate decisions. + +6. **Legal and Regulatory Requirements**: There may be legal or regulatory frameworks in place that mandate human involvement in certain types of decisions. These requirements are often in place to protect individuals' rights and ensure fairness. 
+ +Overall, while automation can offer efficiency and consistency, the unique capabilities and qualities of human judgment are often essential in sensitive and complex areas.","The choice of humans over automation in sensitive areas is driven by the need for human consideration and remedy, particularly in contexts where automated systems may fail, produce errors, or where individuals wish to appeal or contest the impacts of these systems. This choice is also influenced by the requirement for appropriateness based on reasonable expectations, ensuring broad accessibility, and protecting the public from especially harmful impacts.",1.0,0.9844915602711537,0.0,0.0,0.5338551039869095 +What ensures good governance in automated systems?,['• Auditing and assessment'],"Good governance in automated systems is ensured through several key practices, including: + +1. **Auditing and Assessment**: Regular audits and assessments can identify potential risks and ensure that the system operates as intended. This involves evaluating both the technical and ethical aspects of the system. + +2. **Transparency**: Ensuring that the decision-making processes of automated systems are transparent helps stakeholders understand how decisions are made and on what basis. + +3. **Accountability**: Establishing clear accountability mechanisms ensures that there are designated individuals or teams responsible for the outcomes of the automated systems. + +4. **Compliance with Regulations**: Ensuring that automated systems comply with relevant laws and regulations helps in maintaining ethical standards and protecting users' rights. + +5. **Robust Testing and Validation**: Thoroughly testing and validating the systems before deployment can help in catching potential problems early on. + +6. **Stakeholder Engagement**: Involving stakeholders in the development and deployment process can help address concerns and ensure the system meets the needs of all parties involved. + +7. **Continuous Monitoring and Improvement**: Continuously monitoring the system's performance and making improvements based on feedback and new developments helps in maintaining its reliability and effectiveness over time. + +By combining these practices, organizations can ensure good governance in their automated systems, making them more trustworthy and effective.","Good governance in automated systems is ensured by laying out clear governance structures and procedures, which include clearly-stated governance procedures before deploying the system, as well as the responsibility of specific individuals or entities to oversee ongoing assessment and mitigation. Organizational stakeholders should be involved in establishing these governance procedures, and responsibility should rest high enough in the organization to allow for prompt decision-making regarding resources, mitigation, incident response, and potential rollback. Additionally, those in charge should be aware of any use cases with the potential for meaningful impact on people's rights, opportunities, or access, and it may be appropriate for an independent ethics review to be conducted before deployment.",0.08333333333333333,0.9656997624194932,0.0,0.0,0.5289427759364693 +What links do harmful AI biases have to data privacy or misinformation risks?,[],"Harmful AI biases, data privacy, and misinformation risks are interconnected in several ways: + +1. **Data Privacy and Bias**: The data used to train AI systems often contains personal information. 
If this data is not handled with strict privacy measures, it can lead to privacy breaches. Moreover, biased data can result in AI systems that perpetuate stereotypes or unfair practices, affecting individuals based on their private information such as race, gender, or socioeconomic status. + +2. **Bias and Misinformation**: Biased AI models can contribute to the spread of misinformation. For instance, biased algorithms in social media platforms might prioritize or amplify misleading content that aligns with existing biases, leading to a skewed representation of information and further entrenching false beliefs. + +3. **Data Privacy and Misinformation**: Weak data privacy protections can facilitate the unauthorized use of personal data to create targeted misinformation campaigns. For example, personal data can be exploited to craft convincing fake news or deepfakes aimed at specific individuals or groups, making it harder for people to discern truth from falsehood. + +4. **Feedback Loops**: The interplay between these elements can create harmful feedback loops. Biased data can lead to privacy invasions and the spread of misinformation, which in turn can generate more biased data, perpetuating the cycle. + +Addressing these interconnected issues requires comprehensive strategies that include ensuring data privacy, mitigating biases in AI systems, and combating misinformation through accurate information dissemination and critical thinking education.","The context does not explicitly link harmful AI biases to data privacy or misinformation risks. However, it mentions risks such as harmful biases, data privacy, and misinformation in separate sections, indicating that these issues are recognized but not directly connected in the provided text.",0.0,0.9479098122446805,0.0,0.0,0.8973425128100697 +What to review for ethical use of sensitive data?,"['• Data provenance' + 'a description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted, a summary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the results of public consultation such as concerns raised and any decisions made due to these concerns; risk identification and management assessments and any steps taken to mitigate potential harms; the results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems; ongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']","When reviewing the ethical use of sensitive data, several key aspects should be considered to ensure responsible handling and compliance with ethical standards. Here are some points to review: + +1. **Data Provenance**: + - Ensure a comprehensive description of the data sources used, including how the data was collected, processed, and interpreted. + - Identify any missing, incomplete, or erroneous data. + - Justify the relevancy of the data to the intended purpose. + - Review public consultation results, including concerns raised and decisions made to address those concerns. + - Assess and manage risks, implementing steps to mitigate potential harms. + +2. **Consent and Privacy**: + - Confirm that data collection complies with privacy laws and regulations. + - Ensure informed consent has been obtained from individuals whose data is being used. 
+ - Implement measures to protect the privacy and confidentiality of the data. + +3. **Data Security**: + - Ensure robust security measures are in place to protect sensitive data from unauthorized access and breaches. + - Regularly update and review security protocols. + +4. **Fairness and Bias**: + - Evaluate the data for potential biases and take steps to mitigate them. + - Perform differential demographic impact assessments to ensure fairness across different groups. + - Compare outcomes to previously deployed systems to identify and address any disparities. + +5. **Transparency and Accountability**: + - Maintain transparency about how data is used and for what purposes. + - Document decision-making processes and be accountable for data use practices. + - Provide clear explanations and justifications for data handling practices. + +6. **Ongoing Monitoring and Performance Testing**: + - Implement ongoing monitoring procedures to track the performance and impact of data use. + - Conduct regular performance testing reports, including accuracy, error rates, and demographic impact. + - Adjust practices based on monitoring outcomes to continually improve ethical data use. + +By thoroughly reviewing these aspects, organizations can ensure the ethical use of sensitive data and mitigate potential risks associated with its handling.","Any use of sensitive data or decision processes based in part on sensitive data that might limit rights, opportunities, or access should go through a thorough ethical review and monitoring, both in advance and by periodic review. This may involve an independent ethics committee or a similarly robust process. The ethical review may determine that data should not be used or shared for specific uses even with consent.",0.5357142857142857,0.9599016674591888,0.3333333333333333,0.49999999995,0.5611122334163716 +What are the concerns with surveillance tech in education and healthcare?,"[""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised concerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of unjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated systems. Throughout, many of the panelists individually emphasized that the impact of these systems on individuals and communities is potentially severe: the systems lack individualization and work against the belief that people can change for the better, system use can lead to the loss of jobs and custody of children, and surveillance can lead to chilling effects for communities and sends negative signals to community members about how they're viewed."" + 'pre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring should not be used in education, work, housing, or in other contexts where the use of such surveillance technologies is likely to limit rights, opportunities, or access. Whenever possible, you should have access to reporting that confirms your data decisions have been respected and provides an assessment of the potential impact of surveillance technologies on your rights, opportunities, or access.' + 'the privacy concerns associated with telehealth systems']","There are several concerns associated with the use of surveillance technology in education and healthcare: + +### Education: +1. 
**Privacy and Civil Liberties**: Continuous surveillance in educational settings can infringe on students' privacy and civil liberties. It can create an environment of constant monitoring, which may stifle freedom of expression and thought. +2. **Chilling Effects**: Surveillance can have a chilling effect on students and educators, making them feel constantly watched and potentially altering their behavior in ways that are detrimental to open discourse and learning. +3. **Negative Signals**: The use of surveillance technology can send negative signals to students about how they are viewed, potentially undermining trust between students and educational institutions. +4. **Loss of Individualization**: Surveillance systems often lack the ability to individualize their responses, leading to standardized and potentially unjust outcomes that do not take into account the unique circumstances of each student. + +### Healthcare: +1. **Privacy Concerns**: Telehealth systems and other healthcare surveillance technologies raise significant privacy issues. Patients' sensitive health data could be exposed or misused, leading to a loss of confidentiality. +2. **Impact on Rights and Opportunities**: The use of surveillance in healthcare can potentially limit patients' rights and access to services. For example, continuous monitoring might lead to discrimination or stigmatization of certain health conditions. +3. **Data Security**: The security of the data collected through surveillance technologies in healthcare is a major concern. If not properly protected, this data could be vulnerable to breaches and misuse. +4. **Potential Harms**: There are concerns about the potential harms and scope limits of these technologies, highlighting the need for pre-deployment assessments to evaluate their impact on privacy and civil liberties. + +Overall, the deployment of surveillance technologies in both education and healthcare needs to be carefully assessed to ensure that it doesn't infringe on individual rights, perpetuate unjust outcomes, or create an environment of distrust and constant monitoring.","The concerns with surveillance technology in education and healthcare include its invasive nature, potential for discrimination, and the disproportionate harm it may cause to disabled individuals. Specifically, new surveillance technologies can monitor students in ways that may violate their privacy and exacerbate existing inequalities, particularly for those with disabilities.",1.0,0.9891455806322682,0.0,0.99999999995,0.40385285736620724 +What are the options for high-priority AI risks and their link to org tolerance?,[],"The options for high-priority AI risks typically include: + +1. **Misaligned Objectives**: AI systems might pursue goals that are not fully aligned with human values or intentions. This can happen if the objectives programmed into the AI do not fully capture the nuances of human values or if there are unintended consequences. + +2. **Control Problems**: Ensuring that humans can maintain control over highly autonomous AI systems is a significant risk. This includes the ability to interrupt or modify the AI's actions if necessary. + +3. **Value Alignment**: Ensuring that AI systems understand and can act according to human values is crucial. This involves complex challenges related to encoding ethical principles into AI systems. + +4. **Unintended Consequences**: AI systems might produce harmful side effects that were not anticipated by their creators. 
This is particularly concerning with highly complex systems where predicting outcomes is difficult. + +5. **Scalability of Errors**: Small errors or biases in AI systems can scale up rapidly, leading to significant impacts if the AI is deployed widely or in critical areas. + +6. **Self-Improving AI**: AI systems that have the ability to improve themselves could potentially surpass human control, leading to scenarios where their actions become unpredictable and potentially harmful. + +Link to Organizational Tolerance: + +- **Organizational Tolerance for Misaligned Objectives**: Organizations might tolerate some level of misalignment if the AI's performance benefits outweigh the risks. However, for critical applications, tolerance is typically low. + +- **Control Problems**: Tolerance is generally low since losing control over AI systems can lead to significant risks, especially in safety-critical industries like healthcare or autonomous driving. + +- **Value Alignment**: Organizations may have varying tolerance levels based on the context of AI application. In areas such as customer service, there might be higher tolerance compared to areas like law enforcement or military applications where ethical considerations are paramount. + +- **Unintended Consequences**: Tolerance is generally low, especially in high-stakes environments. Organizations often invest in extensive testing and monitoring to mitigate this risk. + +- **Scalability of Errors**: Tolerance is low in high-impact sectors. Organizations focus on robustness and reliability of AI systems to prevent small errors from having large-scale effects. + +- **Self-Improving AI**: Tolerance is very low due to the potential for uncontrollable and unpredictable behavior. Organizations typically implement strict oversight and containment measures for such AI systems. + +Overall, the tolerance for these risks varies significantly depending on the specific use case, potential impact, and the organization's risk management capabilities.","The options for high-priority AI risks include mitigating, transferring, avoiding, or accepting these risks. Specifically, for risks that do not surpass organizational risk tolerance, it is suggested to document trade-offs, decision processes, and relevant measurement and feedback results. For risks that surpass organizational risk tolerances, the recommended actions are to mitigate, transfer, or avoid those risks.",1.0,0.9512965270176226,0.0,0.0,0.2221444557320826 +How does Navigator training relate to health coverage access?,"[""Healthcare "navigators" help people find their way through online signup forms to choose and obtain healthcare. A Navigator is "an individual or organization that's trained and able to help consumers, small businesses, and their employees as they look for health coverage options through the Marketplace (a government web site), including completing eligibility and enrollment forms."106 For the 2022 plan year, the Biden-Harris Administration increased funding so that grantee organizations could "train and certify more than 1,500 Navigators to help uninsured consumers find affordable and comprehensive health coverage."107""]","Navigator training is essential for improving health coverage access as it equips individuals and organizations with the necessary skills and knowledge to assist consumers, small businesses, and employees in navigating the complex process of choosing and obtaining healthcare coverage through the Marketplace. 
By being trained and certified, Navigators can effectively help uninsured consumers understand their options, complete eligibility and enrollment forms, and ultimately find affordable and comprehensive health coverage. This support is crucial for ensuring that more people can access the healthcare services they need.","Navigator training is related to health coverage access as it equips individuals or organizations to help consumers, small businesses, and their employees navigate the process of finding and obtaining health coverage options through the Marketplace. This training enables Navigators to assist with completing eligibility and enrollment forms, thereby facilitating access to affordable and comprehensive health coverage for uninsured consumers.",0.6666666666666666,0.9563006708277157,1.0,0.9999999999,0.9961489319384436 +What connects NIST's AI Safety Institute to AI bias mgmt?,['National Institute of Standards and Technology (2022) Towards a Standard for Identifying and Managing Bias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-managing-bias-artificial-intelligence'],"The National Institute of Standards and Technology (NIST) has been actively involved in establishing standards and guidelines for the development and deployment of artificial intelligence (AI) systems. One of the key areas NIST focuses on is the identification and management of bias in AI. + +In their publication ""Towards a Standard for Identifying and Managing Bias in Artificial Intelligence,"" NIST outlines the importance of recognizing and addressing bias to ensure the reliability, fairness, and safety of AI systems. This involves developing comprehensive frameworks and methodologies to detect, measure, and mitigate biases that can arise from data, algorithms, and human involvement in AI processes. + +The connection between NIST's AI Safety Institute and AI bias management lies in their shared goal of enhancing the trustworthiness and ethical deployment of AI technologies. By setting standards for bias management, NIST aims to guide organizations in creating AI systems that are not only technically robust but also fair and unbiased, ultimately contributing to overall AI safety.",The answer to given question is not present in context,0.20833333333333334,0.9082227963764112,1.0,0.0,0.5227355888565997 +What ensures automated systems are safe and fair?,"['Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.' + 'Some companies have put in place safeguards that have prevented harm from occurring by ensuring that key development decisions are vetted by an ethics review; others have identified and mitigated harms found through pre-deployment testing and ongoing monitoring processes.']","Ensuring that automated systems are safe and fair involves several key practices: + +1. **Independent Evaluation and Reporting**: It is important to have independent evaluation and reporting that confirms the system is safe and effective. This includes documenting and publicly reporting the steps taken to mitigate potential harms whenever possible. + +2. **Safeguards and Ethics Review**: Companies can put in place safeguards to prevent harm by ensuring that key development decisions are vetted by an ethics review. 
This involves having a dedicated process to critically assess the ethical implications of the system's design and deployment. + +3. **Pre-deployment Testing and Ongoing Monitoring**: Identifying and mitigating potential harms through thorough pre-deployment testing is crucial. Additionally, continuous monitoring of the system after deployment helps in addressing any unforeseen issues that might arise. + +By implementing these practices, organizations can enhance the safety and fairness of their automated systems.","Automated systems should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring to ensure they are safe and effective. They should be developed with consultation from diverse communities, stakeholders, and domain experts, and should include protective measures to prevent endangering safety. Additionally, independent evaluation and reporting that confirms the system's safety and effectiveness should be performed, with results made public whenever possible.",0.46153846153846156,0.9772984114394849,0.6666666666666666,0.99999999995,0.7883343068602024 +What issues come from biased automated systems in hiring and justice?,['Algorithms used in hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination.'],"Biased automated systems in hiring and justice can lead to several significant issues: + +1. **Reinforcement of Existing Inequities:** These systems can reflect and perpetuate existing societal biases, leading to the reinforcement of historical inequities. For example, if an algorithm is trained on historical hiring data, it may preferentially select candidates who resemble those who were hired in the past, potentially discriminating against underrepresented groups. + +2. **Discrimination:** Automated systems can embed new forms of bias and discrimination. In hiring, this could mean unfairly disadvantaging candidates based on race, gender, age, or other protected characteristics. In the justice system, biased algorithms might unfairly target certain demographic groups, leading to disproportionate sentencing, bail decisions, or policing. + +3. **Lack of Transparency:** Many automated systems are ""black boxes,"" meaning their decision-making processes are not transparent or understandable. This lack of transparency can make it difficult to identify and correct biased outcomes, and it can prevent affected individuals from challenging unfair decisions. + +4. **Erosion of Trust:** When people become aware that automated systems are biased, it can erode trust in these technologies and the institutions that use them. In hiring, this could lead to distrust in the fairness of the recruitment process. In the justice system, it could undermine public confidence in the legal system's ability to deliver fair and impartial justice. + +5. **Legal and Ethical Concerns:** The use of biased automated systems can lead to legal challenges and ethical dilemmas. Organizations could face lawsuits for discriminatory practices, and there are broader ethical concerns about fairness, justice, and the societal impact of these technologies. + +Addressing these issues requires careful design, testing, and ongoing monitoring of automated systems to ensure they are fair, transparent, and accountable.","Biased automated systems in hiring can lead to discriminatory decisions, such as hiring tools that reject women applicants for spurious reasons, penalizing resumes with the word 'women’s'. 
In the justice system, predictive models can disproportionately label Black students as high risk of dropping out, and risk assessment tools can overpredict recidivism for some groups of color, leading to unfair treatment and outcomes.",0.3181818181818182,0.9827757275482126,1.0,0.9999999999,0.9082553436296803 +What ensures independent eval & reporting for system safety?,"['Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.' + '• Auditing and assessment']","Independent evaluation and reporting for system safety are ensured through the processes of auditing and assessment. These processes involve confirming that the system is safe and effective, as well as reporting the steps taken to mitigate potential harms. The results of these evaluations and reports should be made public whenever possible to maintain transparency and accountability.","Independent evaluation for system safety is ensured by designing automated systems to allow for independent evaluation through mechanisms such as application programming interfaces. Independent evaluators, including researchers, journalists, ethics review boards, inspectors general, and third-party auditors, should have access to the system and samples of associated data, consistent with privacy, security, law, or regulation. Additionally, entities responsible for automated systems should provide regularly-updated reports that include an overview of the system, data used, risk assessments, performance testing results, and independent evaluation outcomes, all presented in plain language and a machine-readable format.",0.2,0.9618954216429755,0.0,0.9999999999,0.48720719188969336 +How does public input influence the AI Bill of Rights?,"['ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments.']","Public input plays a central role in shaping the AI Bill of Rights. According to the retrieved context, input was gathered through listening sessions, meetings, formal requests for information, and contributions to a widely-publicized email address. People across the United States, public servants from various Federal agencies, and members of the international community provided valuable insights. These contributions highlighted both the promises and potential harms of AI technologies. The core messages from these discussions emphasized the transformative potential of AI to improve lives and stressed the necessity and achievability of preventing the harms associated with these technologies. This broad engagement helped to inform and shape the Blueprint for an AI Bill of Rights.","Public input influences the AI Bill of Rights by providing insights and feedback from impacted communities, industry stakeholders, technology developers, and experts. 
The White House Office of Science and Technology Policy conducted a year-long process to gather this input through various means, including panel discussions and public listening sessions, which helped shape the principles and practices outlined in the Blueprint for an AI Bill of Rights.",0.9333333333333333,0.96750441871691,0.6666666666666666,0.9999999999,0.8152496806716342 +What issues arise from hidden criteria changes in benefit allocation?,[],"Hidden criteria changes in benefit allocation can lead to several significant issues: + +1. **Unfair Distribution**: When the criteria for benefit allocation are altered without transparency, certain groups may receive benefits unfairly, while others who are equally or more deserving may be overlooked. + +2. **Erosion of Trust**: Stakeholders, including beneficiaries, may lose trust in the system if they perceive that changes are being made secretly or without clear justification. This can lead to a lack of confidence in the institution or system managing the benefits. + +3. **Lack of Accountability**: Hidden criteria changes can obscure accountability, making it difficult to hold decision-makers responsible for unfair or biased allocations. This can lead to corruption or favoritism. + +4. **Legal and Ethical Concerns**: Undisclosed changes can raise legal and ethical issues, particularly if they result in discrimination or violate established regulations and guidelines. + +5. **Reduced Effectiveness**: The intended outcomes of the benefit allocation system may be compromised if changes are made without proper evaluation or consideration of their impact. This can undermine the effectiveness of the program. + +6. **Conflict and Discontent**: Beneficiaries who feel that they have been unfairly treated due to hidden criteria changes may become discontented, leading to conflicts, grievances, or even legal challenges. + +Ensuring transparency and clear communication about any changes in criteria is crucial to maintaining fairness, trust, accountability, and the overall effectiveness of benefit allocation systems.","Issues arising from hidden criteria changes in benefit allocation include individuals being denied benefits due to data entry errors and other system flaws, which were only revealed when an explanation of the system was demanded. The lack of transparency made it harder for errors to be corrected in a timely manner.",1.0,0.9932784200031123,0.0,0.0,0.29817598101696785 +What IP risks come from GAI using copyrighted works and data poisoning?,[],"The intellectual property (IP) risks associated with General AI (GAI) using copyrighted works and data poisoning are significant and multifaceted. Here are some key points to consider: + +1. **Use of Copyrighted Works**: + - **Infringement**: If GAI systems are trained on copyrighted materials without proper authorization, it can lead to copyright infringement claims. This is particularly problematic if the AI generates content that closely mimics or reproduces the original works. + - **Licensing Issues**: There may be complexities around licensing terms and conditions, especially if the AI-generated content is to be used commercially. + - **Attribution and Plagiarism**: AI systems might generate content that fails to properly attribute the original creators, leading to concerns about plagiarism and misappropriation of intellectual property. + +2. 
**Data Poisoning**: + - **Integrity of Data**: Data poisoning involves the deliberate introduction of misleading or harmful data into the training datasets of AI systems. This can compromise the integrity and reliability of the AI's outputs. + - **Misappropriation of IP**: Malicious actors could use data poisoning to insert copyrighted material into training datasets without detection, potentially leading to unintentional IP violations by the AI. + - **Legal and Compliance Risks**: Organizations using AI systems trained on poisoned data might unknowingly violate IP laws, leading to legal liabilities and compliance issues. + +Addressing these risks requires robust data management practices, including: +- Ensuring proper licensing and usage rights for all training data. +- Implementing rigorous data validation and cleaning processes to detect and mitigate data poisoning. +- Establishing clear policies and procedures for AI-generated content to ensure compliance with IP laws and ethical standards.","Intellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair use under the fair use doctrine. If a GAI system’s training data included copyrighted material, GAI outputs displaying instances of training data memorization could infringe on copyright. Additionally, data poisoning poses a risk where an adversary compromises a training dataset used by a model to manipulate its outputs or operation, potentially leading to malicious tampering with data or parts of the model.",1.0,0.8574728264242536,0.0,0.0,0.582347903327475 +What ensures human oversight in automated voting signatures?,[],"Ensuring human oversight in automated voting signatures typically involves a combination of technical and procedural measures. Here are some key practices to ensure human oversight: + +1. **Manual Review and Verification**: Even with automated systems in place, a certain percentage of signatures can be flagged for manual review. Election officials can then verify questionable signatures to ensure accuracy. + +2. **Audit Trails**: Implementing robust audit trails allows for tracking and reviewing the decisions made by automated systems. This ensures transparency and accountability. + +3. **Transparency and Public Reporting**: Regularly publishing reports on the performance and accuracy of automated systems can help maintain public trust and allow for independent verification. + +4. **Human-in-the-Loop (HITL) Systems**: Designing systems where human intervention is required at critical decision points. For example, if the system identifies a potential issue with a signature, it could escalate it to a human reviewer for final determination. + +5. **Training and Calibration**: Continually training and calibrating the automated system based on feedback from human reviewers to improve its accuracy and reliability. + +6. **Clear Guidelines and Protocols**: Establishing clear guidelines and protocols for when and how human oversight is applied ensures consistency and fairness in the review process. + +These measures, when implemented effectively, can help ensure that automated voting signature systems operate with the necessary human oversight to maintain the integrity of the voting process.","A human curing process helps voters confirm their signatures and correct other voting mistakes, ensuring that all votes are counted. 
This process is already standard practice in much of the country, allowing both an election official and the voter to review and correct any issues.",0.0,0.9755495452799258,0.0,0.0,0.47890106150175416 +How do algorithmic impact assessments relate to automated system transparency?,"['evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections.']","Algorithmic impact assessments (AIAs) are directly related to automated system transparency, as they provide a structured framework for evaluating and reporting the effects and implications of an algorithm. These assessments often include details such as disparity testing results and mitigation information, which are crucial for understanding and addressing any biases or negative impacts an automated system may have. + +By making the findings of an AIA public, organizations can demonstrate transparency in how their algorithms operate and affect individuals and communities. This openness helps to build trust with users and stakeholders, ensuring that the protections intended to prevent harm and promote fairness are effectively communicated and verified.",The answer to given question is not present in context,0.625,0.9999999999999994,1.0,0.0,0.17543613372625697 +What knowledge and security factors should be noted for GAI deployment?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• AI actor credentials and qualifications \n• Auditing and assessment \n• Change-management controls \n• Data provenance']","For the deployment of General Artificial Intelligence (GAI), several critical knowledge and security factors should be noted to ensure its safe and effective use. These include: + +1. **Information Integrity**: Ensuring that the data used and produced by the GAI is accurate and reliable. This involves monitoring and verifying the integrity of the information to prevent misinformation and bias in AI outputs. + +2. **Information Security**: Protecting the AI system from unauthorized access and cyber threats. This includes implementing robust cybersecurity measures to safeguard sensitive data and the AI model itself from being compromised. + +3. **Dangerous, Violent, or Hateful Content**: Developing mechanisms to detect and filter out any output that could be harmful or inappropriate. This is crucial to prevent the AI from generating or propagating content that could lead to real-world harm. + +4. **AI Actor Credentials and Qualifications**: Ensuring that individuals and entities involved in the design, development, and deployment of GAI possess the necessary expertise and credentials. This helps in maintaining high standards and accountability. + +5. **Auditing and Assessment**: Regularly conducting audits and assessments of the AI system to evaluate its performance, compliance with standards, and adherence to ethical guidelines. This includes both internal reviews and third-party evaluations. + +6. **Change-Management Controls**: Implementing structured processes for managing changes to the AI system. This includes version control, documentation of changes, and impact assessment to ensure that updates do not introduce new risks. + +7. **Data Provenance**: Maintaining thorough records of the origins and lineage of the data used by the AI. 
This helps in tracing back any issues to their source and ensuring that the data is from reliable and ethical sources. + +By addressing these factors, organizations can enhance the safety, reliability, and ethical deployment of GAI systems.","The context mentions several knowledge and security factors for GAI deployment, including the need to document the extent of human domain knowledge employed to improve GAI system performance, verify sources and citations in GAI system outputs, track instances of anthropomorphization in GAI system interfaces, verify GAI system training data and TEVV data provenance, and regularly review security and safety guardrails, especially in novel circumstances.",0.8888888888888888,0.9130296992196719,0.0,0.99999999995,0.3477013807799245 +How do security measures relate to info integrity?,[],"Security measures are crucial for maintaining information integrity. Information integrity refers to the accuracy, consistency, and reliability of data throughout its lifecycle. Here’s how security measures relate to information integrity: + +1. **Access Controls**: By restricting access to authorized users only, security measures such as passwords, biometrics, and multi-factor authentication help ensure that only those with the right permissions can manipulate or view data. + +2. **Data Encryption**: Encrypting data both in transit and at rest protects it from being tampered with or intercepted by unauthorized parties, thereby maintaining its integrity. + +3. **Audit Trails**: Security measures that include logging and monitoring user activities provide audit trails, which help trace any unauthorized or suspicious activities, ensuring that any breaches or alterations can be tracked and addressed. + +4. **Data Validation**: Security protocols can include validation checks to ensure that data input is correct and consistent, preventing errors and maintaining the integrity of the information. + +5. **Backup and Recovery**: Regular backups and disaster recovery plans ensure data can be restored to its original state in case of corruption or loss, maintaining its integrity over time. + +In summary, effective security measures are essential to protect data from unauthorized access and alterations, ensuring the information remains accurate and reliable.",The answer to given question is not present in context,0.0,0.9422883772694076,0.0,0.0,0.9240855365352358 +What links are there between tech protections and the AI Bill of Rights?,[],"The AI Bill of Rights is a set of guidelines aimed at protecting individuals from potential harms associated with artificial intelligence and automated systems. It encompasses principles such as privacy, transparency, fairness, and accountability. Tech protections, on the other hand, refer to various technical measures and practices put in place to safeguard users and systems against risks like data breaches, misuse of AI, and other cyber threats. + +The links between tech protections and the AI Bill of Rights include: + +1. **Privacy and Data Security**: Both emphasize the importance of protecting personal data. The AI Bill of Rights advocates for robust privacy protections, while tech protections include measures such as encryption and secure data storage to achieve this goal. + +2. **Transparency**: The AI Bill of Rights calls for transparency in AI systems, ensuring that users are aware of how their data is being used and how decisions affecting them are made. 
Tech protections can facilitate this through mechanisms like audit trails and transparent algorithms. + +3. **Fairness**: Ensuring AI systems do not perpetuate biases or discrimination is a core principle of the AI Bill of Rights. Tech protections might include bias detection and mitigation tools to ensure fair treatment across all users. + +4. **Accountability**: Both stress the need for accountability in AI deployment. The AI Bill of Rights advocates for mechanisms to hold entities responsible for AI-related harm, while tech protections might involve monitoring and compliance checks to uphold these standards. + +5. **Informed Consent**: Ensuring that users provide informed consent before their data is used is a principle in the AI Bill of Rights, supported by tech protections that ensure clear communication and easy-to-understand consent mechanisms. + +In summary, tech protections are the practical implementations that help enforce the principles laid out in the AI Bill of Rights, ensuring that AI systems are developed and used in a manner that is safe, fair, and respectful of individual rights.",The context does not provide specific links between tech protections and the AI Bill of Rights.,0.0,0.9633640705737508,1.0,0.0,0.20915479581033247 +Which NSF programs ensure automated system safety and compliance?,[],"I don't have specific information about which NSF (National Science Foundation) programs ensure automated system safety and compliance. However, the NSF funds a variety of programs and initiatives that focus on the advancement of technology, including the safety and ethical considerations of automated systems. If you need detailed information, I recommend visiting the NSF website or contacting the NSF directly for the most accurate and up-to-date information regarding their programs related to automated system safety and compliance.","The NSF programs that ensure automated system safety and compliance include the National AI Research Institutes, which support research on safe, trustworthy, fair, and explainable AI algorithms and systems; the Cyber Physical Systems program, which supports research on developing safe autonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace program, which supports research on cybersecurity and privacy enhancing technologies in automated systems; the Formal Methods in the Field program, which supports research on rigorous formal verification and analysis of automated systems and machine learning; and the Designing Accountable Software Systems program, which supports research on rigorous and reproducible methodologies for developing software systems with legal and regulatory compliance in mind.",0.8333333333333334,0.0,0.0,0.0,0.4470761903130137 +What drives the need for human input in sensitive automated systems?,[],"The need for human input in sensitive automated systems is driven by several factors: + +1. **Ethical Considerations**: Automated systems, particularly those that impact human lives (e.g., healthcare, criminal justice, autonomous vehicles), need human oversight to ensure that decisions align with societal values and ethical standards. + +2. **Accountability**: Humans are required to take responsibility for the actions and decisions made by automated systems, especially when things go wrong. This helps in maintaining public trust and provides a clear line of accountability. + +3. 
**Complex Decision-Making**: Some decisions require nuanced understanding and context that current AI systems may not fully grasp. Human judgment is crucial in scenarios where the automated system’s decision may be too simplistic or lacks necessary context. + +4. **Adaptability**: Human input allows for flexibility and adaptability in situations where the automated system might not have been trained or programmed for. Humans can intervene to adjust or override the system’s actions based on real-time observations and changes. + +5. **Bias and Fairness**: Automated systems can inadvertently perpetuate or amplify biases present in their training data. Human oversight can help identify and mitigate these biases, ensuring fairer outcomes. + +6. **Transparency and Explanation**: Humans can provide explanations for the decisions made by automated systems, making the processes more transparent and understandable for users and stakeholders. + +Human input in sensitive automated systems ensures they operate safely, ethically, and effectively, maintaining a balance between automation and human oversight.","The need for human input in sensitive automated systems is driven by the requirement for timely human consideration and remedy when automated systems fail, produce errors, or when individuals wish to appeal or contest the impacts of these systems. Additionally, human input is necessary to ensure that automated systems are tailored to their intended purpose, provide meaningful access for oversight, and incorporate human consideration for adverse or high-risk decisions.",0.0,0.979974222521601,0.0,0.0,0.8887516452125008 +"What links field testing, user feedback, and GAI eval?",[],"Field testing, user feedback, and GAI (General Artificial Intelligence) evaluation are interconnected processes that play crucial roles in developing and ensuring the safety and effectiveness of AI systems. + +1. **Field Testing**: This involves deploying an AI system in real-world scenarios to observe its performance in practical applications. Field testing helps identify unforeseen issues, validate the system's functionality, and gather initial data on how it operates outside controlled environments. + +2. **User Feedback**: During and after field testing, collecting user feedback is essential. This feedback provides insights into how end-users interact with the AI system, any difficulties they encounter, and the overall user experience. User feedback helps developers understand the system's strengths and weaknesses from the perspective of those who will use it. + +3. **GAI Evaluation**: This refers to the processes and methodologies used to assess the performance, safety, and alignment of General Artificial Intelligence systems. GAI evaluation includes rigorous testing against safety benchmarks, ethical guidelines, and performance metrics to ensure the AI operates as intended and does not pose risks to users or society. + +The link between these processes is that they collectively contribute to the iterative improvement and validation of AI systems. Field testing provides real-world performance data, user feedback offers practical insights and experiences, and GAI evaluation ensures comprehensive assessment against safety and ethical standards. Together, they create a robust framework for developing safe, reliable, and user-friendly AI technologies.","Field testing, user feedback, and GAI evaluation are linked through structured public feedback mechanisms that assess how GAI systems perform in real-world conditions. 
Field testing evaluates risks and impacts in controlled settings, while user feedback, gathered through participatory engagement methods, helps organizations understand user interactions and experiences with AI-generated information. Together, these approaches inform the design, implementation, and governance of GAI systems.",1.0,0.7911594340811906,0.0,0.0,0.5500369508595143 +What risk controls for third-party GAI in compliance?,['• Auditing and assessment'],"Risk controls for third-party Generative AI (GAI) in compliance typically involve a combination of auditing, assessment, and continuous monitoring. Here are some key controls that organizations might implement: + +1. **Third-party Audits and Assessments**: Regularly conduct audits and assessments of third-party GAI providers to ensure they comply with regulatory requirements and industry standards. This can involve reviewing their data handling practices, security measures, and compliance with relevant laws. + +2. **Due Diligence**: Perform thorough due diligence before engaging with third-party GAI providers. This includes evaluating their reputation, compliance history, and the robustness of their AI models. + +3. **Contractual Safeguards**: Include specific clauses in contracts with third-party providers that address compliance requirements, data protection, and rights to audit. Ensure these contracts specify the responsibilities and liabilities of the third-party provider. + +4. **Data Privacy and Security**: Ensure that third-party GAI providers implement strong data privacy and security measures. This includes encryption, access controls, and regular security assessments. + +5. **Transparency and Explainability**: Require third-party providers to offer transparency in their AI models and ensure that the AI’s decision-making processes are explainable. This helps in understanding and mitigating potential biases and errors. + +6. **Continuous Monitoring**: Implement mechanisms for continuous monitoring of the third-party GAI's performance and compliance. This can involve automated tools that track the AI’s operations and highlight any deviations from expected behavior. + +7. **Incident Response Plans**: Develop and maintain incident response plans for addressing any compliance breaches or security incidents involving third-party GAI. Ensure these plans are regularly tested and updated. + +8. **Training and Awareness**: Provide training and raise awareness among employees about the risks and compliance requirements associated with using third-party GAI. This includes understanding how to identify potential issues and escalate them appropriately. 
+ +By implementing these risk controls, organizations can better manage the compliance risks associated with third-party GAI and ensure that they align with regulatory and ethical standards.","Organizations can apply standard or existing risk controls and processes to proprietary or open-source GAI technologies, data, and third-party service providers, including acquisition and procurement due diligence, requests for software bills of materials (SBOMs), application of service level agreements (SLAs), and statement on standards for attestation engagement (SSAE) reports to help with third-party transparency and risk management for GAI systems.",1.0,0.9154241411367311,0.0,0.0,0.3505316722011165 +What ensures effective incident response for third-party GAI?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","Effective incident response for third-party GAI (General Artificial Intelligence) is ensured by thorough documentation practices. This includes: + +1. **Logging and Recording**: Keeping detailed logs and records of all incidents to ensure that there is a clear trail of what occurred, which can be reviewed and analyzed. + +2. **Analyzing Incidents**: Conducting thorough analyses of incidents to understand their causes and impacts, which helps in preventing future occurrences. + +3. **Regular Information Sharing**: Sharing information regularly with relevant AI actors to keep everyone informed and aligned. + +4. **Change Management Records**: Maintaining records of all changes made to the AI systems, which helps in tracking and understanding the evolution of the system. + +5. **Version History and Metadata**: Keeping a detailed version history and metadata to track changes and updates to the AI system, which facilitates troubleshooting and accountability. + +These practices collectively empower AI actors to respond to and manage AI incidents more effectively.","Effective incident response for third-party GAI is ensured by establishing incident response plans that align with impacts, communicating these plans to relevant AI actors, defining ownership of incident response functions, rehearsing the plans regularly, improving them based on retrospective learning, and reviewing for alignment with relevant laws.",0.5714285714285714,0.9608878716617756,0.0,0.0,0.8470429708184275 +What data leaks cause privacy issues?,"['Models may leak, generate, or correctly infer sensitive information about individuals. For example, \nduring adversarial attacks, LLMs have revealed sensitive information (from the public domain) that was \nincluded in their training data. This problem has been referred to as data memorization, and may pose \nexacerbated privacy risks even for data present only in a small number of training samples. \nIn addition to revealing sensitive information in GAI training data, GAI models may be able to correctly \ninfer PII or sensitive data that was not in their training data nor disclosed by the user by stitching \ntogether information from disparate sources. These inferences can have negative impact on an individual']","Data leaks that cause privacy issues primarily revolve around the following scenarios: + +1. 
**Adversarial Attacks**: During these attacks, large language models (LLMs) can reveal sensitive information that was included in their training data. This phenomenon is known as data memorization and poses significant privacy risks, even if the sensitive data appears only in a small number of training samples. + +2. **Sensitive Information in Training Data**: LLMs may inadvertently generate or leak sensitive information about individuals if such information was part of their training data. This can include personal identifiable information (PII) such as names, addresses, social security numbers, or other confidential details. + +3. **Inference of Sensitive Data**: Even if specific sensitive data was not included in the training data or disclosed by the user, LLMs may still be able to infer such information by piecing together clues from various sources. These inferences can lead to unintended privacy breaches and have negative impacts on individuals. + +These issues underscore the importance of implementing robust privacy-preserving techniques and stringent data handling practices when developing and deploying AI models.","The context mentions impacts due to leakage and unauthorized use, disclosure, or de-anonymization of biometric, health, location, or other personally identifiable information or sensitive data as causes of privacy issues.",0.8,0.9594770142460436,0.0,0.9999999999,0.39921054936027495 +What are the risks of collecting sensitive student data?,"['Data and metadata are sensitive if they pertain to an individual in a sensitive domain (defined below); are generated by technologies used in a sensitive domain; can be used to infer data from a sensitive domain or sensitive data about an individual (such as disability-related data, genomic data, biometric data, behavioral data, geolocation data, data related to interaction with the criminal justice system, relationship history and legal status such as custody and divorce information, and home, work, or school environmental data); or have the reasonable potential to be used in ways that are likely to expose individuals to meaningful harm, such as a loss of privacy or financial harm' + 'due to identity theft.']","Collecting sensitive student data can pose several risks, including: + +1. **Privacy Violations**: Sensitive data can include personal information that, if disclosed, could violate students' privacy. This could range from academic records and health information to behavioral data and geolocation data. + +2. **Identity Theft**: If sensitive student data is not properly protected, it can be vulnerable to breaches. Unauthorized access to such data can lead to identity theft, where cybercriminals use the data for fraudulent activities. + +3. **Discrimination**: Sensitive data can include information about a student's disabilities, interactions with the criminal justice system, or other personal circumstances. If this data is improperly accessed or disclosed, it could lead to discriminatory practices or unfair treatment. + +4. **Psychological Harm**: Exposure of sensitive data can cause emotional and psychological distress to students. For example, if sensitive information about a student's relationship history or legal status is leaked, it can lead to bullying, stigmatization, or social isolation. + +5. **Financial Harm**: Sensitive data can include financial information or data that can be used to infer financial status. Unauthorized access to this information can lead to financial exploitation or harm. + +6. 
**Loss of Trust**: If students or their guardians perceive that their sensitive data is not being adequately protected, it can lead to a loss of trust in the educational institution, which can have long-term reputational damage. + +7. **Legal Consequences**: There are various laws and regulations governing the protection of sensitive data, such as FERPA (Family Educational Rights and Privacy Act) in the United States. Failure to protect sensitive student data can result in legal penalties and fines. + +Given these risks, it is crucial for educational institutions to implement robust data protection measures, including encryption, access controls, and regular security audits, to safeguard sensitive student data.","The risks of collecting sensitive student data include concerns about the lack of express parental consent, the lack of transparency in how the data is being used, and the potential for resulting discriminatory impacts. Additionally, the data collected can include sensitive information such as demographic details, drug use, and interest in LGBTQI+ groups, which may lead to inappropriate forecasting of student success and flagging of students with disabilities as cheating.",1.0,0.9976426424375219,0.5,0.9999999999,0.9077836150567778 +How do AI red-teaming and stakeholder engagement connect in privacy risk assessment?,"['• \nAI Red-teaming: A structured testing exercise used to probe an AI system to find flaws and \nvulnerabilities such as inaccurate, harmful, or discriminatory outputs, often in a controlled \nenvironment and in collaboration with system developers.']","AI red-teaming and stakeholder engagement are both critical components in a comprehensive privacy risk assessment for AI systems. + +**AI Red-teaming** involves a structured testing exercise to identify flaws and vulnerabilities in AI systems. This process helps uncover issues such as privacy violations, data leaks, and other potential risks related to the handling of sensitive information. By simulating attacks and probing the system in a controlled environment, developers can better understand the system's weaknesses and implement necessary safeguards to protect user privacy. + +**Stakeholder Engagement** involves actively involving various stakeholders, including users, developers, privacy experts, and regulators, in the privacy risk assessment process. This engagement ensures that diverse perspectives are considered, and the concerns of all parties are addressed. Stakeholders can provide valuable insights into potential privacy risks and help in the development of more robust privacy protection strategies. + +**Connecting AI Red-teaming and Stakeholder Engagement**: +1. **Comprehensive Risk Identification**: By combining AI red-teaming with stakeholder engagement, organizations can identify a wider range of privacy risks. Red-teaming offers a technical perspective, while stakeholders provide contextual and user-centric insights. + +2. **Enhanced Trust and Transparency**: Engaging stakeholders in the red-teaming process can enhance trust and transparency. Stakeholders can see firsthand how the system is tested for vulnerabilities and how their privacy concerns are being addressed. + +3. **Improved Mitigation Strategies**: Input from stakeholders can inform the development of more effective privacy risk mitigation strategies based on real-world concerns and usage patterns, which can be tested and refined through red-teaming exercises. + +4. 
**Regulatory Compliance**: Stakeholder engagement ensures that privacy risk assessments align with regulatory requirements and best practices. Red-teaming can then be used to verify that the system complies with these standards. + +In summary, AI red-teaming and stakeholder engagement are interconnected in privacy risk assessment by providing a thorough understanding of potential privacy risks and developing informed, effective strategies to mitigate them.","AI red-teaming and stakeholder engagement connect in privacy risk assessment by engaging directly with end-users and other stakeholders to understand their expectations and concerns regarding content provenance. This feedback is then used to guide the design of provenance data-tracking techniques, which is essential for addressing privacy risks identified during AI red-teaming assessments.",0.06666666666666667,0.891244900184817,0.0,0.9999999999,0.35715815726132344 +What connects attack surfaces to system and data risks?,[],"Attack surfaces are the various points in a system where an unauthorized user (attacker) can attempt to enter or extract data. These surfaces are directly connected to system and data risks because the more attack surfaces a system has, the higher the potential risks. Each attack surface represents a potential vulnerability that could be exploited, leading to data breaches, system failures, or other security incidents. Essentially, a larger attack surface increases the chances of exposure to threats, thereby elevating the overall risk to the system and the data it holds. Reducing the attack surface is a key strategy in mitigating these risks.","The context discusses increased attack surfaces for targeted cyberattacks, which may compromise a system's availability or the confidentiality or integrity of training data, code, or model weights. This connection indicates that as attack surfaces increase, the risks to systems and data also escalate.",1.0,0.9478746832725861,0.0,0.0,0.6288225641941089 +What laws show data privacy principles in action?,[],"There are several laws around the world that embody data privacy principles and aim to protect individuals' personal information. Some of the most well-known include: + +1. **General Data Protection Regulation (GDPR)**: This is a comprehensive data protection law in the European Union that sets strict guidelines for how personal data should be collected, stored, and processed. It emphasizes principles such as data minimization, purpose limitation, and the rights of individuals to access and control their personal data. + +2. **California Consumer Privacy Act (CCPA)**: This law in the United States provides California residents with rights related to the access, deletion, and sharing of their personal information that is collected by businesses. It also mandates that businesses disclose the categories of personal data they collect and the purposes for which they use it. + +3. **Health Insurance Portability and Accountability Act (HIPAA)**: In the United States, HIPAA sets standards for protecting sensitive patient health information. It requires healthcare providers and related entities to implement safeguards to ensure the confidentiality, integrity, and security of health information. + +4. **Personal Data Protection Act (PDPA)**: This law in Singapore governs the collection, use, and disclosure of personal data by organizations. 
It ensures that individuals' personal data is handled in a manner that recognizes both their right to privacy and the needs of organizations to collect and use such data for legitimate purposes. + +5. **Brazilian General Data Protection Law (LGPD)**: Similar to the GDPR, the LGPD regulates the processing of personal data in Brazil. It outlines the rights of data subjects and the obligations of data controllers and processors. + +These laws demonstrate various data privacy principles in action, such as transparency, accountability, data minimization, and the rights of individuals to control their personal data.","The Privacy Act of 1974 exemplifies data privacy principles in action by requiring privacy protections for personal information in federal records systems, including limits on data retention and providing individuals a general right to access and correct their data. Additionally, federal law mandates employers to report the costs of surveilling employees during labor disputes, which serves as a transparency mechanism to protect worker organizing.",1.0,0.9689612097336013,0.0,0.0,0.8244243528044527 +What ensures AI transparency per NIST?,"['The \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems.']","The NIST framework ensures AI transparency by incorporating principles of transparency during the pre-design, design and development, deployment, use, and testing and evaluation stages of AI technologies and systems. This means that at each stage, efforts are made to ensure that the processes, decisions, and functioning of AI systems are clear and understandable to stakeholders.",The answer to given question is not present in context,0.0,0.960934441206761,1.0,0.0,0.925478895902012 +What safeguards do ethics reviews provide for automated systems?,['Some companies have put in place safeguards that have prevented harm from occurring by ensuring that key development decisions are vetted by an ethics review; others have identified and mitigated harms found through pre-deployment testing and ongoing monitoring processes.'],"Ethics reviews provide several crucial safeguards for automated systems, including: + +1. **Vetting Key Development Decisions:** Ethics reviews ensure that significant decisions made during the development process are scrutinized for their ethical implications. This helps in identifying and addressing potential ethical issues before they manifest in the deployed system. + +2. **Pre-Deployment Testing:** Ethics reviews often involve rigorous pre-deployment testing to identify and mitigate any potential harms the automated system might cause. This ensures that the system is safe and reliable before it is released to the public. + +3. **Ongoing Monitoring:** After deployment, ethics reviews typically include ongoing monitoring processes to continuously assess the system's performance and impact. This helps in identifying any new or unforeseen issues that may arise and allows for timely intervention to mitigate any harm. + +4. **Harm Mitigation:** By identifying potential risks and harms early in the development process, ethics reviews help in implementing measures to mitigate these risks, ensuring that the system operates in a manner that is safe and ethical. 
+ +Overall, ethics reviews act as a checkpoint to ensure that automated systems are developed and deployed responsibly, with a focus on minimizing harm and upholding ethical standards.",Ethics reviews provide safeguards for automated systems by vetting key development decisions to prevent harm from occurring. They help identify and mitigate potential harms through pre-deployment testing and ongoing monitoring processes.,0.6666666666666666,0.9900890660859941,1.0,0.9999999999,0.9929358278217926 +What ensures fair design in automated systems?,"['Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible.']","Ensuring fair design in automated systems involves several key practices, including: + +1. **Independent Evaluation and Reporting**: Conducting independent evaluations and reporting on the safety and effectiveness of the system. This includes documenting the steps taken to mitigate potential harms and making these results public whenever possible. + +2. **Transparency**: Providing clear and accessible information about how the system works, including the data it uses and the decision-making processes it follows. + +3. **Bias Mitigation**: Implementing techniques to detect and reduce biases in the data and algorithms used by the system. + +4. **Stakeholder Involvement**: Engaging a diverse group of stakeholders in the design and evaluation process to ensure different perspectives are considered. + +5. **Regulatory Compliance**: Adhering to relevant laws and regulations that promote fairness and non-discrimination. + +By following these practices, developers and organizations can help ensure that their automated systems are designed and implemented in a fair and ethical manner.","Fair design in automated systems is ensured through proactive and continuous measures to protect individuals and communities from algorithmic discrimination. This includes conducting equity assessments as part of the system design, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing and mitigation, and maintaining clear organizational oversight. Additionally, independent evaluation and reporting, including algorithmic impact assessments and disparity testing results, should be made public whenever possible to confirm these protections.",0.25,0.9705380837276713,0.3333333333333333,0.9999999999,0.36366843036222285 +What GAI activities contribute most to carbon emissions?,[],"General AI activities that contribute most to carbon emissions primarily include: + +1. **Training Large Models:** Training large AI models, especially deep learning models, requires substantial computational power. This involves running powerful GPUs or TPUs for extended periods, which consumes significant amounts of electricity. The larger and more complex the model, the more computational resources it requires, leading to higher energy consumption and carbon emissions. + +2. **Data Center Operations:** The infrastructure supporting AI activities, particularly data centers, is another major source of carbon emissions. Data centers house the servers and other hardware necessary for AI training and inference, and they require a lot of energy for both computing and cooling systems. + +3. 
**Frequent Model Retraining and Updates:** Continuously retraining and updating AI models to improve performance or adapt to new data can also contribute to higher carbon emissions, as this process necessitates repeated use of computational resources. + +4. **Distributed Computing:** Distributed AI tasks, which involve multiple machines working in parallel across various locations, can also lead to increased energy use and carbon emissions due to the additional overhead in coordination and data transfer between machines. + +Efforts to mitigate these emissions include optimizing algorithms for efficiency, using renewable energy sources for data centers, and developing more energy-efficient hardware.","The GAI activities that contribute most to carbon emissions include training, maintaining, and operating GAI systems, particularly during the pre-training, fine-tuning, and inference stages. Current estimates suggest that training a single transformer LLM can emit as much carbon as 300 round-trip flights between San Francisco and New York.",1.0,0.9048096938108907,0.0,0.0,0.8866448744979896 +What AI systems improve design in chem & bio?,[],"AI systems that improve design in chemistry and biology include: + +1. **DeepMind’s AlphaFold**: This AI system predicts protein folding with high accuracy, which is essential for understanding biological processes and designing drugs. + +2. **IBM's RXN for Chemistry**: This platform uses AI to predict the outcomes of chemical reactions, aiding in the design and synthesis of new compounds. + +3. **Insilico Medicine’s Pharma.AI**: This suite of AI tools accelerates drug discovery by identifying potential drug targets and designing new molecules. + +4. **Atomwise**: This AI-driven platform uses deep learning to analyze molecular structures for drug discovery. + +These systems leverage machine learning techniques to analyze vast amounts of data, predict outcomes, and optimize designs, significantly speeding up research and development in chemistry and biology.","Chemical and biological design tools (BDTs) are highly specialized AI systems trained on scientific data that aid in chemical and biological design, potentially improving design capabilities beyond what text-based LLMs can provide.",0.9523809523809523,0.9787847379867137,0.0,0.0,0.2764245146645313 +How to align synthetic data with real stats while ensuring privacy?,[],"Aligning synthetic data with real statistics while ensuring privacy is a complex task that involves several key steps and considerations. Here's a general approach to achieve this: + +1. **Statistical Alignment**: + - **Understand the Real Data**: Perform a thorough statistical analysis of your real dataset to understand its distributions, correlations, and other statistical properties. + - **Define Goals and Metrics**: Establish clear goals for what aspects of the real data you want your synthetic data to mimic. This could include means, variances, joint distributions, etc. + - **Use of Generative Models**: Employ generative models like Generative Adversarial Networks (GANs), Variational Autoencoders (VAEs), or other machine learning techniques to generate synthetic data that approximates the real data's statistical properties. + - **Iterative Refinement**: Continuously compare the generated synthetic data against the real data using statistical tests and refine the generative model until the synthetic data closely matches the real data's statistics. + +2. 
**Ensuring Privacy**: + - **Differential Privacy**: Implement differential privacy techniques to ensure that the synthetic data does not reveal information about any individual in the real dataset. This involves adding controlled noise to the data generation process to obscure the presence or absence of any single data point. + - **Privacy-Preserving Data Synthesis Algorithms**: Use algorithms specifically designed for privacy-preserving data synthesis, which incorporate privacy constraints into the data generation process. + - **Risk Assessment**: Conduct a privacy risk assessment to evaluate and mitigate the risk of re-identification or other privacy breaches. This can include testing the synthetic data for potential vulnerabilities. + - **Data Minimization**: Only include necessary features and data points in your synthetic dataset to minimize the risk of exposing sensitive information. + +3. **Validation and Testing**: + - **Privacy Audits**: Perform regular audits to ensure that the synthetic data complies with privacy standards and regulations. + - **Utility Assessment**: Test the utility of the synthetic data in your intended applications to ensure it serves its purpose without compromising privacy. + - **Feedback Loop**: Establish a feedback loop where the synthetic data is regularly evaluated, and both statistical alignment and privacy measures are improved based on findings and new developments. + +By combining these approaches, you can align synthetic data with real statistics while maintaining a strong privacy guarantee for individuals in the dataset.","Consider opportunities to responsibly use synthetic data and other privacy enhancing techniques in GAI development, where appropriate and applicable, to match the statistical properties of real-world data without disclosing personally identifiable information or contributing to homogenization.",0.0,0.9759954851628194,0.0,0.0,0.26828747146664 +What ensures AI transparency per NIST?,"['The \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems.']","Per the NIST framework, AI transparency is ensured by incorporating principles of transparency during the pre-design, design and development, deployment, use, and testing and evaluation phases of AI technologies and systems. This means that every stage of the AI lifecycle should be approached with an emphasis on transparency, ensuring that the operations and decision-making processes of AI systems are open and understandable to stakeholders.",The answer to given question is not present in context,0.0,0.960934441206761,1.0,0.0,0.17426087182722494 +What policies ensure GAI risk assessment with transparency and safety?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.' 
+ 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections.']","To ensure GAI (General Artificial Intelligence) risk assessment with transparency and safety, the following policies are often recommended: + +1. **Documentation Practices**: This includes meticulous logging, recording, and analyzing of GAI incidents. Such documentation facilitates smooth information sharing among relevant AI actors and helps in managing AI incidents effectively. + +2. **Information Sharing**: Regular sharing of information, maintaining change management records, version history, and metadata can empower AI actors in responding to and managing AI incidents. + +3. **Algorithmic Impact Assessment**: Conducting evaluations and reporting them in plain language can help ensure transparency. This includes disparity testing results and information about mitigation measures, and whenever possible, making these assessments public to confirm protections. + +These practices collectively help in maintaining transparency and safety in GAI risk assessments.","The policies that ensure GAI risk assessment with transparency and safety include establishing transparency policies and processes for documenting the origin and history of training data and generated data for GAI applications, as well as establishing policies to evaluate risk-relevant capabilities of GAI and the robustness of safety measures prior to deployment and on an ongoing basis.",0.8125,0.9779751114512228,0.0,0.99999999995,0.5626937147638066 +What dual aspects should automated systems cover for effective oversight?,[],"Automated systems should cover both technical and institutional aspects for effective oversight. The technical aspect involves ensuring that the systems function correctly, securely, and as intended. The institutional aspect encompasses the policies, regulations, and governance structures that guide the deployment and use of these systems, ensuring accountability and ethical compliance. Together, these aspects help create robust safeguards against potential risks associated with automated systems.",Automated systems should cover ongoing monitoring procedures and clear organizational oversight for effective oversight.,0.0,0.9706560321871054,0.0,0.0,0.22325009694550932 +What term refers to GAI's misleading false content?,[],"The term that refers to General Artificial Intelligence (GAI) producing misleading false content is ""hallucination."" In the context of AI, hallucination occurs when the AI generates information or content that is not based on real data or facts, leading to inaccuracies or falsehoods.","The term that refers to GAI's misleading false content is ""confabulation.""",0.0,0.9100406255396573,0.0,0.0,0.22356345900308996 +What's the role of interdisciplinary teams & human-AI config in GAI risk mgmt?,['Human-AI Configuration;'],"The role of interdisciplinary teams and human-AI configuration in managing the risks associated with General Artificial Intelligence (GAI) is crucial. Here are some key points: + +1. **Interdisciplinary Teams**: + - **Diverse Expertise**: Interdisciplinary teams bring together experts from various fields such as computer science, ethics, psychology, law, and sociology. This diversity ensures that different perspectives are considered in addressing the multifaceted risks of GAI.
+ - **Holistic Risk Assessment**: By leveraging the knowledge and methodologies from different disciplines, these teams can conduct thorough and holistic assessments of potential risks, including technical, ethical, and societal implications. + - **Innovative Solutions**: Collaboration among diverse experts often leads to innovative solutions that might not emerge from a single-field perspective. This can be particularly valuable in anticipating and mitigating unexpected risks. + +2. **Human-AI Configuration**: + - **Human Oversight**: Proper human-AI configuration involves designing systems where human oversight is integral. This ensures that AI decisions can be monitored and intervened upon when necessary, reducing the chances of harmful autonomous actions. + - **Transparency and Accountability**: Configurations that emphasize transparency allow humans to understand and predict AI behavior. This is essential for accountability and for ensuring that AI systems act in alignment with human values and legal standards. + - **Adaptive Interaction**: Effective human-AI configurations allow for adaptive interactions where AI systems can learn from human feedback and vice versa. This continuous loop can help in refining AI behavior and aligning it more closely with human safety and ethical standards. + +In summary, interdisciplinary teams bring comprehensive insights and innovative strategies to the table, while human-AI configuration ensures that AI systems are designed with necessary safeguards and oversight mechanisms. Together, they form a robust approach to managing and mitigating the risks associated with General Artificial Intelligence.","Interdisciplinary teams play a crucial role in GAI risk management by reflecting a wide range of capabilities, competencies, demographic groups, domain expertise, educational backgrounds, lived experiences, professions, and skills. Their participation is documented, and opportunities for interdisciplinary collaboration are prioritized. Additionally, human-AI configuration is important as it addresses harmful bias and homogenization, ensuring that data or benchmarks used in risk measurement are representative of diverse in-context user populations.",0.0,0.9560810803627083,0.0,0.0,0.6780761174632701 +How do digital content transparency tools ensure AI traceability and integrity?,"['• Data provenance' + 'instance where content is generated, modified, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time.' + 'Documentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.']","Digital content transparency tools ensure AI traceability and integrity by providing a tamper-proof history of content, promoting transparency, and enabling traceability throughout the AI lifecycle. Here are some specific ways they achieve this: + +1. **Data Provenance**: These tools track the origins and modifications of data, ensuring that every change is recorded and can be traced back to its source. This helps in verifying the authenticity and integrity of the data used in AI models. + +2.
**Version Control Systems**: Robust version control systems track changes over time, providing a clear history of how the AI model has evolved. This includes documenting alterations, updates, and the individuals responsible for these changes. + +3. **Logging and Recording**: Systematic logging and recording of AI incidents and activities ensure that any actions taken can be reviewed and analyzed. This is crucial for understanding the context of changes and for accountability purposes. + +4. **Regular Information Sharing**: Sharing information, including change management records, version history, and metadata, with relevant AI actors facilitates transparency and helps in managing and responding to AI incidents effectively. + +By implementing these practices, digital content transparency tools help maintain the integrity of AI systems and ensure that all modifications can be traced and verified, promoting trust and accountability in AI operations.","Digital content transparency solutions ensure AI traceability and integrity by enabling the documentation of each instance where content is generated, modified, or shared, providing a tamper-proof history of the content. Additionally, robust version control systems can be applied to track changes across the AI lifecycle over time.",0.18518518518518517,1.0,1.0,0.5833333333041666,0.40759390690225217 +What can be done to prevent algorithmic bias in automated systems?,['• Auditing and assessment'],"Preventing algorithmic bias in automated systems involves several strategies and best practices: + +1. **Diverse and Representative Data**: Ensure that the training data used for machine learning models is diverse and representative of all relevant groups. This helps to avoid biases that can arise from underrepresented or overrepresented groups. + +2. **Regular Auditing and Assessment**: Conduct regular audits and assessments of the automated systems to identify and address any biases. This includes evaluating the system's outputs to ensure they are fair and equitable across different groups. + +3. **Bias Detection Tools**: Utilize bias detection tools and techniques to identify and measure bias in algorithms. These tools can help highlight areas where the system may be unfairly discriminating against certain groups. + +4. **Transparent Processes**: Maintain transparency in the development and deployment of automated systems. Document and share the decision-making processes, data sources, and algorithms used to build the system. + +5. **Inclusive Design**: Involve a diverse group of stakeholders, including those who may be affected by the system, in the design and development process. This can help ensure that different perspectives are considered and potential biases are addressed early on. + +6. **Ethical Guidelines and Standards**: Follow ethical guidelines and industry standards related to fairness and bias in AI. These guidelines can provide a framework for identifying and mitigating bias. + +7. **Continuous Monitoring and Feedback**: Implement continuous monitoring of the system's performance and gather feedback from users to identify and rectify any biases that may emerge over time. + +8. **Algorithmic Fairness Techniques**: Apply algorithmic fairness techniques such as reweighting, resampling, or adjusting decision thresholds to mitigate bias in the model's predictions.
+ +By incorporating these strategies, organizations can work towards developing automated systems that are fairer and less biased.","To prevent algorithmic bias in automated systems, proactive equity assessments should be conducted during the design phase to identify potential discrimination and effects on equity. Data used in system development should be representative and reviewed for bias, and the use of demographic information should be avoided to prevent algorithmic discrimination. Proactive testing should be performed to identify and remove proxies that may lead to discrimination, and organizations should monitor systems closely for any resulting algorithmic discrimination.",0.0625,0.9797285481049035,0.0,0.9999999999,0.7773595134074495 +How do you ensure ethical data collection and privacy?,"['You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive.' + '>>>\norganizations should follow human subjects research requirements and best practices such as informed consent and subject compensation.\n>>>' + 'Any consent requests should be brief, be understandable in plain language, and give you agency over data collection and the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and related inferences should only be used for necessary functions, and you should be protected by ethical review and use prohibitions. You and your communities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least pre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties.' + 'sent should only be used to justify collection of data in cases where it can be \nappropriately and meaningfully given. Any consent requests should be brief, \nbe understandable in plain language, and give you agency over data collection \nand the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. Enhanced \nprotections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and \nfor data pertaining to youth should put you first. In sensitive domains, your \ndata and related inferences should only be used for necessary functions, and \nyou should be protected by ethical review and use prohibitions.
You and your \ncommunities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least \npre-deployment assessment of their potential harms and scope limits to pro-']","Ensuring ethical data collection and privacy involves several key principles and practices: + +1. **Built-in Protections**: Automated systems should have built-in protections to guard against abusive data practices. This includes design choices that ensure privacy protections are included by default. + +2. **User Agency**: Individuals should have control over how their data is used. This involves seeking permission and respecting decisions regarding data collection, use, access, transfer, and deletion to the greatest extent possible. Where explicit consent is not possible, alternative privacy-by-design safeguards should be used. + +3. **Informed Consent**: Consent requests should be brief, understandable in plain language, and specific to the context of use. This helps ensure that users can meaningfully give their consent. + +4. **Minimization of Data Collection**: Only data strictly necessary for the specific context should be collected. This means data collection should conform to reasonable expectations and avoid collecting excessive information. + +5. **Enhanced Protections for Sensitive Domains**: Data and inferences related to sensitive areas such as health, work, education, criminal justice, and finance should have enhanced protections and restrictions. These protections should prioritize the user's interests and include ethical review and use prohibitions. + +6. **Ethical Review and Oversight**: Organizations should follow human subjects research requirements and best practices, including informed consent and subject compensation. Surveillance technologies, in particular, should undergo pre-deployment assessment to evaluate potential harms and include scope limits to protect privacy and civil liberties. + +7. **Transparent User Experience**: Systems should avoid employing user experience and design decisions that obfuscate user choice or burden users with privacy-invasive defaults. + +By adhering to these principles and practices, designers, developers, and deployers of automated systems can ensure ethical data collection and robust privacy protections.","To ensure ethical data collection and privacy, designers, developers, and deployers of automated systems should seek user permission and respect their decisions regarding data collection, use, access, transfer, and deletion. They should implement built-in protections, ensure data collection conforms to reasonable expectations, and only collect data that is strictly necessary. Consent should be meaningful and understandable, and enhanced protections should be in place for sensitive domains. Additionally, there should be oversight of surveillance technologies to protect privacy and civil liberties.",1.0,0.9597544062519486,1.0,0.999999999975,0.6343581776742813 +What are the perks of logging GAI incidents for AI risk mgmt?,"['Documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history and metadata can also empower AI Actors responding to and managing AI incidents.']","Logging GAI (General Artificial Intelligence) incidents offers several benefits for AI risk management, including: + +1.
**Smoother Information Sharing**: Documentation practices like logging and recording incidents facilitate easier and more effective communication between relevant AI actors. This can help in quickly disseminating critical information and responses. + +2. **Enhanced Incident Response**: Detailed logs provide a comprehensive view of incidents, enabling AI actors to respond more effectively. They can analyze past incidents to understand what went wrong and how similar issues can be prevented in the future. + +3. **Change Management**: Keeping records of changes, including version history and metadata, helps in tracking the evolution of AI systems. This is crucial for understanding how changes impact system behavior and for rolling back to previous versions if necessary. + +4. **Root Cause Analysis**: Analyzing incident logs allows for thorough investigation of the root causes of incidents. This can lead to better understanding and mitigation of risks associated with AI systems. + +5. **Compliance and Accountability**: Maintaining detailed logs can ensure compliance with regulatory requirements and help in demonstrating accountability. This is particularly important in environments with stringent legal and ethical standards. + +6. **Continuous Improvement**: By regularly analyzing incident logs, organizations can identify patterns and trends that inform ongoing improvements to AI systems. This leads to more robust and resilient AI over time. + +7. **Empowerment of AI Actors**: Access to comprehensive incident records empowers AI actors to make informed decisions. They can better manage and respond to future incidents based on historical data. + +Overall, logging GAI incidents is a critical practice for enhancing the safety, reliability, and accountability of AI systems.","Logging GAI incidents can facilitate smoother sharing of information with relevant AI Actors, empower them in responding to and managing AI incidents, and improve GAI risk management across the AI ecosystem. It also aids in documenting and reviewing third-party inputs and plugins, which is crucial for incident disclosure.",0.13636363636363635,0.9742359859160864,0.5,0.9999999999,0.8951538444074862 +What makes it hard for people to challenge algorithmic decisions?,[],"Challenging algorithmic decisions can be difficult for several reasons: + +1. **Lack of Transparency**: Algorithms, especially complex ones like deep learning models, can be opaque and difficult to understand. This lack of transparency makes it hard for individuals to know how decisions are being made or to identify potential errors or biases. + +2. **Technical Complexity**: The technical nature of algorithms means that understanding and challenging them often requires specialized knowledge and skills that many people do not possess. + +3. **Data Access**: Individuals may not have access to the data used by the algorithm, which is necessary to understand and potentially contest its decisions. + +4. **Legal and Regulatory Barriers**: There may be insufficient legal frameworks or regulations in place to support individuals in challenging algorithmic decisions. Existing laws might not adequately address the nuances of algorithmic decision-making. + +5. **Power Imbalance**: Organizations that deploy algorithms often have more resources and expertise than individuals, creating a power imbalance that can make it intimidating or difficult for people to challenge decisions. + +6. 
**Perceived Authority of Algorithms**: There is often a perception that algorithmic decisions are objective and infallible, which can discourage people from questioning them. + +Addressing these challenges requires efforts to increase transparency, improve public understanding of algorithms, ensure accessible data, strengthen legal protections, and balance the power dynamics between individuals and organizations using algorithms.","People find it hard to challenge algorithmic decisions because they are often denied the knowledge needed to address the impact of automated systems on their lives. The decision-making processes of these systems tend to be opaque and complex, making it difficult for individuals to ascertain how or why a decision was made. Additionally, the lack of clear and timely explanations can hinder their ability to contest decisions effectively.",1.0,0.9473522437043465,0.0,0.0,0.3886310814902103 +How is AI performance evaluated with human safety and privacy in mind?,"['evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections.']","AI performance is evaluated with human safety and privacy in mind through a combination of methods, including: + +1. **Algorithmic Impact Assessments**: These assessments evaluate the potential effects of an AI system on individuals and society. They involve a thorough examination of the AI's design, deployment, and operation to identify any risks related to safety and privacy. + +2. **Disparity Testing**: This involves testing the AI system to identify any disparities or biases in its performance across different groups. The results of disparity testing help to understand whether the AI system performs equitably and does not disproportionately impact certain individuals or groups. + +3. **Mitigation Information**: When disparities or safety concerns are identified, mitigation strategies are developed and implemented. This could involve modifying the AI model, changing the data it uses, or implementing additional safeguards to protect human safety and privacy. + +4. **Public Reporting**: Whenever possible, the results of these evaluations and the mitigation strategies should be made publicly available. This transparency allows for external scrutiny and helps to build trust in the AI system by showing that safety and privacy concerns are being actively addressed. + +By combining these approaches, AI systems can be evaluated and adjusted to ensure they operate safely and respect user privacy.","AI performance is evaluated with human safety and privacy in mind by implementing measures such as assessing and managing statistical biases related to GAI content provenance, documenting how content provenance data is tracked, providing human subjects with options to withdraw participation or revoke consent, and using techniques like anonymization and differential privacy to minimize risks associated with linking AI-generated content back to individual human subjects.",0.07142857142857142,0.9949823637401242,0.0,0.9999999999,0.3070506043810194 +What issues come from easy access to obscene content?,[],"Easy access to obscene content can lead to several issues, including: + +1. 
**Psychological Impact**: Exposure to obscene content, especially at a young age, can lead to an altered perception of sex and relationships, potentially causing issues like desensitization, unrealistic expectations, and problematic sexual behaviors. + +2. **Addiction**: Repeated exposure to obscene content can lead to addictive behaviors, where individuals may increasingly seek out more extreme content to achieve the same level of arousal. + +3. **Impact on Relationships**: Consumption of obscene content can negatively impact real-life relationships, leading to issues like decreased intimacy, unrealistic expectations from partners, and potential infidelity. + +4. **Legal and Ethical Concerns**: Easy access to obscene content can sometimes lead to the consumption of illegal material, such as child pornography, which has severe legal consequences and ethical implications. + +5. **Impact on Minors**: Children and teenagers who access obscene content can suffer from developmental issues, including distorted views on consent, sex, and relationships, which can affect their psychological and emotional development. + +6. **Social and Cultural Effects**: Widespread access to obscene content can influence societal norms and attitudes towards sex, potentially leading to objectification and the perpetuation of harmful stereotypes. + +Addressing these issues often involves implementing better access controls, educating individuals (particularly young people) about the potential harms, and promoting healthy, respectful attitudes towards sex and relationships.","Easy access to obscene content can lead to the production of and access to obscene, degrading, and/or abusive imagery, which can cause harm, including synthetic child sexual abuse material (CSAM) and nonconsensual intimate images (NCII) of adults.",1.0,0.9784370190678408,0.0,0.0,0.311732479068065 +How do user feedback and community input assess AI risks?,['the results of public consultation such as concerns raised and any decisions made due to these concerns'],"User feedback and community input play a crucial role in assessing AI risks by providing diverse perspectives and highlighting potential concerns that might not be initially apparent to developers and researchers. Here are some ways in which they contribute: + +1. **Identifying Unforeseen Issues**: Users and community members can identify problems or risks that were not anticipated by the creators of the AI system. This includes ethical concerns, biases, and unintended consequences. + +2. **Highlighting Real-World Impact**: Feedback from actual users can shed light on how the AI system performs in real-world scenarios, which might differ significantly from controlled testing environments. This real-world input can reveal practical risks and usability issues. + +3. **Diverse Perspectives**: Community input often encompasses a wide range of views and experiences, which can help in understanding the broader societal impacts of AI systems. This diversity is crucial for identifying risks that might affect different demographics in various ways. + +4. **Improving Transparency and Trust**: Engaging with the community and incorporating their feedback can improve the transparency of AI development processes. This, in turn, builds trust and ensures that the AI systems are aligned with public values and expectations. + +5. 
**Regulatory and Ethical Guidance**: Public consultations and feedback can inform regulatory bodies and ethical guidelines by providing evidence of public concern or approval, which is essential for shaping policies that govern AI development and deployment. + +Overall, user feedback and community input are vital for a comprehensive assessment of AI risks, ensuring that AI systems are safe, ethical, and beneficial to society.","User feedback and community input assess AI risks through established feedback processes that allow end users and impacted communities to report problems and appeal system outcomes. These processes are integrated into AI system evaluation metrics, which include conducting impact assessments on how AI-generated content might affect different social, economic, and cultural groups, as well as understanding user perceptions and interactions with GAI content.",0.7368421052631579,0.9939107648348567,0.0,0.9999999999,0.8246337265754836 +What should automated systems consider for consent and ethics in sensitive data?,[],"When dealing with sensitive data, automated systems should consider several factors to ensure consent and ethical handling: + +1. **Informed Consent**: Ensure that individuals are fully aware of what data is being collected, how it will be used, and who it will be shared with. This involves clear, understandable, and accessible communication. + +2. **Data Minimization**: Collect only the data that is necessary for the purpose at hand. Avoid gathering excessive or irrelevant information. + +3. **Transparency**: Provide transparency about the data processing activities, including how data is stored, processed, and protected. This builds trust and allows individuals to make informed decisions. + +4. **Privacy by Design**: Integrate privacy considerations into the design and architecture of systems from the outset, rather than as an afterthought. + +5. **Anonymization and Encryption**: Use techniques such as anonymization to remove personally identifiable information and encryption to protect data during storage and transmission. + +6. **Compliance with Legal and Ethical Standards**: Adhere to relevant laws and regulations, such as GDPR, HIPAA, or other applicable data protection laws. Ethical guidelines should also be followed, even in the absence of specific legal requirements. + +7. **Accountability and Auditing**: Implement mechanisms for accountability, including regular audits and assessments to ensure that data handling practices comply with consent and ethical standards. + +8. **Risk Assessment and Mitigation**: Conduct regular risk assessments to identify potential threats to data privacy and take steps to mitigate these risks. + +9. **User Control and Rights**: Provide users with control over their data, including the ability to access, correct, delete, and restrict the processing of their data. + +10. **Ethical Use of Data**: Ensure that the use of data aligns with ethical principles, avoiding harm to individuals and considering the broader societal impacts. + +By addressing these factors, automated systems can better ensure that sensitive data is handled ethically and with proper consent.","Automated systems should consider that consent for sensitive data may need to be acquired from a guardian and/or child, and that consent for non-necessary functions should be optional. 
Additionally, any use of sensitive data or decision processes based on sensitive data that might limit rights, opportunities, or access should undergo a thorough ethical review and monitoring. This includes ensuring that data quality is maintained to avoid adverse consequences from flawed data, limiting access to sensitive data based on necessity, and providing regular public reports on data security lapses and ethical pre-reviews.",0.0,0.9361991969080089,0.0,0.0,0.8702052328288826 +"What links are there between digital IDs, welfare efficiency, and community impacts?",[],"Digital IDs can significantly influence welfare efficiency and community impacts through several interrelated mechanisms: + +1. **Improved Access to Services**: Digital IDs can streamline access to welfare programs, ensuring that resources are distributed more efficiently and reach those in need more quickly. This can reduce administrative costs and minimize the risk of errors or fraud. + +2. **Increased Inclusivity**: By providing a reliable form of identification, digital IDs can help marginalized or undocumented individuals gain access to essential services, such as healthcare, education, and financial services. This inclusivity can enhance overall community well-being and reduce inequalities. + +3. **Data-Driven Decision Making**: Digital IDs can facilitate the collection of accurate and comprehensive data, enabling governments and organizations to make more informed decisions about resource allocation and program implementation. This can improve the effectiveness of welfare initiatives and ensure that they are tailored to the specific needs of the community. + +4. **Community Empowerment**: With digital IDs, individuals can have more control over their personal information and interactions with various service providers. This empowerment can foster a sense of agency and trust within communities, as people feel more secure and recognized by the systems that serve them. + +5. **Enhanced Transparency and Accountability**: The use of digital IDs can improve transparency and accountability in the distribution of welfare services. By providing a clear record of transactions and interactions, digital IDs can help reduce corruption and ensure that resources are used appropriately. + +Overall, the integration of digital IDs can lead to more efficient welfare systems, greater inclusivity, and positive community impacts by ensuring that services are accessible, equitable, and responsive to the needs of all individuals.","Panelists described the increasing scope of technology use in providing for social welfare, including digital ID systems, which are focused on improving efficiency and reducing cost. However, they cautioned that these systems may reduce the burden for government agencies by increasing the burden and agency of people using and interacting with these technologies. Additionally, these systems can produce feedback loops and compounded harm, collecting data from communities and using it to reinforce inequality. To mitigate these harms, it was suggested that community input should be ensured at the beginning of the design process, and there should be ways to opt out of these systems and use associated human-driven mechanisms instead.",0.9032258064516129,0.9665352297169627,0.0,0.0,0.6747466057211955 +What drives extra data protections in health and finance?,[],"Extra data protections in health and finance are driven by the sensitivity and value of the information involved in these sectors. 
Here are some key reasons: + +1. **Personal Privacy**: Health and financial data are highly personal and sensitive. Unauthorized access or disclosure can lead to significant privacy violations. For example, medical records can reveal intimate details about a person's health, while financial records can expose someone's financial status and transactions. + +2. **Regulatory Requirements**: Both sectors are subject to stringent regulations that mandate the protection of data. In health, regulations like the Health Insurance Portability and Accountability Act (HIPAA) in the United States require strict confidentiality and security measures. In finance, regulations such as the General Data Protection Regulation (GDPR) in the European Union and the Gramm-Leach-Bliley Act (GLBA) in the United States impose similar requirements. + +3. **Risk of Fraud and Identity Theft**: Financial data, in particular, can be used for fraudulent activities and identity theft. Protecting this data helps prevent financial crimes. + +4. **Trust and Reputation**: Organizations in health and finance rely heavily on the trust of their clients and patients. Breaches of data can severely damage their reputation and erode trust, which can be detrimental to their business. + +5. **Legal and Financial Consequences**: Data breaches can lead to significant legal and financial repercussions, including lawsuits, fines, and other penalties. Organizations implement extra data protections to mitigate these risks. + +These factors collectively drive the need for robust data protection measures in the health and finance sectors.","Extra data protections in health and finance are driven by the intimate nature of these domains, the inability of individuals to opt out in a meaningful way, and the historical discrimination that has often accompanied data knowledge. Additionally, the potential for material harms, including significant adverse effects on human rights such as autonomy and dignity, civil liberties, and civil rights, necessitates enhanced protections.",0.9354838709677419,0.9824600205457986,0.0,0.0,0.7597017222269069 +What insights did OSTP seek from experts in AI Bill of Rights panels?,[],"The Office of Science and Technology Policy (OSTP) sought insights from experts in AI Bill of Rights panels to address key areas related to AI safety, ethics, and governance. This included understanding how to ensure that AI systems are safe and effective, protecting individuals and communities from harm, ensuring fairness and non-discrimination, preserving privacy, and promoting transparency and accountability in AI technologies. The goal was to gather a diverse range of perspectives to inform the development of guidelines and policies that would protect the public while fostering innovation in AI.","OSTP sought insights and analysis on the risks, harms, benefits, and policy opportunities of automated systems from a variety of experts, practitioners, advocates, and federal government officials during the AI Bill of Rights panels. 
The discussions focused on consumer rights and protections, the criminal justice system, equal opportunities and civil justice, artificial intelligence and democratic values, social welfare and development, and the healthcare system.",0.0,0.9106766525020542,0.0,0.0,0.6886059241244874 +What key elements ensure clarity in docs about an automated system's impact?,"['Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in use, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice should be kept up-to-date and people impacted by the system should be notified of significant use case or key functionality changes.' + 'Reporting that includes summary information about these automated systems in plain language and assessments of the clarity and quality of the notice and explanations should be made public whenever possible.' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Reporting should be provided in a plain language and machine-readable manner.' + 'justification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness.']","To ensure clarity in documentation about an automated system's impact, the following key elements should be included: + +1. **Plain Language Descriptions**: Provide clear and accessible descriptions of the overall system functioning and the role of automation. +2. **Notice of Use**: Clearly indicate that such systems are in use and provide information on who is responsible for the system. +3. **Explanations of Outcomes**: Offer timely, clear, and accessible explanations of outcomes produced by the system. +4. **Up-to-Date Information**: Keep all notices and explanations updated, especially when there are significant changes in use cases or key functionalities. +5. **Data Documentation**: Include descriptions of any data used to train machine learning models or for other purposes, how data sources were processed and interpreted, and justifications for data relevancy. +6.
**Risk Assessments**: Document risk identification and management assessments, and outline any steps taken to mitigate potential harms. +7. **Performance Testing Results**: Provide results of performance testing, including accuracy, differential demographic impacts, error rates, and comparisons to previously deployed systems. +8. **Public Consultation Results**: Summarize public consultation outcomes, concerns raised, and any resultant decisions. +9. **Ongoing Monitoring**: Detail ongoing monitoring procedures and regular performance testing reports, including monitoring frequency. + +Additionally, all reporting should be in a plain language and machine-readable manner to ensure accessibility and understanding by diverse audiences.","Key elements that ensure clarity in documentation about an automated system's impact include providing generally accessible plain language documentation, clear descriptions of the overall system functioning and the role of automation, timely updates about significant use case or key functionality changes, and explanations of outcomes that are clear, timely, and accessible.",0.9629629629629629,0.9499616446326086,1.0,0.999999999975,0.5101588114940716 +What biases to note for pre-deployment measurement error models?,[],"In the context of pre-deployment measurement error models for AI systems, it is essential to be aware of several potential biases and issues that could affect the performance and safety of the AI. Here are some key biases to consider: + +1. **Training Data Bias**: If the training data used to develop the AI system is biased, the model will likely inherit these biases. This can lead to systematic errors in predictions and measurements. + +2. **Sampling Bias**: This occurs when the data sampled to train or validate the model is not representative of the broader population or real-world scenarios the AI will encounter post-deployment. + +3. **Measurement Bias**: Errors that arise from inaccuracies in the measurement process itself. For instance, if the tools or methodologies used to collect data are flawed, this can introduce bias. + +4. **Observer Bias**: The introduction of bias by the individuals who collect or annotate the training data, consciously or unconsciously. + +5. **Algorithmic Bias**: Biases that originate from the algorithms used, which may favor certain outcomes due to their design or the heuristics they employ. + +6. **Feedback Loop Bias**: Occurs when an AI system's predictions influence the environment or behaviors being measured, thereby reinforcing certain biases over time. + +7. **Deployment Context Bias**: The environment where the AI system is deployed may differ significantly from the training environment, leading to performance degradation and biased outcomes. + +8. **Confirmation Bias**: The tendency to favor information that confirms existing beliefs or hypotheses, which can skew the model development and evaluation process. + +Addressing these biases requires careful design, validation, and continual monitoring of AI systems, along with diverse and representative training data, robust error analysis, and transparent methodologies.","The context mentions documenting biases or statistical variance in applied metrics or structured human feedback processes, particularly when modeling complex societal constructs such as hateful content. 
However, it does not specify particular biases to note for pre-deployment measurement error models.",0.0,0.9514810246235371,0.0,0.0,0.8848503479546943 +"Which automated systems affect equal opportunities in edu, housing, & jobs?","['Education-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems,']","Automated systems that can affect equal opportunities in education, housing, and jobs include: + +1. **Education**: + - Algorithms for detecting student cheating or plagiarism. + - Admissions algorithms that determine which applicants are accepted to schools or programs. + - Online or virtual reality student monitoring systems. + +2. **Housing**: + - Algorithms used by landlords or real estate companies to screen potential tenants. + - Systems that evaluate creditworthiness or risk for mortgage approvals. + - Platforms that determine rental prices based on various data inputs. + +3. **Jobs**: + - Recruitment algorithms that filter job applications and resumes. + - Automated interview systems and personality assessments. + - Performance monitoring and employee evaluation tools. + +These systems can potentially introduce biases or unfair practices if not designed and monitored carefully, impacting equal opportunities across these sectors.","Automated systems that affect equal opportunities in education include algorithms that detect student cheating or plagiarism, admissions algorithms, online or virtual reality student monitoring systems, projections of student progress or outcomes, algorithms that determine access to resources or programs, and surveillance of classes. In housing, tenant screening algorithms, automated valuation systems for mortgage underwriting or home insurance, and automated valuations from online aggregator websites are relevant. For employment, workplace algorithms that inform terms and conditions of employment, hiring or termination algorithms, virtual or augmented reality workplace training programs, and electronic workplace surveillance and management systems are included.",0.47619047619047616,0.9832243032189446,0.25,0.9999999999,0.8852517418201972 diff --git a/Tasks/Task 3/ai-safety-sdg.csv b/Tasks/Task 3/ai-safety-sdg.csv new file mode 100644 index 0000000000000000000000000000000000000000..8ec8958453cdd477760d398e09095721ea567bdd --- /dev/null +++ b/Tasks/Task 3/ai-safety-sdg.csv @@ -0,0 +1,275 @@ +question,contexts,ground_truth,evolution_type,metadata,episode_done +What actions did the OSTP take to engage with stakeholders regarding the use of artificial intelligence and biometric technologies?,"['APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew America's Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J.
Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation \nStephanie Dinkins and the Future \nHistories Studio at Stony Brook \nUniversity \nTechNet \nThe Alliance for Media Arts and \nCulture, MIT Open Documentary \nLab and Co-Creation Studio, and \nImmerse \nThe International Brotherhood of \nTeamsters \nThe Leadership Conference on \nCivil and Human Rights \nThorn \nU.S. Chamber of Commerce's \nTechnology Engagement Center \nUber Technologies \nUniversity of Pittsburgh \nUndergraduate Student \nCollaborative \nUpturn \nUS Technology Policy Committee \nof the Association of Computing \nMachinery \nVirginia Puccio \nVisar Berisha and Julie Liss \nXR Association \nXR Safety Initiative \n• As an additional effort to reach out to stakeholders regarding the RFI, OSTP conducted two listening sessions\nfor members of the public. The listening sessions together drew upwards of 300 participants. The Science and\nTechnology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61\n', 'APPENDIX\nSummaries of Additional Engagements: \n• OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of\nartificial intelligence and other data-driven technologies in their lives.\n• OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The\npurpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or\nplanned use; the domains in which these technologies are being used; the entities making use of them; current\nprinciples, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their\nuse or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below\nlisted organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L.
Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium \nat Computing Research Association \nConnected Health Initiative \nConsumer Technology Association \nCourtney Radsch \nCoworker \nCyber Farm Labs \nData & Society Research Institute \nData for Black Lives \nData to Actionable Knowledge Lab \nat Harvard University \nDeloitte \nDev Technology Group \nDigital Therapeutics Alliance \nDigital Welfare State & Human \nRights Project and Center for \nHuman Rights and Global Justice at \nNew York University School of \nLaw, and Temple University \nInstitute for Law, Innovation & \nTechnology \nDignari \nDouglas Goddard \nEdgar Dworsky \nElectronic Frontier Foundation \nElectronic Privacy Information \nCenter, Center for Digital \nDemocracy, and Consumer \nFederation of America \nFaceTec \nFight for the Future \nGanesh Mani \nGeorgia Tech Research Institute \nGoogle \nHealth Information Technology \nResearch and Development \nInteragency Working Group \nHireVue \nHR Policy Association \nID.me \nIdentity and Data Sciences \nLaboratory at Science Applications \nInternational Corporation \nInformation Technology and \nInnovation Foundation \nInformation Technology Industry \nCouncil \nInnocence Project \nInstitute for Human-Centered \nArtificial Intelligence at Stanford \nUniversity \nIntegrated Justice Information \nSystems Institute \nInternational Association of Chiefs \nof Police \nInternational Biometrics + Identity \nAssociation \nInternational Business Machines \nCorporation \nInternational Committee of the Red \nCross \nInventionphysics \niProov \nJacob Boudreau \nJennifer K. Wagner, Dan Berger, \nMargaret Hu, and Sara Katsanis \nJonathan Barry-Blocker \nJoseph Turow \nJoy Buolamwini \nJoy Mack \nKaren Bureau \nLamont Gholston \nLawyers' Committee for Civil \nRights Under Law \n60\n']","OSTP engaged with stakeholders regarding the use of artificial intelligence and biometric technologies by conducting two listening sessions for members of the public, which drew upwards of 300 participants.
Additionally, OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of artificial intelligence and issued a Request For Information (RFI) on the use and governance of biometric technologies to understand their extent, variety, and the stakeholders impacted by their use or regulation.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 60, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 59, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the potential issues associated with automated performance evaluation in the workplace?,"[' \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nAn unemployment benefits system in Colorado required, as a condition of accessing benefits, that applicants\nhave a smartphone in order to verify their identity. No alternative human option was readily available,\nwhich denied many people access to benefits.101\n•\nA fraud detection system for unemployment insurance distribution incorrectly flagged entries as fraudulent,\nleading to people with slight discrepancies or complexities in their files having their wages withheld and tax\nreturns seized without any chance to explain themselves or receive a review by a person.102\n• A patient was wrongly denied access to pain medication when the hospital's software confused her medication history with that of her dog's.
Even after she tracked down an explanation for the problem, doctors\nwere afraid to override the system, and she was forced to go without pain relief due to the system's error.103\n• A large corporation automated performance evaluation and other HR functions, leading to workers being\nfired by an automated system without the possibility of human review, appeal or other form of recourse.104 \n48\n']","The potential issues associated with automated performance evaluation in the workplace include workers being fired by an automated system without the possibility of human review, appeal, or other forms of recourse.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 47, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What role does synthetic content detection play in managing risks associated with AI-generated outputs?,"[' \n51 \ngeneral public participants. For example, expert AI red-teamers could modify or verify the \nprompts written by general public AI red-teamers. These approaches may also expand coverage \nof the AI risk attack surface. \n• \nHuman / AI: Performed by GAI in combination with specialist or non-specialist human teams. \nGAI-led red-teaming can be more cost effective than human red-teamers alone. Human or GAI-\nled AI red-teaming may be better suited for eliciting different types of harms. \n \nA.1.6. Content Provenance \nOverview \nGAI technologies can be leveraged for many applications such as content generation and synthetic data. \nSome aspects of GAI outputs, such as the production of deepfake content, can challenge our ability to \ndistinguish human-generated content from AI-generated synthetic content. To help manage and mitigate \nthese risks, digital transparency mechanisms like provenance data tracking can trace the origin and \nhistory of content. Provenance data tracking and synthetic content detection can help facilitate greater \ninformation access about both authentic and synthetic content to users, enabling better knowledge of \ntrustworthiness in AI systems. When combined with other organizational accountability mechanisms, \ndigital content transparency approaches can enable processes to trace negative outcomes back to their \nsource, improve information integrity, and uphold public trust. Provenance data tracking and synthetic \ncontent detection mechanisms provide information about the origin and history of content to assist in \nGAI risk management efforts. \nProvenance metadata can include information about GAI model developers or creators of GAI content, \ndate/time of creation, location, modifications, and sources. Metadata can be tracked for text, images, \nvideos, audio, and underlying datasets. The implementation of provenance data tracking techniques can \nhelp assess the authenticity, integrity, intellectual property rights, and potential manipulations in digital \ncontent. Some well-known techniques for provenance data tracking include digital watermarking, \nmetadata recording, digital fingerprinting, and human authentication, among others.
\nProvenance Data Tracking Approaches \nProvenance data tracking techniques for GAI systems can be used to track the history and origin of data \ninputs, metadata, and synthetic content. Provenance data tracking records the origin and history for \ndigital content, allowing its authenticity to be determined. It consists of techniques to record metadata \nas well as overt and covert digital watermarks on content. Data provenance refers to tracking the origin \nand history of input data through metadata and digital watermarking techniques. Provenance data \ntracking processes can include and assist AI Actors across the lifecycle who may not have full visibility or \ncontrol over the various trade-offs and cascading impacts of early-stage model decisions on downstream \nperformance and synthetic outputs. For example, by selecting a watermarking model to prioritize \nrobustness (the durability of a watermark), an AI actor may inadvertently diminish computational \ncomplexity (the resources required to implement watermarking). Organizational risk management \nefforts for enhancing content provenance include: \n• \nTracking provenance of training data and metadata for GAI systems; \n• \nDocumenting provenance data limitations within GAI systems; \n']","Synthetic content detection plays a crucial role in managing risks associated with AI-generated outputs by helping to distinguish human-generated content from AI-generated synthetic content. It facilitates greater information access about both authentic and synthetic content, enabling users to better understand the trustworthiness of AI systems. Additionally, it can assist in tracing negative outcomes back to their source, improving information integrity, and upholding public trust.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 54, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What role does risk management play in the implementation of feedback activities for AI systems?,"[' \n50 \nParticipatory Engagement Methods \nOn an ad hoc or more structured basis, organizations can design and use a variety of channels to engage \nexternal stakeholders in product development or review. Focus groups with select experts can provide \nfeedback on a range of issues. Small user studies can provide feedback from representative groups or \npopulations. Anonymous surveys can be used to poll or gauge reactions to specific features. Participatory \nengagement methods are often less structured than field testing or red teaming, and are more \ncommonly used in early stages of AI or product development. \nField Testing \nField testing involves structured settings to evaluate risks and impacts and to simulate the conditions \nunder which the GAI system will be deployed. Field style tests can be adapted from a focus on user \npreferences and experiences towards AI risks and impacts – both negative and positive. When carried \nout with large groups of users, these tests can provide estimations of the likelihood of risks and impacts \nin real world interactions. 
\nOrganizations may also collect feedback on outcomes, harms, and user experience directly from users in \nthe production environment after a model has been released, in accordance with human subject \nstandards such as informed consent and compensation. Organizations should follow applicable human \nsubjects research requirements, and best practices such as informed consent and subject compensation, \nwhen implementing feedback activities. \nAI Red-teaming \nAI red-teaming is an evolving practice that references exercises often conducted in a controlled \nenvironment and in collaboration with AI developers building AI models to identify potential adverse \nbehavior or outcomes of a GAI model or system, how they could occur, and stress test safeguards". AI \nred-teaming can be performed before or after AI models or systems are made available to the broader \npublic; this section focuses on red-teaming in pre-deployment contexts. \nThe quality of AI red-teaming outputs is related to the background and expertise of the AI red team \nitself. Demographically and interdisciplinarily diverse AI red teams can be used to identify flaws in the \nvarying contexts where GAI will be used. For best results, AI red teams should demonstrate domain \nexpertise, and awareness of socio-cultural aspects within the deployment context. AI red-teaming results \nshould be given additional analysis before they are incorporated into organizational governance and \ndecision making, policy and procedural updates, and AI risk management efforts. \nVarious types of AI red-teaming may be appropriate, depending on the use case: \n• \nGeneral Public: Performed by general users (not necessarily AI or technical experts) who are \nexpected to use the model or interact with its outputs, and who bring their own lived \nexperiences and perspectives to the task of AI red-teaming. These individuals may have been \nprovided instructions and material to complete tasks which may elicit harmful model behaviors. \nThis type of exercise can be more effective with large groups of AI red-teamers. \n• \nExpert: Performed by specialists with expertise in the domain or specific AI red-teaming context \nof use (e.g., medicine, biotech, cybersecurity). \n• \nCombination: In scenarios when it is difficult to identify and recruit specialists with sufficient \ndomain and contextual expertise, AI red-teaming exercises may leverage both expert and \n']",The answer to given question is not present in context,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 53, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What concerns arise from companies using surveillance software to track employee discussions about union activity?,"["" \n \n \n \nDATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. 
\n•\nAn insurer might collect data from a person's social media presence as part of deciding what life\ninsurance rates they should be offered.64\n•\nA data broker harvested large amounts of personal data and then suffered a breach, exposing hundreds of\nthousands of people to potential identity theft. 65\n•\nA local public housing authority installed a facial recognition system at the entrance to housing complexes to\nassist law enforcement with identifying individuals viewed via camera when police reports are filed, leading\nthe community, both those living in the housing complex and not, to have videos of them sent to the local\npolice department and made available for scanning by its facial recognition software.66\n•\nCompanies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32\n""]","Concerns arise from companies using surveillance software to track employee discussions about union activity, as it leads to the surveillance of individual employees and allows companies to surreptitiously intervene in discussions.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 31, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the purpose of the cross-sectoral profile in the context of the AI Risk Management Framework for Generative AI?,"[' \n1 \n1. \nIntroduction \nThis document is a cross-sectoral profile of and companion resource for the AI Risk Management \nFramework (AI RMF 1.0) for Generative AI,1 pursuant to President Biden's Executive Order (EO) 14110 on \nSafe, Secure, and Trustworthy Artificial Intelligence.2 The AI RMF was released in January 2023, and is \nintended for voluntary use and to improve the ability of organizations to incorporate trustworthiness \nconsiderations into the design, development, use, and evaluation of AI products, services, and systems. \nA profile is an implementation of the AI RMF functions, categories, and subcategories for a specific \nsetting, application, or technology – in this case, Generative AI (GAI) – based on the requirements, risk \ntolerance, and resources of the Framework user. AI RMF profiles assist organizations in deciding how to \nbest manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory \nrequirements and best practices, and reflects risk management priorities. Consistent with other AI RMF \nprofiles, this profile offers insights into how risk can be managed across various stages of the AI lifecycle \nand for GAI as a technology. \nAs GAI covers risks of models or applications that can be used across use cases or sectors, this document \nis an AI RMF cross-sectoral profile. Cross-sectoral profiles can be used to govern, map, measure, and \nmanage risks associated with activities or business processes common across sectors, such as the use of \nlarge language models (LLMs), cloud-based services, or acquisition. \nThis document defines risks that are novel to or exacerbated by the use of GAI. 
After introducing and \ndescribing these risks, the document provides a set of suggested actions to help organizations govern, \nmap, measure, and manage these risks. \n \n \n1 EO 14110 defines Generative AI as "the class of AI models that emulate the structure and characteristics of input \ndata in order to generate derived synthetic content. This can include images, videos, audio, text, and other digital \ncontent." While not all GAI is derived from foundation models, for purposes of this document, GAI generally refers \nto generative foundation models. The foundation model subcategory of "dual-use foundation models" is defined by \nEO 14110 as "an AI model that is trained on broad data; generally uses self-supervision; contains at least tens of \nbillions of parameters; is applicable across a wide range of contexts." \n2 This profile was developed per Section 4.1(a)(i)(A) of EO 14110, which directs the Secretary of Commerce, acting \nthrough the Director of the National Institute of Standards and Technology (NIST), to develop a companion \nresource to the AI RMF, NIST AI 100–1, for generative AI. \n']","The purpose of the cross-sectoral profile in the context of the AI Risk Management Framework for Generative AI is to assist organizations in deciding how to best manage AI risks in a manner that aligns with their goals, considers legal/regulatory requirements and best practices, and reflects risk management priorities. It offers insights into how risk can be managed across various stages of the AI lifecycle and for Generative AI as a technology.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 4, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What measures are proposed in the Blueprint for an AI Bill of Rights to protect the rights of the American public?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n- \nUSING THIS TECHNICAL COMPANION\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the design, \nuse, and deployment of automated systems to protect the rights of the American public in the age of artificial \nintelligence. This technical companion considers each principle in the Blueprint for an AI Bill of Rights and \nprovides examples and concrete steps for communities, industry, governments, and others to take in order to \nbuild these protections into policy, practice, or the technological design process. \nTaken together, the technical protections and practices laid out in the Blueprint for an AI Bill of Rights can help \nguard the American public against many of the potential and actual harms identified by researchers, technolo\xad\ngists, advocates, journalists, policymakers, and communities in the United States and around the world. This \ntechnical companion is intended to be used as a reference by people across many circumstances – anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. 
\nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS: \n• The expectations for automated systems are meant to serve as a blueprint for the development of additional technical\nstandards and practices that should be tailored for particular sectors and contexts.\n• This section outlines practical steps that can be implemented to realize the vision of the Blueprint for an AI Bill of Rights. The \nexpectations laid out often mirror existing practices for technology development, including pre-deployment testing, ongoing \nmonitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguard \ning individuals' rights. These reporting expectations are important for transparency, so the American people can have\nconfidence that their rights, opportunities, and access as well as their expectations about technologies are respected. \n3\nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE: \nThis section provides real-life examples of how these guiding principles can become reality, through laws, policies, and practices. \nIt describes practical technical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe examples provided are not critiques or endorsements, but rather are offered as illustrative cases to help \nprovide a concrete vision for actualizing the Blueprint for an AI Bill of Rights. Effectively implementing these \nprocesses require the cooperation of and collaboration among industry, civil society, researchers, policymakers, \ntechnologists, and the public. \n14\n']","The Blueprint for an AI Bill of Rights proposes a set of five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. 
It includes expectations for automated systems, practical steps for implementation, and emphasizes transparency through reporting to ensure that rights, opportunities, and access are respected.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 13, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the significance of the NSF Program on Fairness in Artificial Intelligence in collaboration with Amazon?,"[' \nENDNOTES\n96. National Science Foundation. NSF Program on Fairness in Artificial Intelligence in Collaboration\nwith Amazon (FAI). Accessed July 20, 2022.\nhttps://www.nsf.gov/pubs/2021/nsf21585/nsf21585.htm\n97. Kyle Wiggers. Automatic signature verification software threatens to disenfranchise U.S. voters.\nVentureBeat. Oct. 25, 2020.\nhttps://venturebeat.com/2020/10/25/automatic-signature-verification-software-threatens-to\xad\ndisenfranchise-u-s-voters/\n98. Ballotpedia. Cure period for absentee and mail-in ballots. Article retrieved Apr 18, 2022.\nhttps://ballotpedia.org/Cure_period_for_absentee_and_mail-in_ballots\n99. Larry Buchanan and Alicia Parlapiano. Two of these Mail Ballot Signatures are by the Same Person.\nWhich Ones? New York Times. Oct. 7, 2020.\nhttps://www.nytimes.com/interactive/2020/10/07/upshot/mail-voting-ballots-signature\xad\nmatching.html\n100. Rachel Orey and Owen Bacskai. The Low Down on Ballot Curing. Nov. 04, 2020.\nhttps://bipartisanpolicy.org/blog/the-low-down-on-ballot-curing/\n101. Andrew Kenney. \'I\'m shocked that they need to have a smartphone\': System for unemployment\nbenefits exposes digital divide. USA Today. May 2, 2021.\nhttps://www.usatoday.com/story/tech/news/2021/05/02/unemployment-benefits-system-leaving\xad\npeople-behind/4915248001/\n102. Allie Gross. UIA lawsuit shows how the state criminalizes the unemployed. Detroit Metro-Times.\nSep. 18, 2015.\nhttps://www.metrotimes.com/news/uia-lawsuit-shows-how-the-state-criminalizes-the\xad\nunemployed-2369412\n103. Maia Szalavitz. The Pain Was Unbearable. So Why Did Doctors Turn Her Away? Wired. Aug. 11,\n2021. https://www.wired.com/story/opioid-drug-addiction-algorithm-chronic-pain/\n104. Spencer Soper. Fired by Bot at Amazon: ""It\'s You Against the Machine"". Bloomberg, Jun. 28, 2021.\nhttps://www.bloomberg.com/news/features/2021-06-28/fired-by-bot-amazon-turns-to-machine\xad\nmanagers-and-workers-are-losing-out\n105. Definitions of 'equity' and 'underserved communities' can be found in the Definitions section of\nthis document as well as in Executive Order on Advancing Racial Equity and Support for Underserved\nCommunities Through the Federal Government:\nhttps://www.whitehouse.gov/briefing-room/presidential-actions/2021/01/20/executive-order\xad\nadvancing-racial-equity-and-support-for-underserved-communities-through-the-federal-government/\n106. HealthCare.gov. Navigator - HealthCare.gov Glossary. 
Accessed May 2, 2022.\nhttps://www.healthcare.gov/glossary/navigator/\n72\n']",The answer to given question is not present in context,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 71, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What measures should be taken to demonstrate the safety and effectiveness of automated systems?,"[' \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be care\xad\nfully validated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indi\xad\ncating adverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law. Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examina\xad\ntion, has benefits for those impacted by the system that outweigh identified risks and, as appropriate, reason\xad\nable measures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g., \nvia application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations. Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. 
\nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the \norganization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency, \nresults, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20\n']","To demonstrate the safety and effectiveness of automated systems, the following measures should be taken: 1. Independent evaluation should be allowed, enabling access for independent evaluators such as researchers and auditors to the system and associated data. 2. Reporting should be regularly updated, including an overview of the system, data used for training, risk assessments, performance testing results, and ongoing monitoring procedures. Reports should be provided in plain language and machine-readable formats.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the purpose of the impact documentation process in the context of GAI systems?,"[' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identification process. 
\nHuman-AI Conļ¬guration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identiļ¬cation of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the eļ¬€ectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. \nInformation Security \n']","The purpose of the impact documentation process in the context of GAI systems is to document the risks and potential impacts of the AI technology designed, developed, deployed, evaluated, and used, and to communicate about these impacts more broadly.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What should be assessed to understand data privacy risks in the use of training data?,"["" \n27 \nMP-4.1-010 \nConduct appropriate diligence on training data use to assess intellectual property, \nand privacy, risks, including to examine whether use of proprietary or sensitive \ntraining data is consistent with applicable laws. \nIntellectual Property; Data Privacy \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities \n \nMAP 5.1: Likelihood and magnitude of each identiļ¬ed impact (both potentially beneļ¬cial and harmful) based on expected use, past \nuses of AI systems in similar contexts, public incident reports, feedback from those external to the team that developed or deployed \nthe AI system, or other data are identiļ¬ed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.1-001 Apply TEVV practices for content provenance (e.g., probing a system's synthetic \ndata generation capabilities for potential misuse or vulnerabilities. \nInformation Integrity; Information \nSecurity \nMP-5.1-002 \nIdentify potential content provenance harms of GAI, such as misinformation or \ndisinformation, deepfakes, including NCII, or tampered content. Enumerate and \nrank risks based on their likelihood and potential impact, and determine how well \nprovenance solutions address speciļ¬c risks and/or harms. 
\nInformation Integrity; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMP-5.1-003 \nConsider disclosing use of GAI to end users in relevant contexts, while considering \nthe objective of disclosure, the context of use, the likelihood and magnitude of the \nrisk posed, the audience of the disclosure, as well as the frequency of the \ndisclosures. \nHuman-AI Configuration \nMP-5.1-004 Prioritize GAI structured public feedback processes based on risk assessment \nestimates. \nInformation Integrity; CBRN \nInformation or Capabilities; \nDangerous, Violent, or Hateful \nContent; Harmful Bias and \nHomogenization \nMP-5.1-005 Conduct adversarial role-playing exercises, GAI red-teaming, or chaos testing to \nidentify anomalous or unforeseen failure modes. \nInformation Security \nMP-5.1-006 \nProfile threats and negative impacts arising from GAI systems interacting with, \nmanipulating, or generating content, and outlining known and potential \nvulnerabilities and the likelihood of their occurrence. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Design, AI Development, AI Impact Assessment, Affected Individuals and Communities, End-\nUsers, Operation and Monitoring \n \n"", ' \n25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at different stages of AI life cycle. \nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. \nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner proficiency with AI system performance and trustworthiness – and relevant \ntechnical standards and certifications – are defined, assessed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certification programs that test proficiency in managing GAI risks and \ninterpreting content provenance, relevant to specific industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proficiency tests from tests of GAI capabilities. \nHuman-AI Configuration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconfigurations for future refinement and improvements. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping \nand testing activities. Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. 
\nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \n \n']","To understand data privacy risks in the use of training data, it is important to conduct appropriate diligence on training data use to assess intellectual property and privacy risks, including examining whether the use of proprietary or sensitive training data is consistent with applicable laws.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 30, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What measures are proposed in the Blueprint for an AI Bill of Rights to protect the rights of the American public?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n- \nUSING THIS TECHNICAL COMPANION\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the design, \nuse, and deployment of automated systems to protect the rights of the American public in the age of artificial \nintelligence. This technical companion considers each principle in the Blueprint for an AI Bill of Rights and \nprovides examples and concrete steps for communities, industry, governments, and others to take in order to \nbuild these protections into policy, practice, or the technological design process. \nTaken together, the technical protections and practices laid out in the Blueprint for an AI Bill of Rights can help \nguard the American public against many of the potential and actual harms identified by researchers, technolo\xad\ngists, advocates, journalists, policymakers, and communities in the United States and around the world. This \ntechnical companion is intended to be used as a reference by people across many circumstances ā€“ anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. \nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. 
What actions were taken by the New York state legislature regarding biometric identifying technology in schools?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nDATA PRIVACY \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe Privacy Act of 1974 requires privacy protections for personal information in federal \nrecords systems, including limits on data retention, and also provides individuals a general \nright to access and correct their data. Among other things, the Privacy Act limits the storage of individual \ninformation in federal systems of records, illustrating the principle of limiting the scope of data retention. Under \nthe Privacy Act, federal agencies may only retain data about an individual that is "relevant and necessary" to \naccomplish an agency's statutory purpose or to comply with an Executive Order of the President. The law allows \nfor individuals to be able to access any of their individual information stored in a federal system of records, if not \nincluded under one of the systems of records exempted pursuant to the Privacy Act. In these cases, federal agen\xad\ncies must provide a method for an individual to determine if their personal information is stored in a particular \nsystem of records, and must provide procedures for an individual to contest the contents of a record about them. \nFurther, the Privacy Act allows for a cause of action for an individual to seek legal relief if a federal agency does not \ncomply with the Privacy Act's requirements. Among other things, a court may order a federal agency to amend or \ncorrect an individual's information in its records or award monetary damages if an inaccurate, irrelevant, untimely, \nor incomplete record results in an adverse determination about an individual's "qualifications, character, rights, … \nopportunities…, or benefits." \nNIST's Privacy Framework provides a comprehensive, detailed and actionable approach for \norganizations to manage privacy risks. The NIST Framework gives organizations ways to identify and \ncommunicate their privacy risks and goals to support ethical decision-making in system, product, and service \ndesign or deployment, as well as the measures they are taking to demonstrate compliance with applicable laws \nor regulations. 
It has been voluntarily adopted by organizations across many different sectors around the world.78\nA school board's attempt to surveil public school students—undertaken without \nadequate community input—sparked a state-wide biometrics moratorium.79 Reacting to a plan in \nthe city of Lockport, New York, the state's legislature banned the use of facial recognition systems and other \n"biometric identifying technology" in schools until July 1, 2022.80 The law additionally requires that a report on \nthe privacy, civil rights, and civil liberties implications of the use of such technologies be issued before \nbiometric identification technologies can be used in New York schools. \nFederal law requires employers, and any consultants they may retain, to report the costs \nof surveilling employees in the context of a labor dispute, providing a transparency \nmechanism to help protect worker organizing. Employers engaging in workplace surveillance ""where \nan object there-of, directly or indirectly, is […] to obtain information concerning the activities of employees or a \nlabor organization in connection with a labor dispute"" must report expenditures relating to this surveillance to \nthe Department of Labor Office of Labor-Management Standards, and consultants who employers retain for \nthese purposes must also file reports regarding their activities.81\nPrivacy choices on smartphones show that when technologies are well designed, privacy \nand data agency can be meaningful and not overwhelming. These choices—such as contextual, timely \nalerts about location tracking—are brief, direct, and use-specific. Many of the expectations listed here for \nprivacy by design and use-specific consent mirror those distributed to developers as best practices when \ndeveloping for smart phone devices,82 such as being transparent about how user data will be used, asking for app \npermissions during their use so that the use-context will be clear to users, and ensuring that the app will still \nwork if users deny (or later revoke) some permissions. \n39\n']","The New York state legislature banned the use of facial recognition systems and other biometric identifying technology in schools until July 1, 2022. Additionally, the law requires that a report on the privacy, civil rights, and civil liberties implications of the use of such technologies be issued before biometric identification technologies can be used in New York schools.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 38, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What are the mental health impacts associated with increased use of surveillance technologies in schools and workplaces?,"[' \n \n \n \n \n \nDATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nData privacy is a foundational and cross-cutting principle required for achieving all others in this framework. 
Surveil\xad\nlance and data collection, sharing, use, and reuse now sit at the foundation of business models across many industries, \nwith more and more companies tracking the behavior of the American public, building individual profiles based on \nthis data, and using this granular-level information as input into automated systems that further track, profile, and \nimpact the American public. Government agencies, particularly law enforcement agencies, also use and help develop \na variety of technologies that enhance and expand surveillance capabilities, which similarly collect data used as input \ninto other automated systems that directly impact people's lives. Federal law has not grown to address the expanding \nscale of private data collection, or of the ability of governments at all levels to access that data and leverage the means \nof private collection. \nMeanwhile, members of the American public are often unable to access their personal data or make critical decisions \nabout its collection and use. Data brokers frequently collect consumer data from numerous sources without \nconsumers' permission or knowledge.60 Moreover, there is a risk that inaccurate and faulty data can be used to \nmake decisions about their lives, such as whether they will qualify for a loan or get a job. Use of surveillance \ntechnologies has increased in schools and workplaces, and, when coupled with consequential management and \nevaluation decisions, it is leading to mental health harms such as lowered self-confidence, anxiety, depression, and \na reduced ability to use analytical reasoning.61 Documented patterns show that personal data is being aggregated by \ndata brokers to profile communities in harmful ways.62 The impact of all this data harvesting is corrosive, \nbreeding distrust, anxiety, and other mental health problems; chilling speech, protest, and worker organizing; and \nthreatening our democratic process.63 The American public should be protected from these growing risks. \nIncreasingly, some companies are taking these concerns seriously and integrating mechanisms to protect consumer \nprivacy into their products by design and by default, including by minimizing the data they collect, communicating \ncollection and use clearly, and improving security practices. Federal government surveillance and other collection and \nuse of data is governed by legal protections that help to protect civil liberties and provide for limits on data retention \nin some cases. Many states have also enacted consumer data privacy protection regimes to address some of these \nharms. \nHowever, these are not yet standard practices, and the United States lacks a comprehensive statutory or regulatory \nframework governing the rights of the public when it comes to personal data. While a patchwork of laws exists to \nguide the collection and use of personal data in specific contexts, including health, employment, education, and credit, \nit can be unclear how these laws apply in other contexts and in an increasingly automated society. Additional protec\xad\ntions would assure the American public that the automated systems they use are not monitoring their activities, \ncollecting information on their lives, or otherwise surveilling them without context-specific consent or legal authori\xad\nty. 
\n31\n']","The mental health impacts associated with increased use of surveillance technologies in schools and workplaces include lowered self-confidence, anxiety, depression, and a reduced ability to use analytical reasoning.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 30, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the role of AI actors in the AI system lifecycle?,"[' \n13 \n• \nNot every suggested action applies to every AI Actor14 or is relevant to every AI Actor Task. For \nexample, suggested actions relevant to GAI developers may not be relevant to GAI deployers. \nThe applicability of suggested actions to relevant AI actors should be determined based on \norganizational considerations and their unique uses of GAI systems. \nEach table of suggested actions includes: \n• \nAction ID: Each Action ID corresponds to the relevant AI RMF function and subcategory (e.g., GV-\n1.1-001 corresponds to the first suggested action for Govern 1.1, GV-1.1-002 corresponds to the \nsecond suggested action for Govern 1.1). AI RMF functions are tagged as follows: GV = Govern; \nMP = Map; MS = Measure; MG = Manage. \n• \nSuggested Action: Steps an organization or AI actor can take to manage GAI risks. \n• \nGAI Risks: Tags linking suggested actions with relevant GAI risks. \n• \nAI Actor Tasks: Pertinent AI Actor Tasks for each subcategory. Not every AI Actor Task listed will \napply to every suggested action in the subcategory (i.e., some apply to AI development and \nothers apply to AI deployment). \nThe tables below begin with the AI RMF subcategory, shaded in blue, followed by suggested actions. \n \nGOVERN 1.1: Legal and regulatory requirements involving AI are understood, managed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.1-001 Align GAI development and use with applicable laws and regulations, including \nthose related to data privacy, copyright and intellectual property law. \nData Privacy; Harmful Bias and \nHomogenization; Intellectual \nProperty \nAI Actor Tasks: Governance and Oversight \n \n \n \n14 AI Actors are defined by the OECD as "those who play an active role in the AI system lifecycle, including \norganizations and individuals that deploy or operate AI." See Appendix A of the AI RMF for additional descriptions \nof AI Actors and AI Actor Tasks. 
\n \n \n']","AI actors play an active role in the AI system lifecycle, including organizations and individuals that deploy or operate AI.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 16, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the significance of human-AI configuration in ensuring the adequacy of GAI system user instructions?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain eļ¬€ective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability ā€“ as identiļ¬ed in the MAP function ā€“ are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Conļ¬guration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modiļ¬ed, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. \nHuman-AI Conļ¬guration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",The significance of human-AI configuration in ensuring the adequacy of GAI system user instructions is highlighted in the context where it mentions verifying the adequacy of GAI system user instructions through user testing. 
This suggests that human-AI configuration plays a crucial role in assessing and improving the effectiveness of user instructions.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What is the purpose of the AI Safety Institute established by NIST?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. 
\n \n']","The purpose of the AI Safety Institute established by NIST is to continue efforts to build the science necessary for safe, secure, and trustworthy development and use of artificial intelligence (AI), in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What criteria does the framework use to determine which automated systems are in scope for the AI Bill of Rights?,"["" \n \n \nSECTION TITLE\nApplying The Blueprint for an AI Bill of Rights \nWhile many of the concerns addressed in this framework derive from the use of AI, the technical \ncapabilities and specific definitions of such systems change with the speed of innovation, and the potential \nharms of their use occur even with less technologically sophisticated tools. Thus, this framework uses a two-\npart test to determine what systems are in scope. This framework applies to (1) automated systems that (2) \nhave the potential to meaningfully impact the American publicā€™s rights, opportunities, or access to \ncritical resources or services. These rights, opportunities, and access to critical resources of services should \nbe enjoyed equally and be fully protected, regardless of the changing role that automated systems may play in \nour lives. \nThis framework describes protections that should be applied with respect to all automated systems that \nhave the potential to meaningfully impact individuals' or communities' exercise of: \nRIGHTS, OPPORTUNITIES, OR ACCESS\nCivil rights, civil liberties, and privacy, including freedom of speech, voting, and protections from discrimi\xad\nnation, excessive punishment, unlawful surveillance, and violations of privacy and other freedoms in both \npublic and private sector contexts; \nEqual opportunities, including equitable access to education, housing, credit, employment, and other \nprograms; or, \nAccess to critical resources or services, such as healthcare, financial services, safety, social services, \nnon-deceptive information about goods and services, and government benefits. \nA list of examples of automated systems for which these principles should be considered is provided in the \nAppendix. The Technical Companion, which follows, offers supportive guidance for any person or entity that \ncreates, deploys, or oversees automated systems. \nConsidered together, the five principles and associated practices of the Blueprint for an AI Bill of \nRights form an overlapping set of backstops against potential harms. This purposefully overlapping \nframework, when taken as a whole, forms a blueprint to help protect the public from harm. \nThe measures taken to realize the vision set forward in this framework should be proportionate \nwith the extent and nature of the harm, or risk of harm, to people's rights, opportunities, and \naccess. 
\nRELATIONSHIP TO EXISTING LAW AND POLICY\nThe Blueprint for an AI Bill of Rights is an exercise in envisioning a future where the American public is \nprotected from the potential harms, and can fully enjoy the benefits, of automated systems. It describes princi\xad\nples that can help ensure these protections. Some of these protections are already required by the U.S. Constitu\xad\ntion or implemented under existing U.S. laws. For example, government surveillance, and data search and \nseizure are subject to legal requirements and judicial oversight. There are Constitutional requirements for \nhuman review of criminal investigative matters and statutory requirements for judicial review. Civil rights laws \nprotect the American people against discrimination. \n8\n""]","The framework uses a two-part test to determine which automated systems are in scope for the AI Bill of Rights: (1) automated systems that (2) have the potential to meaningfully impact the American publicā€™s rights, opportunities, or access to critical resources or services.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 7, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What procedures should be developed and updated in incident response and recovery plans for GAI systems when a previously unknown risk is identified?,"[' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Conļ¬guration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentiļ¬able information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identiļ¬ed. 
\nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notiļ¬cation format. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a speciļ¬c GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Conļ¬guration \n']","Develop and update GAI system incident response and recovery plans and procedures to address the following: Review and maintenance of policies and procedures to account for newly encountered uses; Review and maintenance of policies and procedures for detection of unanticipated uses; Verify response and recovery plans account for the GAI system value chain; Verify response and recovery plans are updated for and include necessary details to communicate with downstream GAI system Actors: Points-of-Contact (POC), Contact information, notification format.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the purpose of structured human feedback exercises in the context of GAI risk measurement and management?,"[' \n29 \nMS-1.1-006 \nImplement continuous monitoring of GAI system impacts to identify whether GAI \noutputs are equitable across various sub-populations. Seek active and direct \nfeedback from aļ¬€ected communities via structured feedback mechanisms or red-\nteaming to monitor and improve outputs. \nHarmful Bias and Homogenization \nMS-1.1-007 \nEvaluate the quality and integrity of data used in training and the provenance of \nAI-generated content, for example by employing techniques like chaos \nengineering and seeking stakeholder feedback. \nInformation Integrity \nMS-1.1-008 \nDeļ¬ne use cases, contexts of use, capabilities, and negative impacts where \nstructured human feedback exercises, e.g., GAI red-teaming, would be most \nbeneļ¬cial for GAI risk measurement and management based on the context of \nuse. 
\nHarmful Bias and \nHomogenization; CBRN \nInformation or Capabilities \nMS-1.1-009 \nTrack and document risks or opportunities related to all GAI risks that cannot be \nmeasured quantitatively, including explanations as to why some risks cannot be \nmeasured (e.g., due to technological limitations, resource constraints, or \ntrustworthy considerations). Include unmeasured risks in marginal risks. \nInformation Integrity \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMEASURE 1.3: Internal experts who did not serve as front-line developers for the system and/or independent assessors are \ninvolved in regular assessments and updates. Domain experts, users, AI Actors external to the team that developed or deployed the \nAI system, and aļ¬€ected communities are consulted in support of assessments as necessary per organizational risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.3-001 \nDeļ¬ne relevant groups of interest (e.g., demographic groups, subject matter \nexperts, experience with GAI technology) within the context of use as part of \nplans for gathering structured public feedback. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-002 \nEngage in internal and external evaluations, GAI red-teaming, impact \nassessments, or other structured human feedback exercises in consultation \nwith representative AI Actors with expertise and familiarity in the context of \nuse, and/or who are representative of the populations associated with the \ncontext of use. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-003 \nVerify those conducting structured human feedback exercises are not directly \ninvolved in system development tasks for the same GAI model. \nHuman-AI Conļ¬guration; Data \nPrivacy \nAI Actor Tasks: AI Deployment, AI Development, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Domain Experts, \nEnd-Users, Operation and Monitoring, TEVV \n \n']","The purpose of structured human feedback exercises in the context of GAI risk measurement and management is to define use cases, contexts of use, capabilities, and negative impacts where these exercises would be most beneficial. They are aimed at monitoring and improving outputs, evaluating the quality and integrity of data used in training, and tracking risks or opportunities related to GAI that cannot be measured quantitatively.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 32, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the significance of human-AI configuration in managing GAI risks and ensuring information integrity?,"[' \n25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at diļ¬€erent stages of AI life cycle. 
\nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. \nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner proļ¬ciency with AI system performance and trustworthiness ā€“ and relevant \ntechnical standards and certiļ¬cations ā€“ are deļ¬ned, assessed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certiļ¬cation programs that test proļ¬ciency in managing GAI risks and \ninterpreting content provenance, relevant to speciļ¬c industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proļ¬ciency tests from tests of GAI capabilities. \nHuman-AI Conļ¬guration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconļ¬gurations for future reļ¬nement and improvements. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping \nand testing activities. Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \n \n']","The significance of human-AI configuration in managing GAI risks and ensuring information integrity lies in its role in evaluating content lineage and origin, adapting training programs for digital content transparency, and delineating human proficiency tests from GAI capabilities. 
It also involves continual monitoring of human-GAI configurations and engaging end-users in prototyping and testing activities to address various scenarios, including crisis situations and ethically sensitive contexts.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What criteria are used to measure AI system performance or assurance in deployment settings?,"[' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output ļ¬lters; Removing any personally \nidentiļ¬able information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConļ¬guration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConļ¬guration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, diļ¬€erential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConļ¬guration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for ļ¬ne tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Conļ¬guration \n']",AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for conditions similar to deployment setting(s). 
Measures are documented.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are some suggested actions to address GAI risks in AI systems?,"[' \n35 \nMEASURE 2.9: The AI model is explained, validated, and documented, and AI system output is interpreted within its context ā€“ as \nidentiļ¬ed in the MAP function ā€“ to inform responsible use and governance. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.9-001 \nApply and document ML explanation results such as: Analysis of embeddings, \nCounterfactual prompts, Gradient-based attributions, Model \ncompression/surrogate models, Occlusion/term reduction. \nConfabulation \nMS-2.9-002 \nDocument GAI model details including: Proposed use and organizational value; \nAssumptions and limitations, Data collection methodologies; Data provenance; \nData quality; Model architecture (e.g., convolutional neural network, \ntransformers, etc.); Optimization objectives; Training algorithms; RLHF \napproaches; Fine-tuning or retrieval-augmented generation approaches; \nEvaluation data; Ethical considerations; Legal and regulatory requirements. \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 2.10: Privacy risk of the AI system ā€“ as identiļ¬ed in the MAP function ā€“ is examined and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.10-001 \nConduct AI red-teaming to assess issues such as: Outputting of training data \nsamples, and subsequent reverse engineering, model extraction, and \nmembership inference risks; Revealing biometric, conļ¬dential, copyrighted, \nlicensed, patented, personal, proprietary, sensitive, or trade-marked information; \nTracking or revealing location information of users or members of training \ndatasets. \nHuman-AI Conļ¬guration; \nInformation Integrity; Intellectual \nProperty \nMS-2.10-002 \nEngage directly with end-users and other stakeholders to understand their \nexpectations and concerns regarding content provenance. Use this feedback to \nguide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n']","Some suggested actions to address GAI risks in AI systems include: applying and documenting ML explanation results such as analysis of embeddings, counterfactual prompts, gradient-based attributions, model compression/surrogate models, and occlusion/term reduction. 
Additionally, documenting GAI model details including proposed use and organizational value, assumptions and limitations, data collection methodologies, data provenance, data quality, model architecture, optimization objectives, training algorithms, RLHF approaches, fine-tuning or retrieval-augmented generation approaches, evaluation data, ethical considerations, and legal and regulatory requirements.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 38, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What role do GAI systems play in augmenting cybersecurity attacks?,"[' \n10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards speciļ¬c demographics. Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic ā€œdeepfakesā€ ā€“ that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities. \nDisinformation and misinformation ā€“ both of which may be facilitated by GAI ā€“ may erode public trust in \ntrue or valid evidence and information, with downstream eļ¬€ects. For example, a synthetic image of a \nPentagon blast went viral and brieļ¬‚y caused a drop in the stock market. Generative AI models can also \nassist malicious actors in creating compelling imagery and propaganda to support disinformation \ncampaigns, which may not be photorealistic, but could enable these campaigns to gain more reach and \nengagement on social media platforms. Additionally, generative AI models can assist malicious actors in \ncreating fraudulent content intended to impersonate others. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe, Valid and Reliable, Interpretable and \nExplainable \n2.9. Information Security \nInformation security for computer systems and data is a mature ļ¬eld with widely accepted and \nstandardized practices for oļ¬€ensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of oļ¬€ensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. 
\nOļ¬€ensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of \nthe attack chain, including informing attackers on how to proactively evade threat detection and escalate \nprivileges after gaining system access. \nInformation security for GAI models and systems also includes maintaining availability of the GAI system \nand the integrity and (when applicable) the conļ¬dentiality of the GAI code, training data, and model \nweights. To identify and secure potential attack points in AI systems or speciļ¬c components of the AI \n \n \n12 See also https://doi.org/10.6028/NIST.AI.100-4, to be published. \n']","GAI systems may augment cybersecurity attacks by advancing offensive cyber capabilities such as hacking, malware, and phishing. Reports indicate that large language models (LLMs) can discover vulnerabilities in systems and write code to exploit them. Sophisticated threat actors might develop GAI-powered security co-pilots to inform attackers on how to evade threat detection and escalate privileges after gaining system access.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 13, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What precautions should be taken when using derived data sources in automated systems?,"[' \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be care\xad\nfully validated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indi\xad\ncating adverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law. Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. 
Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examina\xad\ntion, has benefits for those impacted by the system that outweigh identified risks and, as appropriate, reason\xad\nable measures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g., \nvia application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations. Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. \nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the \norganizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency, \nresults, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20\n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. 
In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n']","Precautions that should be taken when using derived data sources in automated systems include careful tracking and validation of derived data, as it is viewed as potentially high-risk and may lead to feedback loops, compounded harm, or inaccurate results. Such data should be validated against the risk of collateral consequences.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the implications of the lack of explanation for decisions made by automated systems?,"[' \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. 
\n•\nA predictive policing system claimed to identify individuals at greatest risk to commit or become the victim of\ngun violence (based on automated analysis of social ties to gang members, criminal histories, previous experi\xad\nences of gun violence, and other factors) and led to individuals being placed on a watch list with no\nexplanation or public transparency regarding how the system came to its conclusions.85 Both police and\nthe public deserve to understand why and how such a system is making these determinations.\n•\nA system awarding benefits changed its criteria invisibly. Individuals were denied benefits due to data entry\nerrors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42\n', "" \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nAutomated systems now determine opportunities, from employment to credit, and directly shape the American \npublic’s experiences, from the courtroom to online classrooms, in ways that profoundly impact people’s lives. But this \nexpansive impact is not always visible. An applicant might not know whether a person rejected their resume or a \nhiring algorithm moved them to the bottom of the list. A defendant in the courtroom might not know if a judge deny\xad\ning their bail is informed by an automated system that labeled them “high risk.” From correcting errors to contesting \ndecisions, people are often denied the knowledge they need to address the impact of automated systems on their lives. \nNotice and explanations also serve an important safety and efficacy purpose, allowing experts to verify the reasonable\xad\nness of a recommendation before enacting it. \nIn order to guard against potential harms, the American public needs to know if an automated system is being used. \nClear, brief, and understandable notice is a prerequisite for achieving the other protections in this framework. Like\xad\nwise, the public is often unable to ascertain how or why an automated system has made a decision or contributed to a \nparticular outcome. The decision-making processes of automated systems tend to be opaque, complex, and, therefore, \nunaccountable, whether by design or by omission. These factors can make explanations both more challenging and \nmore important, and should not be used as a pretext to avoid explaining important decisions to the people impacted \nby those choices. In the context of automated systems, clear and valid explanations should be recognized as a baseline \nrequirement. \nProviding notice has long been a standard practice, and in many cases is a legal requirement, when, for example, \nmaking a video recording of someone (outside of a law enforcement or national security context). In some cases, such \nas credit, lenders are required to provide notice and explanation to consumers. Techniques used to automate the \nprocess of explaining such systems are under active research and improvement and such explanations can take many \nforms. Innovative companies and researchers are rising to the challenge and creating and deploying explanatory \nsystems that can help the public better understand decisions that impact them.
\nWhile notice and explanation requirements are already in place in some sectors or situations, the American public \ndeserve to know consistently and across sectors if an automated system is being used in a way that impacts their rights, \nopportunities, or access. This knowledge should provide confidence in how the public is being treated, and trust in the \nvalidity and reasonable use of automated systems. \n•\nA lawyer representing an older client with disabilities who had been cut off from Medicaid-funded home\nhealth-care assistance couldn't determine why, especially since the decision went against historical access\npractices. In a court hearing, the lawyer learned from a witness that the state in which the older client\nlived had recently adopted a new algorithm to determine eligibility.83 The lack of a timely explanation made it\nharder to understand and contest the decision.\n•\nA formal child welfare investigation is opened against a parent based on an algorithm and without the parent\never being notified that data was being collected and used as part of an algorithmic child maltreatment\nrisk assessment.84 The lack of notice or an explanation makes it harder for those performing child\nmaltreatment assessments to validate the risk assessment and denies parents knowledge that could help them\ncontest a decision.\n41\n""]","The lack of explanation for decisions made by automated systems can lead to several implications, including the inability for individuals to understand or contest decisions that affect their lives. For instance, applicants may not know why their resumes were rejected, defendants may be unaware if their bail decisions are influenced by an automated system labeling them as 'high risk', and individuals may face difficulties in correcting errors or contesting decisions due to a lack of transparency. This opacity can result in unaccountable decision-making processes and can hinder the public's ability to trust the validity and reasonable use of automated systems.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 41, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 40, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What should users be notified about regarding automated systems that impact them?,"[' \nYou should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you.
Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40\n']","Users should be notified about the use of automated systems, the individual or organization responsible for the system, significant use case or key functionality changes, and how and why an outcome impacting them was determined by the automated system.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 39, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the key considerations regarding data privacy in the context of the AI Bill of Rights?,"['TABLE OF CONTENTS\nFROM PRINCIPLES TO PRACTICE: A TECHNICAL COMPANION TO THE BLUEPRINT \nFOR AN AI BILL OF RIGHTS \n \nUSING THIS TECHNICAL COMPANION\n \nSAFE AND EFFECTIVE SYSTEMS\n \nALGORITHMIC DISCRIMINATION PROTECTIONS\n \nDATA PRIVACY\n \nNOTICE AND EXPLANATION\n \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nAPPENDIX\n \nEXAMPLES OF AUTOMATED SYSTEMS\n \nLISTENING TO THE AMERICAN PEOPLE\nENDNOTES \n12\n14\n15\n23\n30\n40\n46\n53\n53\n55\n63\n13\n']",The answer to given question is not present in context,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 12, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What measures should be taken during disparity assessment of automated systems to ensure inclusivity and fairness?,"["" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored 
for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. \nDisparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. 
This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections \n""]","During disparity assessment of automated systems, measures should include testing using a broad set of measures to assess whether the system components produce disparities. The demographics of the assessed groups should be as inclusive as possible, covering aspects such as race, color, ethnicity, sex, religion, age, national origin, disability, and other classifications protected by law. The assessment should include demographic performance measures, overall and subgroup parity assessment, and calibration. Additionally, demographic data collected for disparity assessment should be separated from data used for the automated system, and privacy protections should be instituted.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What are the potential risks associated with generative AI models in the context of disinformation and cybersecurity?,"[' \n10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards specific demographics. Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic “deepfakes” – that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities. \nDisinformation and misinformation – both of which may be facilitated by GAI – may erode public trust in \ntrue or valid evidence and information, with downstream effects. For example, a synthetic image of a \nPentagon blast went viral and briefly caused a drop in the stock market.
Generative AI models can also \nassist malicious actors in creating compelling imagery and propaganda to support disinformation \ncampaigns, which may not be photorealistic, but could enable these campaigns to gain more reach and \nengagement on social media platforms. Additionally, generative AI models can assist malicious actors in \ncreating fraudulent content intended to impersonate others. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe, Valid and Reliable, Interpretable and \nExplainable \n2.9. Information Security \nInformation security for computer systems and data is a mature field with widely accepted and \nstandardized practices for offensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of offensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. \nOffensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of \nthe attack chain, including informing attackers on how to proactively evade threat detection and escalate \nprivileges after gaining system access. \nInformation security for GAI models and systems also includes maintaining availability of the GAI system \nand the integrity and (when applicable) the confidentiality of the GAI code, training data, and model \nweights. To identify and secure potential attack points in AI systems or specific components of the AI \n \n \n12 See also https://doi.org/10.6028/NIST.AI.100-4, to be published. \n']","The potential risks associated with generative AI models in the context of disinformation include the ease of producing or disseminating false, inaccurate, or misleading content at scale, both unintentionally (misinformation) and deliberately (disinformation). GAI systems can enable malicious actors to create targeted disinformation campaigns, generate realistic deepfakes, and produce compelling imagery and propaganda. In terms of cybersecurity, GAI models may lower barriers for offensive capabilities, expand the attack surface, and assist in discovering vulnerabilities and writing exploit code, thereby augmenting cybersecurity attacks such as hacking, malware, and phishing.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 13, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What topics were discussed regarding potential harms and oversight in the development of the AI Bill of Rights?,"[""APPENDIX\n• OSTP conducted meetings with a variety of stakeholders in the private sector and civil society.
Some of these\nmeetings were specifically focused on providing ideas related to the development of the Blueprint for an AI\nBill of Rights while others provided useful general context on the positive use cases, potential harms, and/or\noversight possibilities for these technologies. Participants in these conversations from the private sector and\ncivil society included:\nAdobe \nAmerican Civil Liberties Union \n(ACLU) \nThe Aspen Commission on \nInformation Disorder \nThe Awood Center \nThe Australian Human Rights \nCommission \nBiometrics Institute \nThe Brookings Institute \nBSA | The Software Alliance \nCantellus Group \nCenter for American Progress \nCenter for Democracy and \nTechnology \nCenter on Privacy and Technology \nat Georgetown Law \nChristiana Care \nColor of Change \nCoworker \nData Robot \nData Trust Alliance \nData and Society Research Institute \nDeepmind \nEdSAFE AI Alliance \nElectronic Privacy Information \nCenter (EPIC) \nEncode Justice \nEqual AI \nGoogle \nHitachi's AI Policy Committee \nThe Innocence Project \nInstitute of Electrical and \nElectronics Engineers (IEEE) \nIntuit \nLawyers Committee for Civil Rights \nUnder Law \nLegal Aid Society \nThe Leadership Conference on \nCivil and Human Rights \nMeta \nMicrosoft \nThe MIT AI Policy Forum \nMovement Alliance Project \nThe National Association of \nCriminal Defense Lawyers \nO’Neil Risk Consulting & \nAlgorithmic Auditing \nThe Partnership on AI \nPinterest \nThe Plaintext Group \npymetrics \nSAP \nThe Security Industry Association \nSoftware and Information Industry \nAssociation (SIIA) \nSpecial Competitive Studies Project \nThorn \nUnited for Respect \nUniversity of California at Berkeley \nCitris Policy Lab \nUniversity of California at Berkeley \nLabor Center \nUnfinished/Project Liberty \nUpturn \nUS Chamber of Commerce \nUS Chamber of Commerce \nTechnology Engagement Center \nA.I. Working Group\nVibrent Health\nWarehouse Worker Resource\nCenter\nWaymap\n62\n""]","The context mentions that some meetings focused on providing ideas related to the development of the Blueprint for an AI Bill of Rights, and others provided useful general context on the positive use cases, potential harms, and/or oversight possibilities for these technologies. However, specific topics discussed regarding potential harms and oversight are not detailed in the provided context.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 61, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What measures should be in place to ensure human alternatives and consideration in the use of automated systems?,"['You should be able to opt out, where appropriate, and \nhave access to a person who can quickly consider and \nremedy problems you encounter. You should be able to opt \nout from automated systems in favor of a human alternative, where \nappropriate. Appropriateness should be determined based on rea\xad\nsonable expectations in a given context and with a focus on ensuring \nbroad accessibility and protecting the public from especially harm\xad\nful impacts.
In some cases, a human or other alternative may be re\xad\nquired by law. You should have access to timely human consider\xad\nation and remedy by a fallback and escalation process if an automat\xad\ned system fails, it produces an error, or you would like to appeal or \ncontest its impacts on you. Human consideration and fallback \nshould be accessible, equitable, effective, maintained, accompanied \nby appropriate operator training, and should not impose an unrea\xad\nsonable burden on the public. Automated systems with an intended \nuse within sensitive domains, including, but not limited to, criminal \njustice, employment, education, and health, should additionally be \ntailored to the purpose, provide meaningful access for oversight, \ninclude training for any people interacting with the system, and in\xad\ncorporate human consideration for adverse or high-risk decisions. \nReporting that includes a description of these human governance \nprocesses and assessment of their timeliness, accessibility, out\xad\ncomes, and effectiveness should be made public whenever possible. \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK \n46\n']","Measures to ensure human alternatives and consideration in the use of automated systems include the ability to opt out from automated systems in favor of a human alternative where appropriate, access to timely human consideration and remedy through a fallback and escalation process if an automated system fails, and ensuring that human consideration and fallback are accessible, equitable, effective, and maintained. Additionally, automated systems in sensitive domains should be tailored to their purpose, provide meaningful access for oversight, include training for people interacting with the system, and incorporate human consideration for adverse or high-risk decisions.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 45, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What measures should be taken to ensure that automated systems are safe and effective?,"[' \n \n \nSAFE AND EFFECTIVE SYSTEMS \nYou should be protected from unsafe or ineffective sys\xad\ntems. Automated systems should be developed with consultation \nfrom diverse communities, stakeholders, and domain experts to iden\xad\ntify concerns, risks, and potential impacts of the system. Systems \nshould undergo pre-deployment testing, risk identification and miti\xad\ngation, and ongoing monitoring that demonstrate they are safe and \neffective based on their intended use, mitigation of unsafe outcomes \nincluding those beyond the intended use, and adherence to do\xad\nmain-specific standards. Outcomes of these protective measures \nshould include the possibility of not deploying the system or remov\xad\ning a system from use. Automated systems should not be designed \nwith an intent or reasonably foreseeable possibility of endangering \nyour safety or the safety of your community. They should be designed \nto proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. 
You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15\n']","To ensure that automated systems are safe and effective, measures should include consultation with diverse communities, stakeholders, and domain experts to identify concerns and risks. Systems should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring. These measures should demonstrate safety and effectiveness based on intended use, mitigate unsafe outcomes, and adhere to domain-specific standards. Additionally, independent evaluation and reporting should confirm safety and effectiveness, with results made public whenever possible.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 14, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What considerations should be taken into account when using automated systems in sensitive domains?,"[' \nSECTION TITLE\nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nYou should be able to opt out, where appropriate, and have access to a person who can quickly \nconsider and remedy problems you encounter. You should be able to opt out from automated systems in \nfavor of a human alternative, where appropriate. Appropriateness should be determined based on reasonable \nexpectations in a given context and with a focus on ensuring broad accessibility and protecting the public from \nespecially harmful impacts. In some cases, a human or other alternative may be required by law. You should have \naccess to timely human consideration and remedy by a fallback and escalation process if an automated system \nfails, it produces an error, or you would like to appeal or contest its impacts on you. Human consideration and \nfallback should be accessible, equitable, effective, maintained, accompanied by appropriate operator training, and \nshould not impose an unreasonable burden on the public. Automated systems with an intended use within sensi\xad\ntive domains, including, but not limited to, criminal justice, employment, education, and health, should additional\xad\nly be tailored to the purpose, provide meaningful access for oversight, include training for any people interacting \nwith the system, and incorporate human consideration for adverse or high-risk decisions. Reporting that includes \na description of these human governance processes and assessment of their timeliness, accessibility, outcomes, \nand effectiveness should be made public whenever possible. \nDefinitions for key terms in The Blueprint for an AI Bill of Rights can be found in Applying the Blueprint for an AI Bill of Rights. \nAccompanying analysis and tools for actualizing each principle can be found in the Technical Companion. 
\n7\n']","When using automated systems in sensitive domains, considerations should include tailoring the systems to their intended purpose, providing meaningful access for oversight, ensuring training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 6, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are some examples of harms caused by algorithmic bias in automated systems?,"[' \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nWhile technologies are being deployed to solve problems across a wide array of issues, our reliance on technology can \nalso lead to its use in situations where it has not yet been proven to workā€”either at all or within an acceptable range \nof error. In other cases, technologies do not work as intended or as promised, causing substantial and unjustified harm. \nAutomated systems sometimes rely on data from other systems, including historical data, allowing irrelevant informa\xad\ntion from past decisions to infect decision-making in unrelated situations. In some cases, technologies are purposeful\xad\nly designed to violate the safety of others, such as technologies designed to facilitate stalking; in other cases, intended \nor unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad\nvators deserve clear rules of the road that allow new ideas to flourish, and the American public deserves protections \nfrom unsafe outcomes. All can benefit from assurances that automated systems will be designed, tested, and consis\xad\ntently confirmed to work as intended, and that they will be proactively protected from foreseeable unintended harm\xad\nful outcomes. \nā€¢\nA proprietary model was developed to predict the likelihood of sepsis in hospitalized patients and was imple\xad\nmented at hundreds of hospitals around the country. 
An independent study showed that the model predictions\nunderperformed relative to the designerā€™s claims while also causing ā€˜alert fatigueā€™ by falsely alerting\nlikelihood of sepsis.6\nā€¢\nOn social media, Black people who quote and criticize racist messages have had their own speech silenced when\na platformā€™s automated moderation system failed to distinguish this ā€œcounter speechā€ (or other critique\nand journalism) from the original hateful messages to which such speech responded.7\nā€¢\nA device originally developed to help people track and find lost items has been used as a tool by stalkers to track\nvictimsā€™ locations in violation of their privacy and safety. The device manufacturer took steps after release to\nprotect people from unwanted tracking by alerting people on their phones when a device is found to be moving\nwith them over time and also by having the device make an occasional noise, but not all phones are able\nto receive the notification and the devices remain a safety concern due to their misuse.8 \nā€¢\nAn algorithm used to deploy police was found to repeatedly send police to neighborhoods they regularly visit,\neven if those neighborhoods were not the ones with the highest crime rates. These incorrect crime predictions\nwere the result of a feedback loop generated from the reuse of data from previous arrests and algorithm\npredictions.9\n16\n']","Examples of harms caused by algorithmic bias in automated systems include: 1) A proprietary model predicting sepsis in hospitalized patients that underperformed and caused alert fatigue by falsely alerting likelihood of sepsis. 2) An automated moderation system on social media that silenced Black people who quoted and criticized racist messages, failing to distinguish their counter speech from the original hateful messages. 3) A device meant to help track lost items being misused by stalkers to track victims' locations, despite manufacturer attempts to implement safety measures. 4) An algorithm used for police deployment that sent officers to neighborhoods they regularly visited, rather than those with the highest crime rates, due to a feedback loop from previous data and predictions.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 15, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the challenges associated with value chain and component integration in GAI systems?,"[' \n12 \nCSAM. Even when trained on ā€œcleanā€ data, increasingly capable GAI models can synthesize or produce \nsynthetic NCII and CSAM. Websites, mobile apps, and custom-built models that generate synthetic NCII \nhave moved from niche internet forums to mainstream, automated, and scaled online businesses. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Safe, Privacy Enhanced \n2.12. \nValue Chain and Component Integration \nGAI value chains involve many third-party components such as procured datasets, pre-trained models, \nand software libraries. 
These components might be improperly obtained or not properly vetted, leading \nto diminished transparency or accountability for downstream users. While this is a risk for traditional AI \nsystems and some other digital technologies, the risk is exacerbated for GAI due to the scale of the \ntraining data, which may be too large for humans to vet; the diļ¬ƒculty of training foundation models, \nwhich leads to extensive reuse of limited numbers of models; and the extent to which GAI may be \nintegrated into other devices and services. As GAI systems often involve many distinct third-party \ncomponents and data sources, it may be diļ¬ƒcult to attribute issues in a systemā€™s behavior to any one of \nthese sources. \nErrors in third-party GAI components can also have downstream impacts on accuracy and robustness. \nFor example, test datasets commonly used to benchmark or validate models can contain label errors. \nInaccuracies in these labels can impact the ā€œstabilityā€ or robustness of these benchmarks, which many \nGAI practitioners consider during the model selection process. \nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n3. \nSuggested Actions to Manage GAI Risks \nThe following suggested actions target risks unique to or exacerbated by GAI. \nIn addition to the suggested actions below, AI risk management activities and actions set forth in the AI \nRMF 1.0 and Playbook are already applicable for managing GAI risks. Organizations are encouraged to \napply the activities suggested in the AI RMF and its Playbook when managing the risk of GAI systems. \nImplementation of the suggested actions will vary depending on the type of risk, characteristics of GAI \nsystems, stage of the GAI lifecycle, and relevant AI actors involved. \nSuggested actions to manage GAI risks can be found in the tables below: \nā€¢ \nThe suggested actions are organized by relevant AI RMF subcategories to streamline these \nactivities alongside implementation of the AI RMF. \nā€¢ \nNot every subcategory of the AI RMF is included in this document.13 Suggested actions are \nlisted for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later. \n']","Challenges associated with value chain and component integration in GAI systems include the improper acquisition or vetting of third-party components such as datasets, pre-trained models, and software libraries, which can lead to diminished transparency and accountability. The scale of training data may be too large for humans to vet, and the difficulty of training foundation models can result in extensive reuse of a limited number of models. 
Additionally, it may be difficult to attribute issues in a system's behavior to any one of these sources, and errors in third-party GAI components can have downstream impacts on accuracy and robustness.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 15, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What considerations should be taken into account when determining model release approaches?,"[' \n40 \nMANAGE 1.3: Responses to the AI risks deemed high priority, as identiļ¬ed by the MAP function, are developed, planned, and \ndocumented. Risk response options can include mitigating, transferring, avoiding, or accepting. \nAction ID \nSuggested Action \nGAI Risks \nMG-1.3-001 \nDocument trade-oļ¬€s, decision processes, and relevant measurement and \nfeedback results for risks that do not surpass organizational risk tolerance, for \nexample, in the context of model release: Consider diļ¬€erent approaches for \nmodel release, for example, leveraging a staged release approach. Consider \nrelease approaches in the context of the model and its projected use cases. \nMitigate, transfer, or avoid risks that surpass organizational risk tolerances. \nInformation Security \nMG-1.3-002 \nMonitor the robustness and eļ¬€ectiveness of risk controls and mitigation plans \n(e.g., via red-teaming, ļ¬eld testing, participatory engagements, performance \nassessments, user feedback mechanisms). \nHuman-AI Conļ¬guration \nAI Actor Tasks: AI Development, AI Deployment, AI Impact Assessment, Operation and Monitoring \n \nMANAGE 2.2: Mechanisms are in place and applied to sustain the value of deployed AI systems. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.2-001 \nCompare GAI system outputs against pre-deļ¬ned organization risk tolerance, \nguidelines, and principles, and review and test AI-generated content against \nthese guidelines. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content \nMG-2.2-002 \nDocument training data sources to trace the origin and provenance of AI-\ngenerated content. \nInformation Integrity \nMG-2.2-003 \nEvaluate feedback loops between GAI system content provenance and human \nreviewers, and update where needed. Implement real-time monitoring systems \nto aļ¬ƒrm that content provenance protocols remain eļ¬€ective. \nInformation Integrity \nMG-2.2-004 \nEvaluate GAI content and data for representational biases and employ \ntechniques such as re-sampling, re-ranking, or adversarial training to mitigate \nbiases in the generated content. \nInformation Security; Harmful Bias \nand Homogenization \nMG-2.2-005 \nEngage in due diligence to analyze GAI output for harmful content, potential \nmisinformation, and CBRN-related or NCII content. 
\nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content \n']","When determining model release approaches, considerations should include documenting trade-offs, decision processes, and relevant measurement and feedback results for risks that do not surpass organizational risk tolerance. Additionally, different approaches for model release should be considered, such as leveraging a staged release approach and evaluating release approaches in the context of the model and its projected use cases.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 43, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What considerations should be taken into account regarding intellectual property when conducting diligence on training data use?,"["" \n27 \nMP-4.1-010 \nConduct appropriate diligence on training data use to assess intellectual property, \nand privacy, risks, including to examine whether use of proprietary or sensitive \ntraining data is consistent with applicable laws. \nIntellectual Property; Data Privacy \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities \n \nMAP 5.1: Likelihood and magnitude of each identiļ¬ed impact (both potentially beneļ¬cial and harmful) based on expected use, past \nuses of AI systems in similar contexts, public incident reports, feedback from those external to the team that developed or deployed \nthe AI system, or other data are identiļ¬ed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.1-001 Apply TEVV practices for content provenance (e.g., probing a system's synthetic \ndata generation capabilities for potential misuse or vulnerabilities. \nInformation Integrity; Information \nSecurity \nMP-5.1-002 \nIdentify potential content provenance harms of GAI, such as misinformation or \ndisinformation, deepfakes, including NCII, or tampered content. Enumerate and \nrank risks based on their likelihood and potential impact, and determine how well \nprovenance solutions address speciļ¬c risks and/or harms. \nInformation Integrity; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMP-5.1-003 \nConsider disclosing use of GAI to end users in relevant contexts, while considering \nthe objective of disclosure, the context of use, the likelihood and magnitude of the \nrisk posed, the audience of the disclosure, as well as the frequency of the \ndisclosures. \nHuman-AI Conļ¬guration \nMP-5.1-004 Prioritize GAI structured public feedback processes based on risk assessment \nestimates. \nInformation Integrity; CBRN \nInformation or Capabilities; \nDangerous, Violent, or Hateful \nContent; Harmful Bias and \nHomogenization \nMP-5.1-005 Conduct adversarial role-playing exercises, GAI red-teaming, or chaos testing to \nidentify anomalous or unforeseen failure modes. 
\nInformation Security \nMP-5.1-006 \nProļ¬le threats and negative impacts arising from GAI systems interacting with, \nmanipulating, or generating content, and outlining known and potential \nvulnerabilities and the likelihood of their occurrence. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Design, AI Development, AI Impact Assessment, Aļ¬€ected Individuals and Communities, End-\nUsers, Operation and Monitoring \n \n""]","Considerations regarding intellectual property when conducting diligence on training data use include assessing risks related to intellectual property and privacy, and examining whether the use of proprietary or sensitive training data is consistent with applicable laws.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 30, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are some examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights?,"[' \n \n \n \n \n \n \n \n \nAPPENDIX\nExamples of Automated Systems \nThe below examples are meant to illustrate the breadth of automated systems that, insofar as they have the \npotential to meaningfully impact rights, opportunities, or access to critical resources or services, should \nbe covered by the Blueprint for an AI Bill of Rights. These examples should not be construed to limit that \nscope, which includes automated systems that may not yet exist, but which fall under these criteria. \nExamples of automated systems for which the Blueprint for an AI Bill of Rights should be considered include \nthose that have the potential to meaningfully impact: \nā€¢ Civil rights, civil liberties, or privacy, including but not limited to:\nSpeech-related systems such as automated content moderation tools; \nSurveillance and criminal justice system algorithms such as risk assessments, predictive \n policing, automated license plate readers, real-time facial recognition systems (especially \n those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. 
\nā€¢ Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems, \nprojections of student progress or outcomes, algorithms that determine access to resources or \n rograms, and surveillance of classes (whether online or in-person); \nHousing-related systems such as tenant screening algorithms, automated valuation systems that \n estimate the value of homes used in mortgage underwriting or home insurance, and automated \n valuations from online aggregator websites; and \nEmployment-related systems such as workplace algorithms that inform all aspects of the terms \n and conditions of employment including, but not limited to, pay or promotion, hiring or termina- \n tion algorithms, virtual or augmented reality workplace training programs, and electronic work \nplace surveillance and management systems. \nā€¢ Access to critical resources and services, including but not limited to:\nHealth and health insurance technologies such as medical AI systems and devices, AI-assisted \n diagnostic tools, algorithms or predictive models used to support clinical decision making, medical \n or insurance health risk assessments, drug addiction risk assessments and associated access alg \n-orithms, wearable technologies, wellness apps, insurance care allocation algorithms, and health\ninsurance cost and underwriting algorithms;\nFinancial system algorithms such as loan allocation algorithms, financial system access determi-\nnation algorithms, credit scoring systems, insurance algorithms including risk assessments, auto\n-mated interest rate determinations, and financial algorithms that apply penalties (e.g., that can\ngarnish wages or withhold tax returns);\n53\n']",Examples of automated systems that should be covered by the Blueprint for an AI Bill of Rights include: speech-related systems such as automated content moderation tools; surveillance and criminal justice system algorithms like risk assessments and predictive policing; voting-related systems such as signature matching tools; privacy-impacting systems like smart home systems and health-related data systems; education-related systems such as algorithms for detecting student cheating; housing-related systems like tenant screening algorithms; and employment-related systems that inform terms of employment.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 52, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are some concerns related to data privacy in the context of sensitive domains?,"[' \n \n \n \n \n \n \nDATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\nā€¢\nContinuous positive airway pressure machines gather data for medical purposes, such as diagnosing sleep\napnea, and send usage data to a patientā€™s insurance company, which may subsequently deny coverage for the\ndevice based on usage data. 
Patients were not aware that the data would be used in this way or monitored\nby anyone other than their doctor.70 \nā€¢\nA department store company used predictive analytics applied to collected consumer data to determine that a\nteenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her\nhouse, revealing to her father that she was pregnant.71\nā€¢\nSchool audio surveillance systems monitor student conversations to detect potential ""stress indicators"" as\na warning of potential violence.72 Online proctoring systems claim to detect if a student is cheating on an\nexam using biometric markers.73 These systems have the potential to limit student freedom to express a range\nof emotions at school and may inappropriately flag students with disabilities who need accommodations or\nuse screen readers or dictation software as cheating.74\nā€¢\nLocation data, acquired from a data broker, can be used to identify people who visit abortion clinics.75\nā€¢\nCompanies collect student data such as demographic information, free or reduced lunch status, whether\nthey\'ve used drugs, or whether they\'ve expressed interest in LGBTQI+ groups, and then use that data to \nforecast student success.76 Parents and education experts have expressed concern about collection of such\nsensitive data without express parental consent, the lack of transparency in how such data is being used, and\nthe potential for resulting discriminatory impacts.\nā€¢ Many employers transfer employee data to third party job verification services. This information is then used\nby potential future employers, banks, or landlords. In one case, a former employee alleged that a\ncompany supplied false data about her job title which resulted in a job offer being revoked.77\n37\n']","Concerns related to data privacy in sensitive domains include the lack of awareness among patients regarding the use of their medical data by insurance companies, the revelation of personal information (such as pregnancy) through targeted advertising, the monitoring of student conversations which may limit emotional expression and unfairly flag students with disabilities, the use of location data to identify individuals visiting abortion clinics, the collection of sensitive student data without parental consent, and the potential for discriminatory impacts from such data usage. 
Additionally, there are concerns about the accuracy of employee data transferred to third parties, which can affect job opportunities.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 36, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What considerations should be taken into account when reviewing vendor contracts for third-party GAI technologies?,"[' \n22 \nGV-6.2-003 \nEstablish incident response plans for third-party GAI technologies: Align incident \nresponse plans with impacts enumerated in MAP 5.1; Communicate third-party \nGAI incident response plans to all relevant AI Actors; Deļ¬ne ownership of GAI \nincident response functions; Rehearse third-party GAI incident response plans at \na regular cadence; Improve incident response plans based on retrospective \nlearning; Review incident response plans for alignment with relevant breach \nreporting, data protection, data privacy, or other laws. \nData Privacy; Human-AI \nConļ¬guration; Information \nSecurity; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization \nGV-6.2-004 \nEstablish policies and procedures for continuous monitoring of third-party GAI \nsystems in deployment. \nValue Chain and Component \nIntegration \nGV-6.2-005 \nEstablish policies and procedures that address GAI data redundancy, including \nmodel weights and other system artifacts. \nHarmful Bias and Homogenization \nGV-6.2-006 \nEstablish policies and procedures to test and manage risks related to rollover and \nfallback technologies for GAI systems, acknowledging that rollover and fallback \nmay include manual processing. \nInformation Integrity \nGV-6.2-007 \nReview vendor contracts and avoid arbitrary or capricious termination of critical \nGAI technologies or vendor services and non-standard terms that may amplify or \ndefer liability in unexpected ways and/or contribute to unauthorized data \ncollection by vendors or third-parties (e.g., secondary data use). Consider: Clear \nassignment of liability and responsibility for incidents, GAI system changes over \ntime (e.g., ļ¬ne-tuning, drift, decay); Request: Notiļ¬cation and disclosure for \nserious incidents arising from third-party data and systems; Service Level \nAgreements (SLAs) in vendor contracts that address incident response, response \ntimes, and availability of critical support. \nHuman-AI Conļ¬guration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV, Third-party entities \n \nMAP 1.1: Intended purposes, potentially beneļ¬cial uses, context speciļ¬c laws, norms and expectations, and prospective settings in \nwhich the AI system will be deployed are understood and documented. 
Considerations include: the speciļ¬c set or types of users \nalong with their expectations; potential positive and negative impacts of system uses to individuals, communities, organizations, \nsociety, and the planet; assumptions and related limitations about AI system purposes, uses, and risks across the development or \nproduct AI lifecycle; and related TEVV and system metrics. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.1-001 \nWhen identifying intended purposes, consider factors such as internal vs. \nexternal use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty \n']","When reviewing vendor contracts for third-party GAI technologies, considerations should include avoiding arbitrary or capricious termination of critical GAI technologies or vendor services, avoiding non-standard terms that may amplify or defer liability in unexpected ways, and preventing unauthorized data collection by vendors or third-parties. Additionally, there should be a clear assignment of liability and responsibility for incidents, acknowledgment of GAI system changes over time, and requirements for notification and disclosure for serious incidents arising from third-party data and systems. Service Level Agreements (SLAs) in vendor contracts should also address incident response, response times, and availability of critical support.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 25, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are the expectations for ensuring that automated systems are safe and effective?,"[' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. 
The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\xad\ntial risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on peopleā€™s rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, mea\xad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. \n18\n']","The expectations for ensuring that automated systems are safe and effective include: 1) Safeguards to protect the public from harm in a proactive and ongoing manner; 2) Avoiding the use of data that is inappropriate or irrelevant to the task at hand; 3) Demonstrating the safety and effectiveness of the system. 
Additionally, there should be consultation with the public during the design and implementation phases, extensive testing before deployment, and identification and mitigation of potential risks.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the key components of risk identification and mitigation in the development of automated systems?,"[' \n \n \nSAFE AND EFFECTIVE SYSTEMS \nYou should be protected from unsafe or ineffective sys\xad\ntems. Automated systems should be developed with consultation \nfrom diverse communities, stakeholders, and domain experts to iden\xad\ntify concerns, risks, and potential impacts of the system. Systems \nshould undergo pre-deployment testing, risk identification and miti\xad\ngation, and ongoing monitoring that demonstrate they are safe and \neffective based on their intended use, mitigation of unsafe outcomes \nincluding those beyond the intended use, and adherence to do\xad\nmain-specific standards. Outcomes of these protective measures \nshould include the possibility of not deploying the system or remov\xad\ning a system from use. Automated systems should not be designed \nwith an intent or reasonably foreseeable possibility of endangering \nyour safety or the safety of your community. They should be designed \nto proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15\n']","The key components of risk identification and mitigation in the development of automated systems include pre-deployment testing, risk identification and mitigation processes, ongoing monitoring, and adherence to domain-specific standards. 
These components aim to ensure that systems are safe and effective based on their intended use and to mitigate unsafe outcomes, including those beyond the intended use.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 14, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the implications of bias and discrimination in automated systems on the rights of the American public?,"[' \nSECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten peopleā€™s \nopportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or \nconsent. \nThese outcomes are deeply harmfulā€”but they are not inevitable. Automated systems have brought about extraor-\ndinary benefits, from technology that helps farmers grow food more efficiently and computers that predict storm \npaths, to algorithms that can identify diseases in patients. These tools now drive important decisions across \nsectors, while data is helping to revolutionize global industries. Fueled by the power of American innovation, \nthese tools hold the potential to redefine every part of our society and make life better for everyone. \nThis important progress must not come at the price of civil rights or democratic values, foundational American \nprinciples that President Biden has affirmed as a cornerstone of his Administration. On his first day in office, the \nPresident ordered the full Federal government to work to root out inequity, embed fairness in decision-\nmaking processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.1 The \nPresident has spoken forcefully about the urgent challenges posed to democracy today and has regularly called \non people of conscience to act to preserve civil rightsā€”including the right to privacy, which he has called ā€œthe \nbasis for so many more rights that we have come to take for granted that are ingrained in the fabric of this \ncountry.ā€2\nTo advance President Bidenā€™s vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threatsā€”and uses technologies in ways that reinforce our highest values. 
\nResponding to the experiences of the American public, and informed by insights from researchers, \ntechnologists, advocates, journalists, and policymakers, this framework is accompanied by a technical \ncompanionā€”a handbook for anyone seeking to incorporate these protections into policy and practice, including \ndetailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3\n']","The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting or reproducing existing unwanted inequities. These outcomes can threaten people's opportunities, undermine their privacy, and lead to pervasive tracking of their activities, often without their knowledge or consent.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 2, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What measures are suggested to protect data privacy in evaluations involving human subjects?,"[' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output ļ¬lters; Removing any personally \nidentiļ¬able information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConļ¬guration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConļ¬guration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, diļ¬€erential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConļ¬guration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for ļ¬ne tuning or enhancement with retrieval-augmented generation. 
\nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Conļ¬guration \n']","Suggested measures to protect data privacy in evaluations involving human subjects include: anonymizing data to protect the privacy of human subjects, leveraging privacy output filters, removing any personally identifiable information (PII) to prevent potential harm or misuse, and providing human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the purpose of AI impact assessment in relation to feedback from individuals and communities?,"[' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. \nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more signiļ¬cant risks. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-partyā€™s intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize diļ¬€erent types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management eļ¬€orts with third parties (e.g., incidents detected and \nresponse times). 
\nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-deļ¬ned contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property \n']","The purpose of AI impact assessment in relation to feedback from individuals and communities is to collect, consider, prioritize, and integrate feedback regarding the potential individual and societal impacts related to AI risks. This process ensures that organizational policies and practices are in place to address these impacts effectively.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What principles are required for the design and use of trustworthy artificial intelligence in the federal government?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principlesā€”while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AIā€”require that AI is: (a) lawful and \nrespectful of our Nationā€™s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f ) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulationsā€”and \nmeasures to address harms when they occurā€”can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components. 
\nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturersā€™ ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companiesā€™ reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-\nBased Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful \nuses. \nThe \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. 
\n21\n']","The principles required for the design and use of trustworthy artificial intelligence in the federal government include: (a) lawful and respectful of our Nationā€™s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) safe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transparent; and (i) accountable.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What organizational risk tolerances should be applied to the utilization of third-party GAI resources?,"[' \n42 \nMG-2.4-002 \nEstablish and maintain procedures for escalating GAI system incidents to the \norganizational risk management authority when speciļ¬c criteria for deactivation \nor disengagement is met for a particular context of use or for the GAI system as a \nwhole. \nInformation Security \nMG-2.4-003 \nEstablish and maintain procedures for the remediation of issues which trigger \nincident response processes for the use of a GAI system, and provide stakeholders \ntimelines associated with the remediation plan. \nInformation Security \n \nMG-2.4-004 Establish and regularly review speciļ¬c criteria that warrants the deactivation of \nGAI systems in accordance with set risk tolerances and appetites. \nInformation Security \n \nAI Actor Tasks: AI Deployment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 3.1: AI risks and beneļ¬ts from third-party resources are regularly monitored, and risk controls are applied and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMG-3.1-001 \nApply organizational risk tolerances and controls (e.g., acquisition and \nprocurement processes; assessing personnel credentials and qualiļ¬cations, \nperforming background checks; ļ¬ltering GAI input and outputs, grounding, ļ¬ne \ntuning, retrieval-augmented generation) to third-party GAI resources: Apply \norganizational risk tolerance to the utilization of third-party datasets and other \nGAI resources; Apply organizational risk tolerances to ļ¬ne-tuned third-party \nmodels; Apply organizational risk tolerance to existing third-party models \nadapted to a new domain; Reassess risk measurements after ļ¬ne-tuning third-\nparty GAI models. \nValue Chain and Component \nIntegration; Intellectual Property \nMG-3.1-002 \nTest GAI system value chain risks (e.g., data poisoning, malware, other software \nand hardware vulnerabilities; labor practices; data privacy and localization \ncompliance; geopolitical alignment). \nData Privacy; Information Security; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nMG-3.1-003 \nRe-assess model risks after ļ¬ne-tuning or retrieval-augmented generation \nimplementation and for any third-party GAI models deployed for applications \nand/or use cases that were not evaluated in initial testing. 
\nValue Chain and Component \nIntegration \nMG-3.1-004 \nTake reasonable measures to review training data for CBRN information, and \nintellectual property, and where appropriate, remove it. Implement reasonable \nmeasures to prevent, flag, or take other action in response to outputs that \nreproduce particular training data (e.g., plagiarized, trademarked, patented, \nlicensed content or trade secret material). \nIntellectual Property; CBRN \nInformation or Capabilities \n']","Organizational risk tolerances that should be applied to the utilization of third-party GAI resources include applying risk tolerances to the utilization of third-party datasets and other GAI resources, fine-tuned third-party models, and existing third-party models adapted to a new domain. Additionally, it involves reassessing risk measurements after fine-tuning third-party GAI models.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 45, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What role do legal protections play in addressing algorithmic discrimination?,"[' \nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable \nway. \nAlgorithmic \ndiscrimination \noccurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex \n(including \npregnancy, \nchildbirth, \nand \nrelated \nmedical \nconditions, \ngender \nidentity, \nintersex \nstatus, \nand \nsexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic information, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. 
Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23\n']","The context mentions that algorithmic discrimination may violate legal protections depending on specific circumstances, indicating that legal protections play a role in addressing algorithmic discrimination.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 22, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What protections should be in place for data and inferences related to sensitive domains?,"[' \n \n \n \n \nSECTION TITLE\nDATA PRIVACY\nYou should be protected from abusive data practices via built-in protections and you \nshould have agency over how data about you is used. You should be protected from violations of \nprivacy through design choices that ensure such protections are included by default, including ensuring that \ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \nusers with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases \nwhere it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable \nin plain language, and give you agency over data collection and the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \nrestrictions for data and inferences related to sensitive domains, including health, work, education, criminal \njustice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \nrelated inferences should only be used for necessary functions, and you should be protected by ethical review \nand use prohibitions. You and your communities should be free from unchecked surveillance; surveillance \ntechnologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \npotential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the use of such surveillance \ntechnologies is likely to limit rights, opportunities, or access. 
\nNOTICE AND EXPLANATION\nYou should know that an automated system is being used and understand how and why it \ncontributes to outcomes that impact you. Designers, developers, and deployers of automated systems \nshould provide generally accessible plain language documentation including clear descriptions of the overall \nsystem functioning and the role automation plays, notice that such systems are in use, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice \nshould be kept up-to-date and people impacted by the system should be notified of significant use case or key \nfunctionality changes. You should know how and why an outcome impacting you was determined by an \nautomated system, including when the automated system is not the sole input determining the outcome. \nAutomated systems should provide explanations that are technically valid, meaningful and useful to you and to \nany operators or others who need to understand the system, and calibrated to the level of risk based on the \ncontext. Reporting that includes summary information about these automated systems in plain language and \nassessments of the clarity and quality of the notice and explanations should be made public whenever possible. \n6\n', 'You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases where it can be \nappropriately and meaningfully given. Any consent requests should be brief, \nbe understandable in plain language, and give you agency over data collection \nand the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. Enhanced \nprotections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and \nfor data pertaining to youth should put you first. In sensitive domains, your \ndata and related inferences should only be used for necessary functions, and \nyou should be protected by ethical review and use prohibitions. You and your \ncommunities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least \npre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the \nuse of such surveillance technologies is likely to limit rights, opportunities, or \naccess. 
Whenever possible, you should have access to reporting that confirms \nyour data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or \naccess. \nDATA PRIVACY\n30\n']","Enhanced protections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and related inferences should only be used for necessary functions, and you should be protected by ethical review and use prohibitions.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 5, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 29, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the potential consequences of using automated systems without protections against algorithmic discrimination?,"["" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. 
Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts—basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women applicants for spurious and discriminatory reasons; resumes with the word "women's," such as "women's\nchess club captain," were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n""]","The potential consequences of using automated systems without protections against algorithmic discrimination include inequitable outcomes, wrongful and discriminatory arrests due to facial recognition technology, discriminatory hiring decisions informed by biased algorithms, and healthcare algorithms that may discount the severity of diseases in certain racial groups. 
These issues can lead to systemic biases being amplified and harm to underserved communities.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What measures should be taken to address confabulation in GAI system outputs?,"[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Configuration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, fine-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Configuration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Configuration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that fine-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. 
\nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n']","To address confabulation in GAI system outputs, the following measures should be taken: review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities (MS-2.5-003), and avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments (MS-2.5-001).",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What are some concerns related to data privacy in the context of sensitive domains?,"[' \n \n \n \n \n \n \nDATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\n•\nContinuous positive airway pressure machines gather data for medical purposes, such as diagnosing sleep\napnea, and send usage data to a patient's insurance company, which may subsequently deny coverage for the\ndevice based on usage data. Patients were not aware that the data would be used in this way or monitored\nby anyone other than their doctor.70 \n•\nA department store company used predictive analytics applied to collected consumer data to determine that a\nteenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her\nhouse, revealing to her father that she was pregnant.71\n•\nSchool audio surveillance systems monitor student conversations to detect potential ""stress indicators"" as\na warning of potential violence.72 Online proctoring systems claim to detect if a student is cheating on an\nexam using biometric markers.73 These systems have the potential to limit student freedom to express a range\nof emotions at school and may inappropriately flag students with disabilities who need accommodations or\nuse screen readers or dictation software as cheating.74\n•\nLocation data, acquired from a data broker, can be used to identify people who visit abortion clinics.75\n•\nCompanies collect student data such as demographic information, free or reduced lunch status, whether\nthey\'ve used drugs, or whether they\'ve expressed interest in LGBTQI+ groups, and then use that data to \nforecast student success.76 Parents and education experts have expressed concern about collection of such\nsensitive data without express parental consent, the lack of transparency in how such data is being used, and\nthe potential for resulting discriminatory impacts.\n• Many employers transfer employee data to third party job verification services. This information is then used\nby potential future employers, banks, or landlords. 
In one case, a former employee alleged that a\ncompany supplied false data about her job title which resulted in a job offer being revoked.77\n37\n']","Concerns related to data privacy in sensitive domains include the lack of awareness among patients regarding the use of their medical data by insurance companies, the revelation of personal information (such as pregnancy) through targeted advertising, the monitoring of student conversations which may limit emotional expression and unfairly flag students with disabilities, the use of location data to identify individuals visiting abortion clinics, the collection of sensitive student data without parental consent, and the potential for discriminatory impacts from such data usage. Additionally, there are concerns about the accuracy of employee data transferred to third parties, which can affect job opportunities.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 36, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What factors should be considered when evaluating the risk-relevant capabilities of GAI?,"[' \n14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. \nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organization's risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or defining risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, offensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces significant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. 
\nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval (""go""/""no-go"") policies, procedures, and processes, \nwith reviewed processes and approval thresholds reflecting measurement of GAI \ncapabilities and risks. \nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or offensive cyber capabilities. \nCBRN Information or Capabilities; \nInformation Security \n']","Factors to consider when evaluating the risk-relevant capabilities of GAI include abuses and impacts to information integrity, dependencies between GAI and other IT or data systems, harm to fundamental rights or public safety, presentation of obscene, objectionable, offensive, discriminatory, invalid or untruthful output, psychological impacts to humans (e.g., anthropomorphization, algorithmic aversion, emotional entanglement), possibility for malicious use, whether the system introduces significant new security vulnerabilities, anticipated system impact on some groups compared to others, and unreliable decision-making capabilities, validity, adaptability, and variability of GAI system performance over time.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 17, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
Automated systems with an intended use within sensitive domains, including, but not limited to, criminal justice, employment, education, and health, should additionally be tailored to the purpose, provide meaningful access for oversight, include training for any people interacting \nwith the system, and incorporate human consideration for adverse or high-risk decisions. Reporting that includes \na description of these human governance processes and assessment of their timeliness, accessibility, outcomes, \nand effectiveness should be made public whenever possible. \nDefinitions for key terms in The Blueprint for an AI Bill of Rights can be found in Applying the Blueprint for an AI Bill of Rights. \nAccompanying analysis and tools for actualizing each principle can be found in the Technical Companion. \n7\n']","When using automated systems in sensitive domains, considerations should include tailoring the systems to their intended purpose, providing meaningful access for oversight, ensuring training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions. Additionally, there should be a focus on accessibility, equity, effectiveness, and the maintenance of these systems, along with public reporting on human governance processes and their outcomes.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 6, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What should be included in the summary reporting for automated systems?,"["" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nTailored to the level of risk. An assessment should be done to determine the level of risk of the automated system. In settings where the consequences are high as determined by a risk assessment, or extensive \noversight is expected (e.g., in criminal justice or some public sector settings), explanatory mechanisms should \nbe built into the system design so that the system's full behavior can be explained in advance (i.e., only fully \ntransparent models should be used), rather than as an after-the-decision interpretation. In other settings, the \nextent of explanation provided should be tailored to the risk level. \nValid. The explanation provided by a system should accurately reflect the factors and the influences that led \nto a particular decision, and should be meaningful for the particular customization based on purpose, target, \nand level of risk. While approximation and simplification may be necessary for the system to succeed based on \nthe explanatory purpose and target of the explanation, or to account for the risk of fraud or other concerns \nrelated to revealing decision-making information, such simplifications should be done in a scientifically \nsupportable way. 
Where appropriate based on the explanatory system, error ranges for the explanation should \nbe calculated and included in the explanation, with the choice of presentation of such information balanced \nwith usability and overall interface complexity concerns. \nDemonstrate protections for notice and explanation \nReporting. Summary reporting should document the determinations made based on the above consider\xad\nations, including: the responsible entities for accountability purposes; the goal and use cases for the system, \nidentified users, and impacted populations; the assessment of notice clarity and timeliness; the assessment of \nthe explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment \nof how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of \nrisk. Individualized profile information should be made readily available to the greatest extent possible that \nincludes explanations for any system impacts or inferences. Reporting should be provided in a clear plain \nlanguage and machine-readable manner. \n44\n""]","The summary reporting for automated systems should include: the responsible entities for accountability purposes; the goal and use cases for the system; identified users and impacted populations; the assessment of notice clarity and timeliness; the assessment of the explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment of how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of risk.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 43, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the key considerations for testing and deployment of automated systems to ensure their safety and effectiveness?,"[' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. 
The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\xad\ntial risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on peopleā€™s rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, mea\xad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. \n18\n']","Key considerations for testing and deployment of automated systems to ensure their safety and effectiveness include extensive testing before deployment, following domain-specific best practices, considering the roles of human operators, mirroring real-world conditions during testing, comparing system performance with existing human-driven procedures, and identifying and mitigating potential risks proactively. 
Testing should include both automated and human-led testing, and decision possibilities should include the option of not deploying the system if performance does not meet standards.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the purpose of pre-deployment testing in the development of automated systems?,"[' \n \n \nSAFE AND EFFECTIVE SYSTEMS \nYou should be protected from unsafe or ineffective systems. Automated systems should be developed with consultation \nfrom diverse communities, stakeholders, and domain experts to identify concerns, risks, and potential impacts of the system. Systems \nshould undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring that demonstrate they are safe and \neffective based on their intended use, mitigation of unsafe outcomes \nincluding those beyond the intended use, and adherence to domain-specific standards. Outcomes of these protective measures \nshould include the possibility of not deploying the system or removing a system from use. Automated systems should not be designed \nwith an intent or reasonably foreseeable possibility of endangering \nyour safety or the safety of your community. They should be designed \nto proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, development, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible. 
\n15\n']","The purpose of pre-deployment testing in the development of automated systems is to identify risks and potential impacts of the system, ensuring that it is safe and effective based on its intended use, and to mitigate unsafe outcomes, including those beyond the intended use.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 14, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What is the purpose of the AI Bill of Rights in relation to the Executive Order on trustworthy artificial intelligence?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principlesā€”while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AIā€”require that AI is: (a) lawful and \nrespectful of our Nationā€™s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f ) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulationsā€”and \nmeasures to address harms when they occurā€”can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components. 
\nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturersā€™ ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companiesā€™ reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-\nBased Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful \nuses. \nThe \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. 
\n21\n']","The Blueprint for an AI Bill of Rights is consistent with the Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the Federal Government, which requires federal agencies to adhere to nine principles when using AI.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are some examples of how data privacy principles aim to protect against identity theft?,"["" \n \n \n \nDATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nā€¢\nAn insurer might collect data from a person's social media presence as part of deciding what life\ninsurance rates they should be offered.64\nā€¢\nA data broker harvested large amounts of personal data and then suffered a breach, exposing hundreds of\nthousands of people to potential identity theft. 65\nā€¢\nA local public housing authority installed a facial recognition system at the entrance to housing complexes to\nassist law enforcement with identifying individuals viewed via camera when police reports are filed, leading\nthe community, both those living in the housing complex and not, to have videos of them sent to the local\npolice department and made available for scanning by its facial recognition software.66\nā€¢\nCompanies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32\n""]","Examples of how data privacy principles aim to protect against identity theft include: a data broker harvesting large amounts of personal data and suffering a breach that exposes individuals to potential identity theft, and an insurer collecting data from a person's social media presence to determine life insurance rates, which could lead to misuse of personal information.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 31, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the concerns associated with unsafe diffusion in the context of AI-generated content?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. 
NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n', ' \n57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institute of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institute of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence \nNorthcutt, C. et al. (2021) Pervasive Label Errors in Test Sets Destabilize Machine Learning Benchmarks. \narXiv. https://arxiv.org/pdf/2103.14749 \nOECD (2023) ""Advancing accountability in AI: Governing and managing risks throughout the lifecycle for \ntrustworthy AI"", OECD Digital Economy Papers, No. 349, OECD Publishing, Paris. \nhttps://doi.org/10.1787/2448f04b-en \nOECD (2024) ""Defining AI incidents and related terms"" OECD Artificial Intelligence Papers, No. 16, OECD \nPublishing, Paris. https://doi.org/10.1787/d1a8d965-en \nOpenAI (2023) GPT-4 System Card. 
https://cdn.openai.com/papers/gpt-4-system-card.pdf \nOpenAI (2024) GPT-4 Technical Report. https://arxiv.org/pdf/2303.08774 \nPadmakumar, V. et al. (2024) Does writing with language models reduce content diversity? ICLR. \nhttps://arxiv.org/pdf/2309.05196 \nPark, P. et. al. (2024) AI deception: A survey of examples, risks, and potential solutions. Patterns, 5(5). \narXiv. https://arxiv.org/pdf/2308.14752 \nPartnership on AI (2023) Building a Glossary for Synthetic Media Transparency Methods, Part 1: Indirect \nDisclosure. https://partnershiponai.org/glossary-for-synthetic-media-transparency-methods-part-1-\nindirect-disclosure/ \nQu, Y. et al. (2023) Unsafe Diffusion: On the Generation of Unsafe Images and Hateful Memes From Text-\nTo-Image Models. arXiv. https://arxiv.org/pdf/2305.13873 \nRafat, K. et al. (2023) Mitigating carbon footprint for knowledge distillation based deep learning model \ncompression. PLOS One. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0285668 \nSaid, I. et al. (2022) Nonconsensual Distribution of Intimate Images: Exploring the Role of Legal Attitudes \nin Victimization and Perpetration. Sage. \nhttps://journals.sagepub.com/doi/full/10.1177/08862605221122834#bibr47-08862605221122834 \nSandbrink, J. (2023) Artificial intelligence and biological misuse: Differentiating risks of language models \nand biological design tools. arXiv. https://arxiv.org/pdf/2306.13952 \n']",The answer to given question is not present in context,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 60, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What measures should entities take to maintain data quality in sensitive domains?,"[' \n \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \xad\xad\xad\xad\xad\xad\nIn addition to the privacy expectations above for general non-sensitive data, any system collecting, using, shar-\ning, or storing sensitive data should meet the expectations below. Depending on the technological use case and \nbased on an ethical assessment, consent for sensitive data may need to be acquired from a guardian and/or child. \nProvide enhanced protections for data related to sensitive domains \nNecessary functions only. 
Sensitive data should only be used for functions strictly necessary for that \ndomain or for functions that are required for administrative reasons (e.g., school attendance records), unless \nconsent is acquired, if appropriate, and the additional expectations in this section are met. Consent for non-\nnecessary functions should be optional, i.e., should not be required, incentivized, or coerced in order to \nreceive opportunities or access to services. In cases where data is provided to an entity (e.g., health insurance \ncompany) in order to facilitate payment for such a need, that data should only be used for that purpose. \nEthical review and use prohibitions. Any use of sensitive data or decision process based in part on sensi-\ntive data that might limit rights, opportunities, or access, whether the decision is automated or not, should go \nthrough a thorough ethical review and monitoring, both in advance and by periodic review (e.g., via an indepen-\ndent ethics committee or similarly robust process). In some cases, this ethical review may determine that data \nshould not be used or shared for specific uses even with consent. Some novel uses of automated systems in this \ncontext, where the algorithm is dynamically developing and where the science behind the use case is not well \nestablished, may also count as human subject experimentation, and require special review under organizational \ncompliance bodies applying medical, scientific, and academic human subject experimentation ethics rules and \ngovernance procedures. \nData quality. In sensitive domains, entities should be especially careful to maintain the quality of data to \navoid adverse consequences arising from decision-making based on flawed or inaccurate data. Such care is \nnecessary in a fragmented, complex data ecosystem and for datasets that have limited access such as for fraud \nprevention and law enforcement. It should be not left solely to individuals to carry the burden of reviewing and \ncorrecting data. Entities should conduct regular, independent audits and take prompt corrective measures to \nmaintain accurate, timely, and complete data. \nLimit access to sensitive data and derived data. Sensitive data and derived data should not be sold, \nshared, or made public as part of data brokerage or other agreements. Sensitive data includes data that can be \nused to infer sensitive information; even systems that are not directly marketed as sensitive domain technologies \nare expected to keep sensitive data private. Access to such data should be limited based on necessity and based \non a principle of local control, such that those individuals closest to the data subject have more access while \nthose who are less proximate do not (e.g., a teacher has access to their students’ daily progress data while a \nsuperintendent does not). \nReporting. 
In addition to the reporting on data privacy (as listed above for non-sensitive data), entities devel-\noping technologies related to a sensitive domain and those collecting, using, storing, or sharing sensitive data \nshould, whenever appropriate, regularly provide public reports describing: any data security lapses or breaches \nthat resulted in sensitive data leaks; the number, type, and outcomes of ethical pre-reviews undertaken; a \ndescription of any data sold, shared, or made public, and how that data was assessed to determine it did not pres-\nent a sensitive data risk; and ongoing risk identification and management procedures, and any mitigation added \nbased on these procedures. Reporting should be provided in a clear and machine-readable manner. \n38\n']","Entities should be especially careful to maintain the quality of data in sensitive domains to avoid adverse consequences arising from decision-making based on flawed or inaccurate data. This includes conducting regular, independent audits and taking prompt corrective measures to maintain accurate, timely, and complete data.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 37, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What is the purpose of implementing a supplier risk assessment framework in evaluating third-party entities?,"[' \n21 \nGV-6.1-005 \nImplement a use-cased based supplier risk assessment framework to evaluate and \nmonitor third-party entities’ performance and adherence to content provenance \nstandards and technologies to detect anomalies and unauthorized changes; \nservices acquisition and value chain risk management; and legal compliance. \nData Privacy; Information \nIntegrity; Information Security; \nIntellectual Property; Value Chain \nand Component Integration \nGV-6.1-006 Include clauses in contracts which allow an organization to evaluate third-party \nGAI processes and standards. \nInformation Integrity \nGV-6.1-007 Inventory all third-party entities with access to organizational content and \nestablish approved GAI technology and service provider lists. \nValue Chain and Component \nIntegration \nGV-6.1-008 Maintain records of changes to content made by third parties to promote content \nprovenance, including sources, timestamps, metadata. \nInformation Integrity; Value Chain \nand Component Integration; \nIntellectual Property \nGV-6.1-009 \nUpdate and integrate due diligence processes for GAI acquisition and \nprocurement vendor assessments to include intellectual property, data privacy, \nsecurity, and other risks. For example, update processes to: Address solutions that \nmay rely on embedded GAI technologies; Address ongoing monitoring, \nassessments, and alerting, dynamic risk assessments, and real-time reporting \ntools for monitoring third-party GAI risks; Consider policy adjustments across GAI \nmodeling libraries, tools and APIs, fine-tuned models, and embedded tools; \nAssess GAI vendors, open-source or proprietary GAI tools, or GAI service \nproviders against incident or vulnerability databases. 
\nData Privacy; Human-AI \nConfiguration; Information \nSecurity; Intellectual Property; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nGV-6.1-010 \nUpdate GAI acceptable use policies to address proprietary and open-source GAI \ntechnologies and data, and contractors, consultants, and other third-party \npersonnel. \nIntellectual Property; Value Chain \nand Component Integration \nAI Actor Tasks: Operation and Monitoring, Procurement, Third-party entities \n \nGOVERN 6.2: Contingency processes are in place to handle failures or incidents in third-party data or AI systems deemed to be \nhigh-risk. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.2-001 \nDocument GAI risks associated with system value chain to identify over-reliance \non third-party data and to identify fallbacks. \nValue Chain and Component \nIntegration \nGV-6.2-002 \nDocument incidents involving third-party GAI data and systems, including open-\ndata and open-source software. \nIntellectual Property; Value Chain \nand Component Integration \n']","The purpose of implementing a supplier risk assessment framework in evaluating third-party entities is to assess and monitor their performance and adherence to content provenance standards, detect anomalies and unauthorized changes, manage services acquisition and value chain risks, and ensure legal compliance.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 24, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes?,"[' \n38 \nMEASURE 2.13: Effectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric effectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are difficult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Configuration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might affect \ndifferent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. \nHuman-AI Configuration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization \n']","The purpose of creating measurement error models for pre-deployment metrics in the context of TEVV processes is to demonstrate construct validity for each metric, ensuring that the metric effectively operationalizes the desired concept. This involves measuring or estimating and documenting biases or statistical variance in applied metrics or structured human feedback processes, while leveraging domain expertise when modeling complex societal constructs such as hateful content.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What role do legal protections play in addressing algorithmic discrimination?,"["" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. 
Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts—basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women appli\xad\ncants for spurious and discriminatory reasons; resumes with the word “women’s,” such as “women’s\nchess club captain,” were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n"", ' \xad\xad\xad\xad\xad\xad\xad\nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable \nway. 
\nAlgorithmic \ndiscrimination \noccurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex \n(including \npregnancy, \nchildbirth, \nand \nrelated \nmedical \nconditions, \ngender \nidentity, \nintersex \nstatus, \nand \nsexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic infor-mation, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23\n']","The context mentions that algorithmic discrimination may violate legal protections, indicating that legal protections play a role in addressing algorithmic discrimination by providing a framework that designers, developers, and deployers of automated systems must adhere to in order to protect individuals and communities from unjustified different treatment based on various classifications.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 22, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the potential risks associated with the production and access to obscene and abusive content?,"[' \n5 \noperations, or other cyberattacks; increased attack surface for targeted cyberattacks, which may \ncompromise a system’s availability or the confidentiality or integrity of training data, code, or \nmodel weights. \n10. Intellectual Property: Eased production or replication of alleged copyrighted, trademarked, or \nlicensed content without authorization (possibly in situations which do not fall under fair use); \neased exposure of trade secrets; or plagiarism or illegal replication. \n11. 
Obscene, Degrading, and/or Abusive Content: Eased production of and access to obscene, \ndegrading, and/or abusive imagery which can cause harm, including synthetic child sexual abuse \nmaterial (CSAM), and nonconsensual intimate images (NCII) of adults. \n12. Value Chain and Component Integration: Non-transparent or untraceable integration of \nupstream third-party components, including data that has been improperly obtained or not \nprocessed and cleaned due to increased automation from GAI; improper supplier vetting across \nthe AI lifecycle; or other issues that diminish transparency or accountability for downstream \nusers. \n2.1. CBRN Information or Capabilities \nIn the future, GAI may enable malicious actors to more easily access CBRN weapons and/or relevant \nknowledge, information, materials, tools, or technologies that could be misused to assist in the design, \ndevelopment, production, or use of CBRN weapons or other dangerous materials or agents. While \nrelevant biological and chemical threat knowledge and information is often publicly accessible, LLMs \ncould facilitate its analysis or synthesis, particularly by individuals without formal scientific training or \nexpertise. \nRecent research on this topic found that LLM outputs regarding biological threat creation and attack \nplanning provided minimal assistance beyond traditional search engine queries, suggesting that state-of-\nthe-art LLMs at the time these studies were conducted do not substantially increase the operational \nlikelihood of such an attack. The physical synthesis development, production, and use of chemical or \nbiological agents will continue to require both applicable expertise and supporting materials and \ninfrastructure. The impact of GAI on chemical or biological agent misuse will depend on what the key \nbarriers for malicious actors are (e.g., whether information access is one such barrier), and how well GAI \ncan help actors address those barriers. \nFurthermore, chemical and biological design tools (BDTs) – highly specialized AI systems trained on \nscientific data that aid in chemical and biological design – may augment design capabilities in chemistry \nand biology beyond what text-based LLMs are able to provide. As these models become more \nefficacious, including for beneficial uses, it will be important to assess their potential to be used for \nharm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems’ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable \n']","The potential risks associated with the production and access to obscene and abusive content include eased production of and access to obscene, degrading, and/or abusive imagery, which can cause harm. 
This includes synthetic child sexual abuse material (CSAM) and nonconsensual intimate images (NCII) of adults.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 8, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What efforts is NIST making to ensure the development of safe and trustworthy AI?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']","NIST is making efforts to ensure the development of safe and trustworthy AI by developing measurements, technology, tools, and standards that advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence. They have established the U.S. 
AI Safety Institute and the AI Safety Institute Consortium to build the necessary science for safe, secure, and trustworthy development and use of AI, in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What considerations are important for governing across the AI value chain in the context of generative AI?,"[' \n47 \nAppendix A. Primary GAI Considerations \nThe following primary considerations were derived as overarching themes from the GAI PWG \nconsultation process. These considerations (Governance, Pre-Deployment Testing, Content Provenance, \nand Incident Disclosure) are relevant for voluntary use by any organization designing, developing, and \nusing GAI and also inform the Actions to Manage GAI risks. Information included about the primary \nconsiderations is not exhaustive, but highlights the most relevant topics derived from the GAI PWG. \nAcknowledgments: These considerations could not have been surfaced without the helpful analysis and \ncontributions from the community and NIST staff GAI PWG leads: George Awad, Luca Belli, Harold Booth, \nMat Heyman, Yooyoung Lee, Mark Pryzbocki, Reva Schwartz, Martin Stanley, and Kyra Yee. \nA.1. Governance \nA.1.1. Overview \nLike any other technology system, governance principles and techniques can be used to manage risks \nrelated to generative AI models, capabilities, and applications. Organizations may choose to apply their \nexisting risk tiering to GAI systems, or they may opt to revise or update AI system risk levels to address \nthese unique GAI risks. This section describes how organizational governance regimes may be re-\nevaluated and adjusted for GAI contexts. It also addresses third-party considerations for governing across \nthe AI value chain. \nA.1.2. Organizational Governance \nGAI opportunities, risks and long-term performance characteristics are typically less well-understood \nthan non-generative AI tools and may be perceived and acted upon by humans in ways that vary greatly. \nAccordingly, GAI may call for different levels of oversight from AI Actors or different human-AI \nconfigurations in order to manage their risks effectively. Organizations’ use of GAI systems may also \nwarrant additional human review, tracking and documentation, and greater management oversight. \nAI technology can produce varied outputs in multiple modalities and present many classes of user \ninterfaces. This leads to a broader set of AI Actors interacting with GAI systems for widely differing \napplications and contexts of use. These can include data labeling and preparation, development of GAI \nmodels, content moderation, code generation and review, text generation and editing, image and video \ngeneration, summarization, search, and chat. These activities can take place within organizational \nsettings or in the public domain. 
\nOrganizations can restrict AI applications that cause harm, exceed stated risk tolerances, or that conflict \nwith their tolerances or values. Governance tools and protocols that are applied to other types of AI \nsystems can be applied to GAI systems. These plans and actions include: \n• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance \n']","The important considerations for governing across the AI value chain in the context of generative AI include organizational governance, oversight levels, human-AI configurations, human review, tracking and documentation, and management oversight. Additionally, governance tools and protocols that apply to other types of AI systems can also be applied to generative AI systems, including accessibility, AI actor credentials, alignment to organizational values, auditing, change-management controls, commercial use, and data provenance.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 50, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are the suggested actions to address confabulation in GAI systems?,"[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Configuration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, fine-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Configuration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Configuration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that fine-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. 
\nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Configuration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (“go”/“no-go” \ndecisions), monitoring, and decommission decisions. \nHuman-AI Configuration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","The suggested actions to address confabulation in GAI systems include: 1) Avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments (MS-2.5-001). 2) Review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities (MS-2.5-003). 
3) Evaluate GAI system performance in real-world scenarios to observe its behavior in practical environments and reveal issues that might not surface in controlled and optimized testing environments (MS-4.2-002).",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are the implications of bias and discrimination in automated systems on the rights of the American public?,"[' \nSECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent. \nThese outcomes are deeply harmful—but they are not inevitable. Automated systems have brought about extraor-\ndinary benefits, from technology that helps farmers grow food more efficiently and computers that predict storm \npaths, to algorithms that can identify diseases in patients. These tools now drive important decisions across \nsectors, while data is helping to revolutionize global industries. Fueled by the power of American innovation, \nthese tools hold the potential to redefine every part of our society and make life better for everyone. \nThis important progress must not come at the price of civil rights or democratic values, foundational American \nprinciples that President Biden has affirmed as a cornerstone of his Administration. 
On his first day in office, the \nPresident ordered the full Federal government to work to root out inequity, embed fairness in decision-\nmaking processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.1 The \nPresident has spoken forcefully about the urgent challenges posed to democracy today and has regularly called \non people of conscience to act to preserve civil rights—including the right to privacy, which he has called “the \nbasis for so many more rights that we have come to take for granted that are ingrained in the fabric of this \ncountry.”2\nTo advance President Biden’s vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threats—and uses technologies in ways that reinforce our highest values. \nResponding to the experiences of the American public, and informed by insights from researchers, \ntechnologists, advocates, journalists, and policymakers, this framework is accompanied by a technical \ncompanion—a handbook for anyone seeking to incorporate these protections into policy and practice, including \ndetailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3\n']","The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting and reproducing existing unwanted inequities. These outcomes can undermine civil rights and democratic values, which are foundational American principles.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 2, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What was the purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies?,"['APPENDIX\nSummaries of Additional Engagements: \n• OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of\nartificial intelligence and other data-driven technologies in their lives.\n• OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The\npurpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or\nplanned use; the domains in which these technologies are being used; the entities making use of them; current\nprinciples, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their\nuse or regulation. 
The 130 responses to this RFI are available in full online114 and were submitted by the below\nlisted organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L. Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium \nat Computing Research Association \nConnected Health Initiative \nConsumer Technology Association \nCourtney Radsch \nCoworker \nCyber Farm Labs \nData & Society Research Institute \nData for Black Lives \nData to Actionable Knowledge Lab \nat Harvard University \nDeloitte \nDev Technology Group \nDigital Therapeutics Alliance \nDigital Welfare State & Human \nRights Project and Center for \nHuman Rights and Global Justice at \nNew York University School of \nLaw, and Temple University \nInstitute for Law, Innovation & \nTechnology \nDignari \nDouglas Goddard \nEdgar Dworsky \nElectronic Frontier Foundation \nElectronic Privacy Information \nCenter, Center for Digital \nDemocracy, and Consumer \nFederation of America \nFaceTec \nFight for the Future \nGanesh Mani \nGeorgia Tech Research Institute \nGoogle \nHealth Information Technology \nResearch and Development \nInteragency Working Group \nHireVue \nHR Policy Association \nID.me \nIdentity and Data Sciences \nLaboratory at Science Applications \nInternational Corporation \nInformation Technology and \nInnovation Foundation \nInformation Technology Industry \nCouncil \nInnocence Project \nInstitute for Human-Centered \nArtificial Intelligence at Stanford \nUniversity \nIntegrated Justice Information \nSystems Institute \nInternational Association of Chiefs \nof Police \nInternational Biometrics + Identity \nAssociation \nInternational Business Machines \nCorporation \nInternational Committee of the Red \nCross \nInventionphysics \niProov \nJacob Boudreau \nJennifer K. 
Wagner, Dan Berger, \nMargaret Hu, and Sara Katsanis \nJonathan Barry-Blocker \nJoseph Turow \nJoy Buolamwini \nJoy Mack \nKaren Bureau \nLamont Gholston \nLawyers’ Committee for Civil \nRights Under Law \n60\n']","The purpose of the Request For Information (RFI) issued by OSTP regarding biometric technologies was to understand the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 59, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain effective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability – as identified in the MAP function – are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Configuration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modified, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. 
\nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","The suggested action to address risks associated with intellectual property infringement in organizational GAI systems is to compile statistics on actual policy violations, take-down requests, and intellectual property infringement, and analyze transparency reports across demographic and language groups.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What role does human-AI integration play in enhancing customer service?,"["" \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nHealthcare “navigators” help people find their way through online signup forms to choose \nand obtain healthcare. A Navigator is “an individual or organization that's trained and able to help \nconsumers, small businesses, and their employees as they look for health coverage options through the \nMarketplace (a government web site), including completing eligibility and enrollment forms.”106 For \nthe 2022 plan year, the Biden-Harris Administration increased funding so that grantee organizations could \n“train and certify more than 1,500 Navigators to help uninsured consumers find affordable and comprehensive \nhealth coverage.”107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. Some federal \ncourts have found that such cure procedures are constitutionally required.110 \nBallot \ncuring processes vary among states, and include direct phone calls, emails, or mail contact by election \nofficials.111 Voters are asked to provide alternative information or a new signature to verify the validity of their \nballot. 
\n52\n""]","Human-AI integration plays a key role in enhancing customer service by allowing companies to provide faster customer care through partially automated customer service platforms. These systems help answer customer questions and compile common problems for human agents to review, while maintaining human agents to respond to complicated requests. This integration is viewed as essential for successful customer service.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 51, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What is the purpose of the NIST AI Risk Management Framework?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principlesā€”while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AIā€”require that AI is: (a) lawful and \nrespectful of our Nationā€™s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f ) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulationsā€”and \nmeasures to address harms when they occurā€”can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components. 
\nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturers’ ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companies’ reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-\nBased Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful \nuses. \nThe \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. \n21\n']","The purpose of the NIST AI Risk Management Framework is to help incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems. 
It aims to foster the development of innovative approaches to address characteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, robustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of harmful uses.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What are the different stages of the AI lifecycle where risks can arise?,"[' \n2 \nThis work was informed by public feedback and consultations with diverse stakeholder groups as part of NIST’s \nGenerative AI Public Working Group (GAI PWG). The GAI PWG was an open, transparent, and collaborative \nprocess, facilitated via a virtual workspace, to obtain multistakeholder input on GAI risk management and to \ninform NIST’s approach. \nThe focus of the GAI PWG was limited to four primary considerations relevant to GAI: Governance, Content \nProvenance, Pre-deployment Testing, and Incident Disclosure (further described in Appendix A). As such, the \nsuggested actions in this document primarily address these considerations. \nFuture revisions of this profile will include additional AI RMF subcategories, risks, and suggested actions based \non additional considerations of GAI as the space evolves and empirical evidence indicates additional risks. A \nglossary of terms pertinent to GAI risk management will be developed and hosted on NIST’s Trustworthy & \nResponsible AI Resource Center (AIRC), and added to The Language of Trustworthy AI: An In-Depth Glossary of \nTerms. \nThis document was also informed by public comments and consultations from several Requests for Information. \n \n2. \nOverview of Risks Unique to or Exacerbated by GAI \nIn the context of the AI RMF, risk refers to the composite measure of an event’s probability (or \nlikelihood) of occurring and the magnitude or degree of the consequences of the corresponding event. \nSome risks can be assessed as likely to materialize in a given context, particularly those that have been \nempirically demonstrated in similar contexts. Other risks may be unlikely to materialize in a given \ncontext, or may be more speculative and therefore uncertain. \nAI risks can differ from or intensify traditional software risks. Likewise, GAI can exacerbate existing AI \nrisks, and creates unique risks. GAI risks can vary along many dimensions: \n• \nStage of the AI lifecycle: Risks can arise during design, development, deployment, operation, \nand/or decommissioning. \n• \nScope: Risks may exist at individual model or system levels, at the application or implementation \nlevels (i.e., for a specific use case), or at the ecosystem level – that is, beyond a single system or \norganizational context. 
Examples of the latter include the expansion of “algorithmic \nmonocultures,3” resulting from repeated use of the same model, or impacts on access to \nopportunity, labor markets, and the creative economies.4 \n• \nSource of risk: Risks may emerge from factors related to the design, training, or operation of the \nGAI model itself, stemming in some cases from GAI model or system inputs, and in other cases, \nfrom GAI system outputs. Many GAI risks, however, originate from human behavior, including \n \n \n3 “Algorithmic monocultures” refers to the phenomenon in which repeated use of the same model or algorithm in \nconsequential decision-making settings like employment and lending can result in increased susceptibility by \nsystems to correlated failures (like unexpected shocks), due to multiple actors relying on the same algorithm. \n4 Many studies have projected the impact of AI on the workforce and labor markets. Fewer studies have examined \nthe impact of GAI on the labor market, though some industry surveys indicate that that both employees and \nemployers are pondering this disruption. \n']","Risks can arise during the design, development, deployment, operation, and/or decommissioning stages of the AI lifecycle.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 5, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What role do technical protections play in the implementation of the Blueprint for an AI Bill of Rights?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n- \nUSING THIS TECHNICAL COMPANION\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the design, \nuse, and deployment of automated systems to protect the rights of the American public in the age of artificial \nintelligence. This technical companion considers each principle in the Blueprint for an AI Bill of Rights and \nprovides examples and concrete steps for communities, industry, governments, and others to take in order to \nbuild these protections into policy, practice, or the technological design process. \nTaken together, the technical protections and practices laid out in the Blueprint for an AI Bill of Rights can help \nguard the American public against many of the potential and actual harms identified by researchers, technolo-\ngists, advocates, journalists, policymakers, and communities in the United States and around the world. This \ntechnical companion is intended to be used as a reference by people across many circumstances – anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. \nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. 
\nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS: \n• The expectations for automated systems are meant to serve as a blueprint for the development of additional technical\nstandards and practices that should be tailored for particular sectors and contexts.\n• This section outlines practical steps that can be implemented to realize the vision of the Blueprint for an AI Bill of Rights. The \nexpectations laid out often mirror existing practices for technology development, including pre-deployment testing, ongoing \nmonitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguard \ning individuals’ rights. These reporting expectations are important for transparency, so the American people can have\nconfidence that their rights, opportunities, and access as well as their expectations about technologies are respected. \n3\nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE: \nThis section provides real-life examples of how these guiding principles can become reality, through laws, policies, and practices. \nIt describes practical technical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe examples provided are not critiques or endorsements, but rather are offered as illustrative cases to help \nprovide a concrete vision for actualizing the Blueprint for an AI Bill of Rights. Effectively implementing these \nprocesses require the cooperation of and collaboration among industry, civil society, researchers, policymakers, \ntechnologists, and the public. \n14\n']","Technical protections and practices laid out in the Blueprint for an AI Bill of Rights help guard the American public against many potential and actual harms associated with automated systems. They provide a framework for the design, use, and deployment of these systems to protect the rights of individuals, ensuring transparency and accountability in their operation.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 13, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What protections does the AI Bill of Rights provide against algorithmic discrimination?,"[' AI BILL OF RIGHTS\nFFECTIVE SYSTEMS\nineffective systems. 
Automated systems should be \ncommunities, stakeholders, and domain experts to identify \nSystems should undergo pre-deployment testing, risk \nthat demonstrate they are safe and effective based on \nincluding those beyond the intended use, and adherence to \nprotective measures should include the possibility of not \nAutomated systems should not be designed with an intent \nreasonably foreseeable possibility of endangering your safety or the safety of your community. They should \nstemming from unintended, yet foreseeable, uses or \n \n \n \n \n \n \n \nSECTION TITLE\nBLUEPRINT FOR AN\nSAFE AND E \nYou should be protected from unsafe or \ndeveloped with consultation from diverse \nconcerns, risks, and potential impacts of the system. \nidentification and mitigation, and ongoing monitoring \ntheir intended use, mitigation of unsafe outcomes \ndomain-specific standards. Outcomes of these \ndeploying the system or removing a system from use. \nor \nbe designed to proactively protect you from harms \nimpacts of automated systems. You should be protected from inappropriate or irrelevant data use in the \ndesign, development, and deployment of automated systems, and from the compounded harm of its reuse. \nIndependent evaluation and reporting that confirms that the system is safe and effective, including reporting of \nsteps taken to mitigate potential harms, should be performed and the results made public whenever possible. \nALGORITHMIC DISCRIMINATION PROTECTIONS\nYou should not face discrimination by algorithms and systems should be used and designed in \nan equitable way. Algorithmic discrimination occurs when automated systems contribute to unjustified \ndifferent treatment or impacts disfavoring people based on their race, color, ethnicity, sex (including \npregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other \nclassification protected by law. Depending on the specific circumstances, such algorithmic discrimination \nmay violate legal protections. Designers, developers, and deployers of automated systems should take \nproactive \nand \ncontinuous \nmeasures \nto \nprotect \nindividuals \nand \ncommunities \nfrom algorithmic \ndiscrimination and to use and design systems in an equitable way. This protection should include proactive \nequity assessments as part of the system design, use of representative data and protection against proxies \nfor demographic features, ensuring accessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent \nevaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5\n']","The AI Bill of Rights provides protections against algorithmic discrimination by ensuring that individuals should not face discrimination by algorithms. It mandates that systems should be designed and used in an equitable way, taking proactive and continuous measures to protect individuals and communities from algorithmic discrimination. 
This includes conducting proactive equity assessments, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing, and providing clear organizational oversight. Additionally, independent evaluation and reporting, including algorithmic impact assessments and disparity testing results, should be made public whenever possible to confirm these protections.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 4, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What role does the 2023 Executive Order on Safe AI play in NIST's efforts to develop trustworthy artificial intelligence?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. 
\n \n']","The 2023 Executive Order on Safe, Secure, and Trustworthy AI plays a significant role in NIST's efforts by guiding the establishment of the U.S. AI Safety Institute and the AI Safety Institute Consortium, which are aimed at building the necessary science for the safe, secure, and trustworthy development and use of AI.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the importance of transparency in the context of watch lists used by predictive policing systems?,"[' \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nā€¢\nA predictive policing system claimed to identify individuals at greatest risk to commit or become the victim of\ngun violence (based on automated analysis of social ties to gang members, criminal histories, previous experi\xad\nences of gun violence, and other factors) and led to individuals being placed on a watch list with no\nexplanation or public transparency regarding how the system came to its conclusions.85 Both police and\nthe public deserve to understand why and how such a system is making these determinations.\nā€¢\nA system awarding benefits changed its criteria invisibly. Individuals were denied benefits due to data entry\nerrors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42\n']","Transparency is important in the context of watch lists used by predictive policing systems because both police and the public deserve to understand why and how the system makes its determinations. Without transparency, individuals may be placed on a watch list without explanation, leading to a lack of accountability and understanding of the system's conclusions.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 41, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What is the purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics?,"[' \n38 \nMEASURE 2.13: Eļ¬€ectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric effectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are difficult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Configuration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might affect \ndifferent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. \nHuman-AI Configuration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization \n']","The purpose of establishing feedback processes for end users and impacted communities in AI system evaluation metrics is to allow these groups to report problems and appeal system outcomes, ensuring that the impact of AI-generated content on different social, economic, and cultural groups is assessed and understood.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What measures are suggested to ensure information integrity in the context of AI systems?,"[' \n28 \nMAP 5.2: Practices and personnel for supporting regular engagement with relevant AI Actors and integrating feedback about \npositive, negative, and unanticipated impacts are in place and documented. 
\nAction ID \nSuggested Action \nGAI Risks \nMP-5.2-001 \nDetermine context-based measures to identify if new impacts are present due to \nthe GAI system, including regular engagements with downstream AI Actors to \nidentify and quantify new contexts of unanticipated impacts of GAI systems. \nHuman-AI Configuration; Value \nChain and Component Integration \nMP-5.2-002 \nPlan regular engagements with AI Actors responsible for inputs to GAI systems, \nincluding third-party data and algorithms, to review and evaluate unanticipated \nimpacts. \nHuman-AI Configuration; Value \nChain and Component Integration \nAI Actor Tasks: AI Deployment, AI Design, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-\nUsers, Human Factors, Operation and Monitoring \n \nMEASURE 1.1: Approaches and metrics for measurement of AI risks enumerated during the MAP function are selected for \nimplementation starting with the most significant AI risks. The risks or trustworthiness characteristics that will not – or cannot – be \nmeasured are properly documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.1-001 Employ methods to trace the origin and modifications of digital content. \nInformation Integrity \nMS-1.1-002 \nIntegrate tools designed to analyze content provenance and detect data \nanomalies, verify the authenticity of digital signatures, and identify patterns \nassociated with misinformation or manipulation. \nInformation Integrity \nMS-1.1-003 \nDisaggregate evaluation metrics by demographic factors to identify any \ndiscrepancies in how content provenance mechanisms work across diverse \npopulations. \nInformation Integrity; Harmful \nBias and Homogenization \nMS-1.1-004 Develop a suite of metrics to evaluate structured public feedback exercises \ninformed by representative AI Actors. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.1-005 \nEvaluate novel methods and technologies for the measurement of GAI-related \nrisks including in content provenance, offensive cyber, and CBRN, while \nmaintaining the models’ ability to produce valid, reliable, and factually accurate \noutputs. \nInformation Integrity; CBRN \nInformation or Capabilities; \nObscene, Degrading, and/or \nAbusive Content \n']","Suggested measures to ensure information integrity in the context of AI systems include employing methods to trace the origin and modifications of digital content, integrating tools designed to analyze content provenance and detect data anomalies, verifying the authenticity of digital signatures, and identifying patterns associated with misinformation or manipulation. 
Additionally, it is recommended to disaggregate evaluation metrics by demographic factors to identify discrepancies in how content provenance mechanisms work across diverse populations.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 31, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What are the limitations of current pre-deployment testing approaches for GAI applications?,"[' \n49 \nearly lifecycle TEVV approaches are developed and matured for GAI, organizations may use \nrecommended “pre-deployment testing” practices to measure performance, capabilities, limits, risks, \nand impacts. This section describes risk measurement and estimation as part of pre-deployment TEVV, \nand examines the state of play for pre-deployment testing methodologies. \nLimitations of Current Pre-deployment Test Approaches \nCurrently available pre-deployment TEVV processes used for GAI applications may be inadequate, non-\nsystematically applied, or fail to reflect or mismatched to deployment contexts. For example, the \nanecdotal testing of GAI system capabilities through video games or standardized tests designed for \nhumans (e.g., intelligence tests, professional licensing exams) does not guarantee GAI system validity or \nreliability in those domains. Similarly, jailbreaking or prompt engineering tests may not systematically \nassess validity or reliability risks. \nMeasurement gaps can arise from mismatches between laboratory and real-world settings. Current \ntesting approaches often remain focused on laboratory conditions or restricted to benchmark test \ndatasets and in silico techniques that may not extrapolate well to – or directly assess GAI impacts in real-\nworld conditions. For example, current measurement gaps for GAI make it difficult to precisely estimate \nits potential ecosystem-level or longitudinal risks and related political, social, and economic impacts. \nGaps between benchmarks and real-world use of GAI systems may likely be exacerbated due to prompt \nsensitivity and broad heterogeneity of contexts of use. \nA.1.5. Structured Public Feedback \nStructured public feedback can be used to evaluate whether GAI systems are performing as intended \nand to calibrate and verify traditional measurement methods. Examples of structured feedback include, \nbut are not limited to: \n• \nParticipatory Engagement Methods: Methods used to solicit feedback from civil society groups, \naffected communities, and users, including focus groups, small user studies, and surveys. \n• \nField Testing: Methods used to determine how people interact with, consume, use, and make \nsense of AI-generated information, and subsequent actions and effects, including UX, usability, \nand other structured, randomized experiments. \n• \nAI Red-teaming: A structured testing exercise used to probe an AI system to find flaws and \nvulnerabilities such as inaccurate, harmful, or discriminatory outputs, often in a controlled \nenvironment and in collaboration with system developers. 
\nInformation gathered from structured public feedback can inform design, implementation, deployment \napproval, maintenance, or decommissioning decisions. Results and insights gleaned from these exercises \ncan serve multiple purposes, including improving data quality and preprocessing, bolstering governance \ndecision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation. \n']","Current pre-deployment TEVV processes used for GAI applications may be inadequate, non-systematically applied, or fail to reflect or be mismatched to deployment contexts. Anecdotal testing of GAI system capabilities through video games or standardized tests designed for humans does not guarantee GAI system validity or reliability. Additionally, jailbreaking or prompt engineering tests may not systematically assess validity or reliability risks. Measurement gaps can arise from mismatches between laboratory and real-world settings, and current testing approaches often remain focused on laboratory conditions or restricted to benchmark test datasets that may not extrapolate well to real-world conditions.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 52, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What measures are suggested to ensure effective human-AI configuration in the context of GAI systems?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain effective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability – as identified in the MAP function – are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Configuration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modified, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. 
\nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",The suggested measures to ensure effective human-AI configuration in the context of GAI systems include documenting the instructions given to data annotators or AI red-teamers (MS-2.8-002) and verifying the adequacy of GAI system user instructions through user testing (MS-2.8-004).,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What issues does the automated sentiment analyzer address regarding bias in online statements?,"[' \n \n \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nAn automated sentiment analyzer, a tool often used by technology platforms to determine whether a state-\nment posted online expresses a positive or negative sentiment, was found to be biased against Jews and gay\npeople. For example, the analyzer marked the statement “I’m a Jew” as representing a negative sentiment,\nwhile “I’m a Christian” was identified as expressing a positive sentiment.36 This could lead to the\npreemptive blocking of social media comments such as: “I’m gay.” A related company with this bias concern\nhas made their data public to encourage researchers to help address the issue37 and has released reports\nidentifying and measuring this problem as well as detailing attempts to address it.38\n•\nSearches for “Black girls,” “Asian girls,” or “Latina girls” return predominantly39 sexualized content, rather\nthan role models, toys, or activities.40 Some search engines have been working to reduce the prevalence of\nthese results, but the problem remains.41\n•\nAdvertisement delivery systems that predict who is most likely to click on a job advertisement end up deliv-\nering ads in ways that reinforce racial and gender stereotypes, such as overwhelmingly directing supermar-\nket cashier ads to women and jobs with taxi companies to primarily Black people.42\n•\nBody scanners, used by TSA at airport checkpoints, require the operator to select a “male” or “female”\nscanning setting based on the passenger’s sex, but the setting is chosen based on the operator’s perception of\nthe passenger’s gender identity. These scanners are more likely to flag transgender travelers as requiring\nextra screening done by a person. Transgender travelers have described degrading experiences associated\nwith these extra screenings.43 TSA has recently announced plans to implement a gender-neutral algorithm44 \nwhile simultaneously enhancing the security effectiveness capabilities of the existing technology. 
\n•\nThe National Disabled Law Students Association expressed concerns that individuals with disabilities were\nmore likely to be flagged as potentially suspicious by remote proctoring AI systems because of their disabili-\nty-specific access needs such as needing longer breaks or using screen readers or dictation software.45 \n•\nAn algorithm designed to identify patients with high needs for healthcare systematically assigned lower\nscores (indicating that they were not as high need) to Black patients than to those of white patients, even\nwhen those patients had similar numbers of chronic conditions and other markers of health.46 In addition,\nhealthcare clinical algorithms that are used by physicians to guide clinical decisions may include\nsociodemographic variables that adjust or “correct” the algorithm’s output on the basis of a patient’s race or\nethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections \n']","The automated sentiment analyzer addresses bias in online statements by identifying that it was found to be biased against Jews and gay people. For instance, it marked the statement 'I’m a Jew' as negative while identifying 'I’m a Christian' as positive. This bias could lead to the preemptive blocking of social media comments such as 'I’m gay.'",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 24, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What are the expectations for automated systems regarding safety and effectiveness?,"[' \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be care-\nfully validated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indi-\ncating adverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law. Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. 
Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examina-\ntion, has benefits for those impacted by the system that outweigh identified risks and, as appropriate, reason-\nable measures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g., \nvia application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations. Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. \nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the \norganization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency, \nresults, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20\n']","The expectations for automated systems regarding safety and effectiveness include the need for independent evaluation, where evaluators should have access to the system and associated data to perform evaluations. Additionally, entities responsible for automated systems should provide regularly-updated reports that cover an overview of the system, data used for training, risk management assessments, performance testing results, and ongoing monitoring procedures. 
These reports should be presented in plain language and a machine-readable format.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What criteria are used to measure AI system performance or assurance in deployment settings?,"[' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output filters; Removing any personally \nidentifiable information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConfiguration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConfiguration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, differential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConfiguration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for fine tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Configuration \n']","AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for conditions similar to deployment setting(s). 
Measures are documented.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What threat does automatic signature verification software pose to U.S. voters?,"[' \nENDNOTES\n96. National Science Foundation. NSF Program on Fairness in Artificial Intelligence in Collaboration\nwith Amazon (FAI). Accessed July 20, 2022.\nhttps://www.nsf.gov/pubs/2021/nsf21585/nsf21585.htm\n97. Kyle Wiggers. Automatic signature verification software threatens to disenfranchise U.S. voters.\nVentureBeat. Oct. 25, 2020.\nhttps://venturebeat.com/2020/10/25/automatic-signature-verification-software-threatens-to-\ndisenfranchise-u-s-voters/\n98. Ballotpedia. Cure period for absentee and mail-in ballots. Article retrieved Apr 18, 2022.\nhttps://ballotpedia.org/Cure_period_for_absentee_and_mail-in_ballots\n99. Larry Buchanan and Alicia Parlapiano. Two of these Mail Ballot Signatures are by the Same Person.\nWhich Ones? New York Times. Oct. 7, 2020.\nhttps://www.nytimes.com/interactive/2020/10/07/upshot/mail-voting-ballots-signature-\nmatching.html\n100. Rachel Orey and Owen Bacskai. The Low Down on Ballot Curing. Nov. 04, 2020.\nhttps://bipartisanpolicy.org/blog/the-low-down-on-ballot-curing/\n101. Andrew Kenney. \'I\'m shocked that they need to have a smartphone\': System for unemployment\nbenefits exposes digital divide. USA Today. May 2, 2021.\nhttps://www.usatoday.com/story/tech/news/2021/05/02/unemployment-benefits-system-leaving-\npeople-behind/4915248001/\n102. Allie Gross. UIA lawsuit shows how the state criminalizes the unemployed. Detroit Metro-Times.\nSep. 18, 2015.\nhttps://www.metrotimes.com/news/uia-lawsuit-shows-how-the-state-criminalizes-the-\nunemployed-2369412\n103. Maia Szalavitz. The Pain Was Unbearable. So Why Did Doctors Turn Her Away? Wired. Aug. 11,\n2021. https://www.wired.com/story/opioid-drug-addiction-algorithm-chronic-pain/\n104. Spencer Soper. Fired by Bot at Amazon: ""It\'s You Against the Machine"". Bloomberg, Jun. 28, 2021.\nhttps://www.bloomberg.com/news/features/2021-06-28/fired-by-bot-amazon-turns-to-machine-\nmanagers-and-workers-are-losing-out\n105. Definitions of ‘equity’ and ‘underserved communities’ can be found in the Definitions section of\nthis document as well as in Executive Order on Advancing Racial Equity and Support for Underserved\nCommunities Through the Federal Government:\nhttps://www.whitehouse.gov/briefing-room/presidential-actions/2021/01/20/executive-order-\nadvancing-racial-equity-and-support-for-underserved-communities-through-the-federal-government/\n106. HealthCare.gov. Navigator - HealthCare.gov Glossary. Accessed May 2, 2022.\nhttps://www.healthcare.gov/glossary/navigator/\n72\n']",Automatic signature verification software threatens to disenfranchise U.S. 
voters.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 71, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What measures are being taken to ensure equitable design in automated systems to protect against algorithmic discrimination?,"["" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts – basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. 
\n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women appli-\ncants for spurious and discriminatory reasons; resumes with the word “women’s,” such as “women’s\nchess club captain,” were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n""]","Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to changes or the prevention of product launches to avoid public harm. Federal government agencies are developing standards and guidance for the use of automated systems to help prevent bias. Non-profits and companies have created best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What factors should be considered to ensure information integrity in the context of GAI risk management?,"[' \n14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. 
\nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. \nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organizationā€™s risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or deļ¬ning risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, oļ¬€ensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces signiļ¬cant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. \nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval (ā€œgo/ā€no-goā€) policies, procedures, and processes, \nwith reviewed processes and approval thresholds reļ¬‚ecting measurement of GAI \ncapabilities and risks. \nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or oļ¬€ensive cyber capabilities. 
\nCBRN Information or Capabilities; \nInformation Security \n']","Factors to consider to ensure information integrity in the context of GAI risk management include abuses and impacts to information integrity, dependencies between GAI and other IT or data systems, harm to fundamental rights or public safety, presentation of obscene, objectionable, offensive, discriminatory, invalid or untruthful output, psychological impacts to humans, possibility for malicious use, introduction of significant new security vulnerabilities, anticipated system impact on some groups compared to others, and unreliable decision-making capabilities.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 17, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are the reasons for implementing enhanced data protections in sensitive domains?,"[' \n \n \n \nDATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\nSome domains, including health, employment, education, criminal justice, and personal finance, have long been \nsingled out as sensitive domains deserving of enhanced data protections. This is due to the intimate nature of these \ndomains as well as the inability of individuals to opt out of these domains in any meaningful way, and the \nhistorical discrimination that has often accompanied data knowledge.69 Domains understood by the public to be \nsensitive also change over time, including because of technological developments. Tracking and monitoring \ntechnologies, personal tracking devices, and our extensive data footprints are used and misused more than ever \nbefore; as such, the protections afforded by current legal guidelines may be inadequate. The American public \ndeserves assurances that data related to such sensitive domains is protected and used appropriately and only in \nnarrowly defined contexts with clear benefits to the individual and/or society. \nTo this end, automated systems that collect, use, share, or store data related to these sensitive domains should meet \nadditional expectations. Data and metadata are sensitive if they pertain to an individual in a sensitive domain (defined \nbelow); are generated by technologies used in a sensitive domain; can be used to infer data from a sensitive domain or \nsensitive data about an individual (such as disability-related data, genomic data, biometric data, behavioral data, \ngeolocation data, data related to interaction with the criminal justice system, relationship history and legal status such \nas custody and divorce information, and home, work, or school environmental data); or have the reasonable potential \nto be used in ways that are likely to expose individuals to meaningful harm, such as a loss of privacy or financial harm \ndue to identity theft. Data and metadata generated by or about those who are not yet legal adults is also sensitive, even \nif not related to a sensitive domain. Such data includes, but is not limited to, numerical, text, image, audio, or video \ndata. 
ā€œSensitive domainsā€ are those in which activities being conducted can cause material harms, including signifi\xad\ncant adverse effects on human rights such as autonomy and dignity, as well as civil liberties and civil rights. Domains \nthat have historically been singled out as deserving of enhanced data protections or where such enhanced protections \nare reasonably expected by the public include, but are not limited to, health, family planning and care, employment, \neducation, criminal justice, and personal finance. In the context of this framework, such domains are considered \nsensitive whether or not the specifics of a system context would necessitate coverage under existing law, and domains \nand data that are considered sensitive are understood to change over time based on societal norms and context. \n36\n']","Enhanced data protections in sensitive domains are implemented due to the intimate nature of these domains, the inability of individuals to opt out meaningfully, and the historical discrimination that has often accompanied data knowledge. Additionally, the protections afforded by current legal guidelines may be inadequate given the misuse of tracking technologies and the extensive data footprints individuals leave behind. The American public deserves assurances that data related to sensitive domains is protected and used appropriately, only in narrowly defined contexts with clear benefits to individuals and society.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 35, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are some of the potential harms associated with automated systems?,"[' \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nWhile technologies are being deployed to solve problems across a wide array of issues, our reliance on technology can \nalso lead to its use in situations where it has not yet been proven to workā€”either at all or within an acceptable range \nof error. In other cases, technologies do not work as intended or as promised, causing substantial and unjustified harm. \nAutomated systems sometimes rely on data from other systems, including historical data, allowing irrelevant informa\xad\ntion from past decisions to infect decision-making in unrelated situations. In some cases, technologies are purposeful\xad\nly designed to violate the safety of others, such as technologies designed to facilitate stalking; in other cases, intended \nor unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. 
Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad\nvators deserve clear rules of the road that allow new ideas to flourish, and the American public deserves protections \nfrom unsafe outcomes. All can benefit from assurances that automated systems will be designed, tested, and consis\xad\ntently confirmed to work as intended, and that they will be proactively protected from foreseeable unintended harm\xad\nful outcomes. \nā€¢\nA proprietary model was developed to predict the likelihood of sepsis in hospitalized patients and was imple\xad\nmented at hundreds of hospitals around the country. An independent study showed that the model predictions\nunderperformed relative to the designerā€™s claims while also causing ā€˜alert fatigueā€™ by falsely alerting\nlikelihood of sepsis.6\nā€¢\nOn social media, Black people who quote and criticize racist messages have had their own speech silenced when\na platformā€™s automated moderation system failed to distinguish this ā€œcounter speechā€ (or other critique\nand journalism) from the original hateful messages to which such speech responded.7\nā€¢\nA device originally developed to help people track and find lost items has been used as a tool by stalkers to track\nvictimsā€™ locations in violation of their privacy and safety. The device manufacturer took steps after release to\nprotect people from unwanted tracking by alerting people on their phones when a device is found to be moving\nwith them over time and also by having the device make an occasional noise, but not all phones are able\nto receive the notification and the devices remain a safety concern due to their misuse.8 \nā€¢\nAn algorithm used to deploy police was found to repeatedly send police to neighborhoods they regularly visit,\neven if those neighborhoods were not the ones with the highest crime rates. These incorrect crime predictions\nwere the result of a feedback loop generated from the reuse of data from previous arrests and algorithm\npredictions.9\n16\n']","Some potential harms associated with automated systems include: reliance on unproven technologies that may not work as intended, causing substantial and unjustified harm; the use of historical data that can lead to irrelevant information affecting decision-making; technologies designed to violate safety, such as those facilitating stalking; unintended harms from intended or unintended uses; and issues like alert fatigue from false alerts, as seen in a sepsis prediction model. 
Additionally, automated moderation systems may fail to distinguish between counter-speech and hateful messages, silencing critics.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 15, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What is the significance of human-AI configuration in managing GAI risks and ensuring information integrity?,"[' \n25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at different stages of AI life cycle. \nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. \nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner proficiency with AI system performance and trustworthiness – and relevant \ntechnical standards and certifications – are defined, assessed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certification programs that test proficiency in managing GAI risks and \ninterpreting content provenance, relevant to specific industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proficiency tests from tests of GAI capabilities. \nHuman-AI Configuration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconfigurations for future refinement and improvements. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping \nand testing activities. Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. 
\nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \n \n']","The significance of human-AI configuration in managing GAI risks and ensuring information integrity lies in its role in evaluating content lineage and origin, adapting training programs for digital content transparency, developing certification programs for managing GAI risks, delineating human proficiency tests from GAI capabilities, and implementing systems to monitor and track outcomes of human-GAI configurations for future improvements. Involving end-users, practitioners, and operators in prototyping and testing activities is also crucial, especially in various scenarios including crisis situations or ethically sensitive contexts.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What are the key oversight functions involved in the GAI lifecycle?,"[' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identification process. \nHuman-AI Configuration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identification of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the effectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. 
\nInformation Security \n']","The key oversight functions involved in the GAI lifecycle include senior leadership, legal, compliance, and internal evaluation.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What is the purpose of the AI Safety Institute established by NIST?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. 
\n \n']","The purpose of the AI Safety Institute established by NIST is to continue efforts to build the science necessary for safe, secure, and trustworthy development and use of artificial intelligence (AI), in alignment with the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What is the purpose of using structured feedback mechanisms in relation to AI-generated content?,"[' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Configuration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentifiable information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identified. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notification format. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. 
\nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a specific GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Configuration \n']",The purpose of using structured feedback mechanisms in relation to AI-generated content is to solicit and capture user input about the content to detect subtle shifts in quality or alignment with community and societal values.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What measures are suggested to mitigate risks related to harmful bias in generative AI systems?,"[' \n43 \nMG-3.1-005 Review various transparency artifacts (e.g., system cards and model cards) for \nthird-party models. \nInformation Integrity; Information \nSecurity; Value Chain and \nComponent Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, Third-party entities \n \nMANAGE 3.2: Pre-trained models which are used for development are monitored as part of AI system regular monitoring and \nmaintenance. \nAction ID \nSuggested Action \nGAI Risks \nMG-3.2-001 \nApply explainable AI (XAI) techniques (e.g., analysis of embeddings, model \ncompression/distillation, gradient-based attributions, occlusion/term reduction, \ncounterfactual prompts, word clouds) as part of ongoing continuous \nimprovement processes to mitigate risks related to unexplainable GAI systems. \nHarmful Bias and Homogenization \nMG-3.2-002 \nDocument how pre-trained models have been adapted (e.g., fine-tuned, or \nretrieval-augmented generation) for the specific generative task, including any \ndata augmentations, parameter adjustments, or other modifications. Access to \nun-tuned (baseline) models supports debugging the relative influence of the pre-\ntrained weights compared to the fine-tuned model weights or other system \nupdates. \nInformation Integrity; Data Privacy \nMG-3.2-003 \nDocument sources and types of training data and their origins, potential biases \npresent in the data related to the GAI application and its content provenance, \narchitecture, training process of the pre-trained model including information on \nhyperparameters, training duration, and any fine-tuning or retrieval-augmented \ngeneration processes applied. \nInformation Integrity; Harmful Bias \nand Homogenization; Intellectual \nProperty \nMG-3.2-004 Evaluate user reported problematic content and integrate feedback into system \nupdates. \nHuman-AI Configuration, \nDangerous, Violent, or Hateful \nContent \nMG-3.2-005 \nImplement content filters to prevent the generation of inappropriate, harmful, \nfalse, illegal, or violent content related to the GAI application, including for CSAM \nand NCII. These filters can be rule-based or leverage additional machine learning \nmodels to flag problematic inputs and outputs. 
\nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMG-3.2-006 \nImplement real-time monitoring processes for analyzing generated content \nperformance and trustworthiness characteristics related to content provenance \nto identify deviations from the desired standards and trigger alerts for human \nintervention. \nInformation Integrity \n']","To mitigate risks related to harmful bias in generative AI systems, the suggested measures include applying explainable AI (XAI) techniques as part of ongoing continuous improvement processes, documenting how pre-trained models have been adapted for specific generative tasks, and documenting sources and types of training data along with potential biases present in the data.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 46, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What are the implications of bias and discrimination in automated systems on the rights of the American public?,"[' \nSECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent. \nThese outcomes are deeply harmful—but they are not inevitable. Automated systems have brought about extraor-\ndinary benefits, from technology that helps farmers grow food more efficiently and computers that predict storm \npaths, to algorithms that can identify diseases in patients. These tools now drive important decisions across \nsectors, while data is helping to revolutionize global industries. Fueled by the power of American innovation, \nthese tools hold the potential to redefine every part of our society and make life better for everyone. \nThis important progress must not come at the price of civil rights or democratic values, foundational American \nprinciples that President Biden has affirmed as a cornerstone of his Administration. 
On his first day in office, the \nPresident ordered the full Federal government to work to root out inequity, embed fairness in decision-\nmaking processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.1 The \nPresident has spoken forcefully about the urgent challenges posed to democracy today and has regularly called \non people of conscience to act to preserve civil rights—including the right to privacy, which he has called “the \nbasis for so many more rights that we have come to take for granted that are ingrained in the fabric of this \ncountry.”2\nTo advance President Biden’s vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threats—and uses technologies in ways that reinforce our highest values. \nResponding to the experiences of the American public, and informed by insights from researchers, \ntechnologists, advocates, journalists, and policymakers, this framework is accompanied by a technical \ncompanion—a handbook for anyone seeking to incorporate these protections into policy and practice, including \ndetailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3\n']","The implications of bias and discrimination in automated systems on the rights of the American public include limiting opportunities, preventing access to critical resources or services, and reflecting or reproducing existing unwanted inequities. These outcomes can threaten people's opportunities, undermine their privacy, and lead to pervasive tracking of their activity, often without their knowledge or consent.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 2, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What are the main principles outlined in the AI Bill of Rights and how do they aim to protect the rights of the American public?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n- \nUSING THIS TECHNICAL COMPANION\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the design, \nuse, and deployment of automated systems to protect the rights of the American public in the age of artificial \nintelligence. This technical companion considers each principle in the Blueprint for an AI Bill of Rights and \nprovides examples and concrete steps for communities, industry, governments, and others to take in order to \nbuild these protections into policy, practice, or the technological design process. 
\nTaken together, the technical protections and practices laid out in the Blueprint for an AI Bill of Rights can help \nguard the American public against many of the potential and actual harms identified by researchers, technolo\xad\ngists, advocates, journalists, policymakers, and communities in the United States and around the world. This \ntechnical companion is intended to be used as a reference by people across many circumstances – anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. \nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS: \n• The expectations for automated systems are meant to serve as a blueprint for the development of additional technical\nstandards and practices that should be tailored for particular sectors and contexts.\n• This section outlines practical steps that can be implemented to realize the vision of the Blueprint for an AI Bill of Rights. The \nexpectations laid out often mirror existing practices for technology development, including pre-deployment testing, ongoing \nmonitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguard \ning individuals’ rights. These reporting expectations are important for transparency, so the American people can have\nconfidence that their rights, opportunities, and access as well as their expectations about technologies are respected. \n3\nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE: \nThis section provides real-life examples of how these guiding principles can become reality, through laws, policies, and practices. \nIt describes practical technical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe examples provided are not critiques or endorsements, but rather are offered as illustrative cases to help \nprovide a concrete vision for actualizing the Blueprint for an AI Bill of Rights. Effectively implementing these \nprocesses require the cooperation of and collaboration among industry, civil society, researchers, policymakers, \ntechnologists, and the public. \n14\n']","The main principles outlined in the AI Bill of Rights are not explicitly listed in the provided context. However, the context discusses the Blueprint for an AI Bill of Rights, which consists of five principles aimed at guiding the design, use, and deployment of automated systems to protect the rights of the American public. 
It emphasizes the importance of technical protections and practices to guard against potential harms and outlines expectations for automated systems, including transparency and reporting.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 13, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What measures are suggested to assess the environmental impact of AI model training and management activities?,"[' \n37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities – as identified in the MAP \nfunction – are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. \nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, fine tuning, and deploying models: Verify tradeoffs \nbetween resources used at inference time versus additional resources required \nat training time. \nEnvironmental \nMS-2.12-004 Verify effectiveness of carbon capture or offset programs for GAI training and \napplications, and address green-washing concerns. 
\nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","The suggested measures to assess the environmental impact of AI model training and management activities include: 1) Assessing safety to physical environments when deploying GAI systems, 2) Documenting anticipated environmental impacts of model development, maintenance, and deployment in product design decisions, 3) Measuring or estimating environmental impacts such as energy and water consumption for training, fine-tuning, and deploying models, and verifying trade-offs between resources used at inference time versus additional resources required at training time, and 4) Verifying the effectiveness of carbon capture or offset programs for GAI training and applications, while addressing green-washing concerns.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 40, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What role do fraud detection algorithms play in the adjudication of benefits and penalties?,"['APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, elec-\ntrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54\n']",Fraud detection algorithms assist in the adjudication of benefits and penalties by analyzing information and matching records to support decision-makers.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 53, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What role does community participation play in the design of technology for democratic values?,"["" \n \n \n \n \nAPPENDIX\nPanel 4: Artificial Intelligence and Democratic Values. This event examined challenges and opportunities in \nthe design of technology that can help support a democratic vision for AI. 
It included discussion of the \ntechnical aspects \nof \ndesigning \nnon-discriminatory \ntechnology, \nexplainable \nAI, \nhuman-computer \ninteraction with an emphasis on community participation, and privacy-aware design. \nWelcome:\n•\nSorelle Friedler, Assistant Director for Data and Democracy, White House Office of Science and\nTechnology Policy\n•\nJ. Bob Alotta, Vice President for Global Programs, Mozilla Foundation\n•\nNavrina Singh, Board Member, Mozilla Foundation\nModerator: Kathy Pham Evans, Deputy Chief Technology Officer for Product and Engineering, U.S \nFederal Trade Commission. \nPanelists: \n•\nLiz O’Sullivan, CEO, Parity AI\n•\nTimnit Gebru, Independent Scholar\n•\nJennifer Wortman Vaughan, Senior Principal Researcher, Microsoft Research, New York City\n•\nPamela Wisniewski, Associate Professor of Computer Science, University of Central Florida; Director,\nSocio-technical Interaction Research (STIR) Lab\n•\nSeny Kamara, Associate Professor of Computer Science, Brown University\nEach panelist individually emphasized the risks of using AI in high-stakes settings, including the potential for \nbiased data and discriminatory outcomes, opaque decision-making processes, and lack of public trust and \nunderstanding of the algorithmic systems. The interventions and key needs various panelists put forward as \nnecessary to the future design of critical AI systems included ongoing transparency, value sensitive and \nparticipatory design, explanations designed for relevant stakeholders, and public consultation. \nVarious \npanelists emphasized the importance of placing trust in people, not technologies, and in engaging with \nimpacted communities to understand the potential harms of technologies and build protection by design into \nfuture systems. \nPanel 5: Social Welfare and Development. This event explored current and emerging uses of technology to \nimplement or improve social welfare systems, social development programs, and other systems that can impact \nlife chances. \nWelcome:\n•\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n•\nAnne-Marie Slaughter, CEO, New America\nModerator: Michele Evermore, Deputy Director for Policy, Office of Unemployment Insurance \nModernization, Office of the Secretary, Department of Labor \nPanelists:\n•\nBlake Hall, CEO and Founder, ID.Me\n•\nKarrie Karahalios, Professor of Computer Science, University of Illinois, Urbana-Champaign\n•\nChristiaan van Veen, Director of Digital Welfare State and Human Rights Project, NYU School of Law's\nCenter for Human Rights and Global Justice\n58\n""]","Community participation plays a crucial role in the design of technology for democratic values by emphasizing human-computer interaction that involves the community, ensuring that the technology is non-discriminatory, explainable, and privacy-aware. 
Engaging with impacted communities helps to understand the potential harms of technologies and build protection by design into future systems.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 57, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What are the policies and procedures related to human-AI configuration in the oversight of AI systems?,"[' \n18 \nGOVERN 3.2: Policies and procedures are in place to define and differentiate roles and responsibilities for human-AI configurations \nand oversight of AI systems. \nAction ID \nSuggested Action \nGAI Risks \nGV-3.2-001 \nPolicies are in place to bolster oversight of GAI systems with independent \nevaluations or assessments of GAI models or systems where the type and \nrobustness of evaluations are proportional to the identified risks. \nCBRN Information or Capabilities; \nHarmful Bias and Homogenization \nGV-3.2-002 \nConsider adjustment of organizational roles and components across lifecycle \nstages of large or complex GAI systems, including: Test and evaluation, validation, \nand red-teaming of GAI systems; GAI content moderation; GAI system \ndevelopment and engineering; Increased accessibility of GAI tools, interfaces, and \nsystems, Incident response and containment. \nHuman-AI Configuration; \nInformation Security; Harmful Bias \nand Homogenization \nGV-3.2-003 \nDefine acceptable use policies for GAI interfaces, modalities, and human-AI \nconfigurations (i.e., for chatbots and decision-making tasks), including criteria for \nthe kinds of queries GAI applications should refuse to respond to. \nHuman-AI Configuration \nGV-3.2-004 \nEstablish policies for user feedback mechanisms for GAI systems which include \nthorough instructions and any mechanisms for recourse. \nHuman-AI Configuration \nGV-3.2-005 \nEngage in threat modeling to anticipate potential risks from GAI systems. \nCBRN Information or Capabilities; \nInformation Security \nAI Actors: AI Design \n \nGOVERN 4.1: Organizational policies and practices are in place to foster a critical thinking and safety-first mindset in the design, \ndevelopment, deployment, and uses of AI systems to minimize potential negative impacts. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.1-001 \nEstablish policies and procedures that address continual improvement processes \nfor GAI risk measurement. Address general risks associated with a lack of \nexplainability and transparency in GAI systems by using ample documentation and \ntechniques such as: application of gradient-based attributions, occlusion/term \nreduction, counterfactual prompts and prompt engineering, and analysis of \nembeddings; Assess and update risk measurement approaches at regular \ncadences. \nConfabulation \nGV-4.1-002 \nEstablish policies, procedures, and processes detailing risk measurement in \ncontext of use with standardized measurement protocols and structured public \nfeedback exercises such as AI red-teaming or independent external evaluations. 
\nCBRN Information and Capability; \nValue Chain and Component \nIntegration \n']","Policies and procedures are in place to define and differentiate roles and responsibilities for human-AI configurations and oversight of AI systems. This includes establishing acceptable use policies for GAI interfaces, modalities, and human-AI configurations, as well as defining criteria for the kinds of queries GAI applications should refuse to respond to.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 21, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What is the purpose of the AI Risk Management Framework for Generative AI?,"[' \n1 \n1. \nIntroduction \nThis document is a cross-sectoral profile of and companion resource for the AI Risk Management \nFramework (AI RMF 1.0) for Generative AI,1 pursuant to President Biden’s Executive Order (EO) 14110 on \nSafe, Secure, and Trustworthy Artificial Intelligence.2 The AI RMF was released in January 2023, and is \nintended for voluntary use and to improve the ability of organizations to incorporate trustworthiness \nconsiderations into the design, development, use, and evaluation of AI products, services, and systems. \nA profile is an implementation of the AI RMF functions, categories, and subcategories for a specific \nsetting, application, or technology – in this case, Generative AI (GAI) – based on the requirements, risk \ntolerance, and resources of the Framework user. AI RMF profiles assist organizations in deciding how to \nbest manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory \nrequirements and best practices, and reflects risk management priorities. Consistent with other AI RMF \nprofiles, this profile offers insights into how risk can be managed across various stages of the AI lifecycle \nand for GAI as a technology. \nAs GAI covers risks of models or applications that can be used across use cases or sectors, this document \nis an AI RMF cross-sectoral profile. Cross-sectoral profiles can be used to govern, map, measure, and \nmanage risks associated with activities or business processes common across sectors, such as the use of \nlarge language models (LLMs), cloud-based services, or acquisition. \nThis document defines risks that are novel to or exacerbated by the use of GAI. After introducing and \ndescribing these risks, the document provides a set of suggested actions to help organizations govern, \nmap, measure, and manage these risks. \n \n \n1 EO 14110 defines Generative AI as “the class of AI models that emulate the structure and characteristics of input \ndata in order to generate derived synthetic content. This can include images, videos, audio, text, and other digital \ncontent.” While not all GAI is derived from foundation models, for purposes of this document, GAI generally refers \nto generative foundation models. 
The foundation model subcategory of “dual-use foundation models” is defined by \nEO 14110 as “an AI model that is trained on broad data; generally uses self-supervision; contains at least tens of \nbillions of parameters; is applicable across a wide range of contexts.” \n2 This profile was developed per Section 4.1(a)(i)(A) of EO 14110, which directs the Secretary of Commerce, acting \nthrough the Director of the National Institute of Standards and Technology (NIST), to develop a companion \nresource to the AI RMF, NIST AI 100–1, for generative AI. \n']","The purpose of the AI Risk Management Framework (AI RMF) for Generative AI is to improve the ability of organizations to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems. It assists organizations in deciding how to best manage AI risks in alignment with their goals, legal/regulatory requirements, and best practices.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 4, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What does the term 'underserved communities' refer to in the context of the AI Bill of Rights?,"[' \n \n \nApplying The Blueprint for an AI Bill of Rights \nSENSITIVE DATA: Data and metadata are sensitive if they pertain to an individual in a sensitive domain \n(defined below); are generated by technologies used in a sensitive domain; can be used to infer data from a \nsensitive domain or sensitive data about an individual (such as disability-related data, genomic data, biometric \ndata, behavioral data, geolocation data, data related to interaction with the criminal justice system, relationship \nhistory and legal status such as custody and divorce information, and home, work, or school environmental \ndata); or have the reasonable potential to be used in ways that are likely to expose individuals to meaningful \nharm, such as a loss of privacy or financial harm due to identity theft. Data and metadata generated by or about \nthose who are not yet legal adults is also sensitive, even if not related to a sensitive domain. Such data includes, \nbut is not limited to, numerical, text, image, audio, or video data. \nSENSITIVE DOMAINS: “Sensitive domains” are those in which activities being conducted can cause material \nharms, including significant adverse effects on human rights such as autonomy and dignity, as well as civil liber\xad\nties and civil rights. Domains that have historically been singled out as deserving of enhanced data protections \nor where such enhanced protections are reasonably expected by the public include, but are not limited to, \nhealth, family planning and care, employment, education, criminal justice, and personal finance. In the context \nof this framework, such domains are considered sensitive whether or not the specifics of a system context \nwould necessitate coverage under existing law, and domains and data that are considered sensitive are under\xad\nstood to change over time based on societal norms and context. 
\nSURVEILLANCE TECHNOLOGY: ā€œSurveillance technologyā€ refers to products or services marketed for \nor that can be lawfully used to detect, monitor, intercept, collect, exploit, preserve, protect, transmit, and/or \nretain data, identifying information, or communications concerning individuals or groups. This framework \nlimits its focus to both government and commercial use of surveillance technologies when juxtaposed with \nreal-time or subsequent automated analysis and when such systems have a potential for meaningful impact \non individualsā€™ or communitiesā€™ rights, opportunities, or access. \nUNDERSERVED COMMUNITIES: The term ā€œunderserved communitiesā€ refers to communities that have \nbeen systematically denied a full opportunity to participate in aspects of economic, social, and civic life, as \nexemplified by the list in the preceding definition of ā€œequity.ā€ \n11\n']","The term 'underserved communities' refers to communities that have been systematically denied a full opportunity to participate in aspects of economic, social, and civic life.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 10, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the challenges associated with value chain and component integration in GAI systems?,"[' \n12 \nCSAM. Even when trained on ā€œcleanā€ data, increasingly capable GAI models can synthesize or produce \nsynthetic NCII and CSAM. Websites, mobile apps, and custom-built models that generate synthetic NCII \nhave moved from niche internet forums to mainstream, automated, and scaled online businesses. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Safe, Privacy Enhanced \n2.12. \nValue Chain and Component Integration \nGAI value chains involve many third-party components such as procured datasets, pre-trained models, \nand software libraries. These components might be improperly obtained or not properly vetted, leading \nto diminished transparency or accountability for downstream users. While this is a risk for traditional AI \nsystems and some other digital technologies, the risk is exacerbated for GAI due to the scale of the \ntraining data, which may be too large for humans to vet; the diļ¬ƒculty of training foundation models, \nwhich leads to extensive reuse of limited numbers of models; and the extent to which GAI may be \nintegrated into other devices and services. As GAI systems often involve many distinct third-party \ncomponents and data sources, it may be diļ¬ƒcult to attribute issues in a systemā€™s behavior to any one of \nthese sources. \nErrors in third-party GAI components can also have downstream impacts on accuracy and robustness. \nFor example, test datasets commonly used to benchmark or validate models can contain label errors. \nInaccuracies in these labels can impact the ā€œstabilityā€ or robustness of these benchmarks, which many \nGAI practitioners consider during the model selection process. 
\nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n3. \nSuggested Actions to Manage GAI Risks \nThe following suggested actions target risks unique to or exacerbated by GAI. \nIn addition to the suggested actions below, AI risk management activities and actions set forth in the AI \nRMF 1.0 and Playbook are already applicable for managing GAI risks. Organizations are encouraged to \napply the activities suggested in the AI RMF and its Playbook when managing the risk of GAI systems. \nImplementation of the suggested actions will vary depending on the type of risk, characteristics of GAI \nsystems, stage of the GAI lifecycle, and relevant AI actors involved. \nSuggested actions to manage GAI risks can be found in the tables below: \nā€¢ \nThe suggested actions are organized by relevant AI RMF subcategories to streamline these \nactivities alongside implementation of the AI RMF. \nā€¢ \nNot every subcategory of the AI RMF is included in this document.13 Suggested actions are \nlisted for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later. \n']","Challenges associated with value chain and component integration in GAI systems include the improper acquisition or vetting of third-party components such as datasets, pre-trained models, and software libraries, which can lead to diminished transparency and accountability. The scale of training data may be too large for humans to vet, and the difficulty of training foundation models can result in extensive reuse of a limited number of models. Additionally, it may be difficult to attribute issues in a system's behavior to any one of these sources, and errors in third-party GAI components can have downstream impacts on accuracy and robustness.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 15, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What should entities do to proactively identify and manage risks associated with collecting, using, sharing, or storing sensitive data?","[' \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nTraditional terms of serviceā€”the block of text that the public is accustomed to clicking through when using a web\xad\nsite or digital appā€”are not an adequate mechanism for protecting privacy. The American public should be protect\xad\ned via built-in privacy protections, data minimization, use and collection limitations, and transparency, in addition \nto being entitled to clear mechanisms to control access to and use of their dataā€”including their metadataā€”in a \nproactive, informed, and ongoing way. 
Any automated system collecting, using, sharing, or storing personal data \nshould meet these expectations. \nProtect privacy by design and by default \nPrivacy by design and by default. Automated systems should be designed and built with privacy protect\xad\ned by default. Privacy risks should be assessed throughout the development life cycle, including privacy risks \nfrom reidentification, and appropriate technical and policy mitigation measures should be implemented. This \nincludes potential harms to those who are not users of the automated system, but who may be harmed by \ninferred data, purposeful privacy violations, or community surveillance or other community harms. Data \ncollection should be minimized and clearly communicated to the people whose data is collected. Data should \nonly be collected or used for the purposes of training or testing machine learning models if such collection and \nuse is legal and consistent with the expectations of the people whose data is collected. User experience \nresearch should be conducted to confirm that people understand what data is being collected about them and \nhow it will be used, and that this collection matches their expectations and desires. \nData collection and use-case scope limits. Data collection should be limited in scope, with specific, \nnarrow identified goals, to avoid ""mission creep."" Anticipated data collection should be determined to be \nstrictly necessary to the identified goals and should be minimized as much as possible. Data collected based on \nthese identified goals and for a specific context should not be used in a different context without assessing for \nnew privacy risks and implementing appropriate mitigation measures, which may include express consent. \nClear timelines for data retention should be established, with data deleted as soon as possible in accordance \nwith legal or policy-based limitations. Determined data retention timelines should be documented and justi\xad\nfied. \nRisk identification and mitigation. Entities that collect, use, share, or store sensitive data should \nattempt to proactively identify harms and seek to manage them so as to avoid, mitigate, and respond appropri\xad\nately to identified risks. Appropriate responses include determining not to process data when the privacy risks \noutweigh the benefits or implementing measures to mitigate acceptable risks. Appropriate responses do not \ninclude sharing or transferring the privacy risks to users via notice or consent requests where users could not \nreasonably be expected to understand the risks without further support. \nPrivacy-preserving security. Entities creating, using, or governing automated systems should follow \nprivacy and security best practices designed to ensure data and metadata do not leak beyond the specific \nconsented use case. Best practices could include using privacy-enhancing cryptography or other types of \nprivacy-enhancing technologies or fine-grained permissions and access control mechanisms, along with \nconventional system security protocols. \n33\n']","Entities that collect, use, share, or store sensitive data should attempt to proactively identify harms and seek to manage them to avoid, mitigate, and respond appropriately to identified risks. 
Appropriate responses include determining not to process data when the privacy risks outweigh the benefits or implementing measures to mitigate acceptable risks.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 32, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What role do algorithmic harms play in shaping the principles of the Blueprint for an AI Bill of Rights?,"[' \n \n \nABOUT THIS FRAMEWORK\xad\xad\xad\xad\xad\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the \ndesign, use, and deployment of automated systems to protect the rights of the American public in the age of \nartificial intel-ligence. Developed through extensive consultation with the American public, these principles are \na blueprint for building and deploying automated systems that are aligned with democratic values and protect \ncivil rights, civil liberties, and privacy. The Blueprint for an AI Bill of Rights includes this Foreword, the five \nprinciples, notes on Applying the The Blueprint for an AI Bill of Rights, and a Technical Companion that gives \nconcrete steps that can be taken by many kinds of organizations—from governments at all levels to companies of \nall sizes—to uphold these values. Experts from across the private sector, governments, and international \nconsortia have published principles and frameworks to guide the responsible use of automated systems; this \nframework provides a national values statement and toolkit that is sector-agnostic to inform building these \nprotections into policy, practice, or the technological design process. Where existing law or policy—such as \nsector-specific privacy laws and oversight requirements—do not already provide guidance, the Blueprint for an \nAI Bill of Rights should be used to inform policy decisions.\nLISTENING TO THE AMERICAN PUBLIC\nThe White House Office of Science and Technology Policy has led a year-long process to seek and distill input \nfrom people across the country—from impacted communities and industry stakeholders to technology develop-\ners and other experts across fields and sectors, as well as policymakers throughout the Federal government—on \nthe issue of algorithmic and data-driven harms and potential remedies. Through panel discussions, public listen-\ning sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. 
\n4\n']",The answer to given question is not present in context,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 3, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the purpose of reviewing transparency artifacts in the context of third-party models?,"[' \n43 \nMG-3.1-005 Review various transparency artifacts (e.g., system cards and model cards) for \nthird-party models. \nInformation Integrity; Information \nSecurity; Value Chain and \nComponent Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, Third-party entities \n \nMANAGE 3.2: Pre-trained models which are used for development are monitored as part of AI system regular monitoring and \nmaintenance. \nAction ID \nSuggested Action \nGAI Risks \nMG-3.2-001 \nApply explainable AI (XAI) techniques (e.g., analysis of embeddings, model \ncompression/distillation, gradient-based attributions, occlusion/term reduction, \ncounterfactual prompts, word clouds) as part of ongoing continuous \nimprovement processes to mitigate risks related to unexplainable GAI systems. \nHarmful Bias and Homogenization \nMG-3.2-002 \nDocument how pre-trained models have been adapted (e.g., fine-tuned, or \nretrieval-augmented generation) for the specific generative task, including any \ndata augmentations, parameter adjustments, or other modifications. Access to \nun-tuned (baseline) models supports debugging the relative influence of the pre-\ntrained weights compared to the fine-tuned model weights or other system \nupdates. \nInformation Integrity; Data Privacy \nMG-3.2-003 \nDocument sources and types of training data and their origins, potential biases \npresent in the data related to the GAI application and its content provenance, \narchitecture, training process of the pre-trained model including information on \nhyperparameters, training duration, and any fine-tuning or retrieval-augmented \ngeneration processes applied. \nInformation Integrity; Harmful Bias \nand Homogenization; Intellectual \nProperty \nMG-3.2-004 Evaluate user reported problematic content and integrate feedback into system \nupdates. \nHuman-AI Configuration, \nDangerous, Violent, or Hateful \nContent \nMG-3.2-005 \nImplement content filters to prevent the generation of inappropriate, harmful, \nfalse, illegal, or violent content related to the GAI application, including for CSAM \nand NCII. These filters can be rule-based or leverage additional machine learning \nmodels to flag problematic inputs and outputs. \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMG-3.2-006 \nImplement real-time monitoring processes for analyzing generated content \nperformance and trustworthiness characteristics related to content provenance \nto identify deviations from the desired standards and trigger alerts for human \nintervention. 
\nInformation Integrity \n']","The purpose of reviewing transparency artifacts in the context of third-party models is to ensure information integrity, security, and effective value chain and component integration.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 46, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What types of automated systems should be covered by the AI Bill of Rights?,"[' \n \n \n \n \n \n \n \n \nAPPENDIX\nExamples of Automated Systems \nThe below examples are meant to illustrate the breadth of automated systems that, insofar as they have the \npotential to meaningfully impact rights, opportunities, or access to critical resources or services, should \nbe covered by the Blueprint for an AI Bill of Rights. These examples should not be construed to limit that \nscope, which includes automated systems that may not yet exist, but which fall under these criteria. \nExamples of automated systems for which the Blueprint for an AI Bill of Rights should be considered include \nthose that have the potential to meaningfully impact: \n• Civil rights, civil liberties, or privacy, including but not limited to:\nSpeech-related systems such as automated content moderation tools; \nSurveillance and criminal justice system algorithms such as risk assessments, predictive \n policing, automated license plate readers, real-time facial recognition systems (especially \n those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. \n• Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems, \nprojections of student progress or outcomes, algorithms that determine access to resources or \n rograms, and surveillance of classes (whether online or in-person); \nHousing-related systems such as tenant screening algorithms, automated valuation systems that \n estimate the value of homes used in mortgage underwriting or home insurance, and automated \n valuations from online aggregator websites; and \nEmployment-related systems such as workplace algorithms that inform all aspects of the terms \n and conditions of employment including, but not limited to, pay or promotion, hiring or termina- \n tion algorithms, virtual or augmented reality workplace training programs, and electronic work \nplace surveillance and management systems. 
\n• Access to critical resources and services, including but not limited to:\nHealth and health insurance technologies such as medical AI systems and devices, AI-assisted \n diagnostic tools, algorithms or predictive models used to support clinical decision making, medical \n or insurance health risk assessments, drug addiction risk assessments and associated access alg \n-orithms, wearable technologies, wellness apps, insurance care allocation algorithms, and health\ninsurance cost and underwriting algorithms;\nFinancial system algorithms such as loan allocation algorithms, financial system access determi-\nnation algorithms, credit scoring systems, insurance algorithms including risk assessments, auto\n-mated interest rate determinations, and financial algorithms that apply penalties (e.g., that can\ngarnish wages or withhold tax returns);\n53\n']","The types of automated systems that should be covered by the AI Bill of Rights include those that have the potential to meaningfully impact civil rights, civil liberties, or privacy, equal opportunities, and access to critical resources and services. Examples include speech-related systems, surveillance and criminal justice algorithms, voting-related systems, education-related systems, housing-related systems, employment-related systems, health technologies, and financial system algorithms.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 52, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the significance of content provenance in managing risks associated with AI-generated synthetic content?,"[' \n51 \ngeneral public participants. For example, expert AI red-teamers could modify or verify the \nprompts written by general public AI red-teamers. These approaches may also expand coverage \nof the AI risk attack surface. \n• \nHuman / AI: Performed by GAI in combination with specialist or non-specialist human teams. \nGAI-led red-teaming can be more cost effective than human red-teamers alone. Human or GAI-\nled AI red-teaming may be better suited for eliciting different types of harms. \n \nA.1.6. Content Provenance \nOverview \nGAI technologies can be leveraged for many applications such as content generation and synthetic data. \nSome aspects of GAI outputs, such as the production of deepfake content, can challenge our ability to \ndistinguish human-generated content from AI-generated synthetic content. To help manage and mitigate \nthese risks, digital transparency mechanisms like provenance data tracking can trace the origin and \nhistory of content. Provenance data tracking and synthetic content detection can help facilitate greater \ninformation access about both authentic and synthetic content to users, enabling better knowledge of \ntrustworthiness in AI systems. When combined with other organizational accountability mechanisms, \ndigital content transparency approaches can enable processes to trace negative outcomes back to their \nsource, improve information integrity, and uphold public trust. 
Provenance data tracking and synthetic \ncontent detection mechanisms provide information about the origin and history of content to assist in \nGAI risk management efforts. \nProvenance metadata can include information about GAI model developers or creators of GAI content, \ndate/time of creation, location, modifications, and sources. Metadata can be tracked for text, images, \nvideos, audio, and underlying datasets. The implementation of provenance data tracking techniques can \nhelp assess the authenticity, integrity, intellectual property rights, and potential manipulations in digital \ncontent. Some well-known techniques for provenance data tracking include digital watermarking, \nmetadata recording, digital fingerprinting, and human authentication, among others. \nProvenance Data Tracking Approaches \nProvenance data tracking techniques for GAI systems can be used to track the history and origin of data \ninputs, metadata, and synthetic content. Provenance data tracking records the origin and history for \ndigital content, allowing its authenticity to be determined. It consists of techniques to record metadata \nas well as overt and covert digital watermarks on content. Data provenance refers to tracking the origin \nand history of input data through metadata and digital watermarking techniques. Provenance data \ntracking processes can include and assist AI Actors across the lifecycle who may not have full visibility or \ncontrol over the various trade-offs and cascading impacts of early-stage model decisions on downstream \nperformance and synthetic outputs. For example, by selecting a watermarking model to prioritize \nrobustness (the durability of a watermark), an AI actor may inadvertently diminish computational \ncomplexity (the resources required to implement watermarking). Organizational risk management \nefforts for enhancing content provenance include: \n• \nTracking provenance of training data and metadata for GAI systems; \n• \nDocumenting provenance data limitations within GAI systems; \n']","Content provenance is significant in managing risks associated with AI-generated synthetic content as it involves digital transparency mechanisms like provenance data tracking, which can trace the origin and history of content. This helps in distinguishing human-generated content from AI-generated synthetic content, facilitating greater information access about both authentic and synthetic content. Provenance data tracking can assist in assessing authenticity, integrity, intellectual property rights, and potential manipulations in digital content, thereby improving information integrity and upholding public trust.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 54, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What role do legal protections play in addressing algorithmic discrimination?,"[' \xad\xad\xad\xad\xad\xad\xad\nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable \nway. 
\nAlgorithmic \ndiscrimination \noccurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex \n(including \npregnancy, \nchildbirth, \nand \nrelated \nmedical \nconditions, \ngender \nidentity, \nintersex \nstatus, \nand \nsexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic infor-mation, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23\n']","The context mentions that algorithmic discrimination may violate legal protections depending on specific circumstances, indicating that legal protections play a role in addressing algorithmic discrimination.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 22, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What measures should be taken to ensure that surveillance technologies do not infringe on privacy and civil liberties?,"[' \n \n \n \n \nSECTION TITLE\nDATA PRIVACY\nYou should be protected from abusive data practices via built-in protections and you \nshould have agency over how data about you is used. You should be protected from violations of \nprivacy through design choices that ensure such protections are included by default, including ensuring that \ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \nusers with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases \nwhere it can be appropriately and meaningfully given. 
Any consent requests should be brief, be understandable \nin plain language, and give you agency over data collection and the specific context of use; current hard-to\xad\nunderstand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \nrestrictions for data and inferences related to sensitive domains, including health, work, education, criminal \njustice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \nrelated inferences should only be used for necessary functions, and you should be protected by ethical review \nand use prohibitions. You and your communities should be free from unchecked surveillance; surveillance \ntechnologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \npotential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the use of such surveillance \ntechnologies is likely to limit rights, opportunities, or access. Whenever possible, you should have access to \nreporting that confirms your data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or access. \nNOTICE AND EXPLANATION\nYou should know that an automated system is being used and understand how and why it \ncontributes to outcomes that impact you. Designers, developers, and deployers of automated systems \nshould provide generally accessible plain language documentation including clear descriptions of the overall \nsystem functioning and the role automation plays, notice that such systems are in use, the individual or organiza\xad\ntion responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice \nshould be kept up-to-date and people impacted by the system should be notified of significant use case or key \nfunctionality changes. You should know how and why an outcome impacting you was determined by an \nautomated system, including when the automated system is not the sole input determining the outcome. \nAutomated systems should provide explanations that are technically valid, meaningful and useful to you and to \nany operators or others who need to understand the system, and calibrated to the level of risk based on the \ncontext. Reporting that includes summary information about these automated systems in plain language and \nassessments of the clarity and quality of the notice and explanations should be made public whenever possible. \n6\n']","Surveillance technologies should be subject to heightened oversight that includes at least pre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. 
Continuous surveillance and monitoring should not be used in education, work, housing, or in other contexts where the use of such surveillance technologies is likely to limit rights, opportunities, or access.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 5, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the requirements for employers regarding workplace surveillance during a labor dispute?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nDATA PRIVACY \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe Privacy Act of 1974 requires privacy protections for personal information in federal \nrecords systems, including limits on data retention, and also provides individuals a general \nright to access and correct their data. Among other things, the Privacy Act limits the storage of individual \ninformation in federal systems of records, illustrating the principle of limiting the scope of data retention. Under \nthe Privacy Act, federal agencies may only retain data about an individual that is ā€œrelevant and necessaryā€ to \naccomplish an agencyā€™s statutory purpose or to comply with an Executive Order of the President. The law allows \nfor individuals to be able to access any of their individual information stored in a federal system of records, if not \nincluded under one of the systems of records exempted pursuant to the Privacy Act. In these cases, federal agen\xad\ncies must provide a method for an individual to determine if their personal information is stored in a particular \nsystem of records, and must provide procedures for an individual to contest the contents of a record about them. \nFurther, the Privacy Act allows for a cause of action for an individual to seek legal relief if a federal agency does not \ncomply with the Privacy Actā€™s requirements. Among other things, a court may order a federal agency to amend or \ncorrect an individualā€™s information in its records or award monetary damages if an inaccurate, irrelevant, untimely, \nor incomplete record results in an adverse determination about an individualā€™s ā€œqualifications, character, rights, ā€¦ \nopportunitiesā€¦, or benefits.ā€ \nNISTā€™s Privacy Framework provides a comprehensive, detailed and actionable approach for \norganizations to manage privacy risks. The NIST Framework gives organizations ways to identify and \ncommunicate their privacy risks and goals to support ethical decision-making in system, product, and service \ndesign or deployment, as well as the measures they are taking to demonstrate compliance with applicable laws \nor regulations. 
It has been voluntarily adopted by organizations across many different sectors around the world.78\nA school boardā€™s attempt to surveil public school studentsā€”undertaken without \nadequate community inputā€”sparked a state-wide biometrics moratorium.79 Reacting to a plan in \nthe city of Lockport, New York, the stateā€™s legislature banned the use of facial recognition systems and other \nā€œbiometric identifying technologyā€ in schools until July 1, 2022.80 The law additionally requires that a report on \nthe privacy, civil rights, and civil liberties implications of the use of such technologies be issued before \nbiometric identification technologies can be used in New York schools. \nFederal law requires employers, and any consultants they may retain, to report the costs \nof surveilling employees in the context of a labor dispute, providing a transparency \nmechanism to help protect worker organizing. Employers engaging in workplace surveillance ""where \nan object there-of, directly or indirectly, is [ā€¦] to obtain information concerning the activities of employees or a \nlabor organization in connection with a labor dispute"" must report expenditures relating to this surveillance to \nthe Department of Labor Office of Labor-Management Standards, and consultants who employers retain for \nthese purposes must also file reports regarding their activities.81\nPrivacy choices on smartphones show that when technologies are well designed, privacy \nand data agency can be meaningful and not overwhelming. These choicesā€”such as contextual, timely \nalerts about location trackingā€”are brief, direct, and use-specific. Many of the expectations listed here for \nprivacy by design and use-specific consent mirror those distributed to developers as best practices when \ndeveloping for smart phone devices,82 such as being transparent about how user data will be used, asking for app \npermissions during their use so that the use-context will be clear to users, and ensuring that the app will still \nwork if users deny (or later revoke) some permissions. \n39\n']","Federal law requires employers, and any consultants they may retain, to report the costs of surveilling employees in the context of a labor dispute. Employers engaging in workplace surveillance aimed at obtaining information concerning the activities of employees or a labor organization in connection with a labor dispute must report expenditures relating to this surveillance to the Department of Labor Office of Labor-Management Standards, and consultants who employers retain for these purposes must also file reports regarding their activities.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 38, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What is the importance of documenting roles and responsibilities related to managing AI risks within an organization?,"[' \n17 \nGOVERN 1.7: Processes and procedures are in place for decommissioning and phasing out AI systems safely and in a manner that \ndoes not increase risks or decrease the organizationā€™s trustworthiness. 
\nAction ID \nSuggested Action \nGAI Risks \nGV-1.7-001 Protocols are put in place to ensure GAI systems are able to be deactivated when \nnecessary. \nInformation Security; Value Chain \nand Component Integration \nGV-1.7-002 \nConsider the following factors when decommissioning GAI systems: Data \nretention requirements; Data security, e.g., containment, protocols, Data leakage \nafter decommissioning; Dependencies between upstream, downstream, or other \ndata, internet of things (IOT) or AI systems; Use of open-source data or models; \nUsersā€™ emotional entanglement with GAI functions. \nHuman-AI Conļ¬guration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nGOVERN 2.1: Roles and responsibilities and lines of communication related to mapping, measuring, and managing AI risks are \ndocumented and are clear to individuals and teams throughout the organization. \nAction ID \nSuggested Action \nGAI Risks \nGV-2.1-001 \nEstablish organizational roles, policies, and procedures for communicating GAI \nincidents and performance to AI Actors and downstream stakeholders (including \nthose potentially impacted), via community or oļ¬ƒcial resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor). \nHuman-AI Conļ¬guration; Value \nChain and Component Integration \nGV-2.1-002 Establish procedures to engage teams for GAI system incident response with \ndiverse composition and responsibilities based on the particular incident type. \nHarmful Bias and Homogenization \nGV-2.1-003 Establish processes to verify the AI Actors conducting GAI incident response tasks \ndemonstrate and maintain the appropriate skills and training. \nHuman-AI Conļ¬guration \nGV-2.1-004 When systems may raise national security risks, involve national security \nprofessionals in mapping, measuring, and managing those risks. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Information Security \nGV-2.1-005 \nCreate mechanisms to provide protections for whistleblowers who report, based \non reasonable belief, when the organization violates relevant laws or poses a \nspeciļ¬c and empirically well-substantiated negative risk to public safety (or has \nalready caused harm). \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent \nAI Actor Tasks: Governance and Oversight \n \n']","The importance of documenting roles and responsibilities related to managing AI risks within an organization is to ensure that these roles and lines of communication are clear to individuals and teams throughout the organization. 
This clarity helps in mapping, measuring, and managing AI risks effectively.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 20, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the importance of assessing the proportion of synthetic to non-synthetic training data in AI model development?,"[' \n37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities ā€“ as identiļ¬ed in the MAP \nfunction ā€“ are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. \nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, ļ¬ne tuning, and deploying models: Verify tradeoļ¬€s \nbetween resources used at inference time versus additional resources required \nat training time. \nEnvironmental \nMS-2.12-004 Verify eļ¬€ectiveness of carbon capture or oļ¬€set programs for GAI training and \napplications, and address green-washing concerns. \nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","The importance of assessing the proportion of synthetic to non-synthetic training data in AI model development is to verify that the training data is not overly homogenous or generated by Generative AI (GAI), which helps mitigate concerns of model collapse.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 40, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the significance of technological diffusion in the context of integrating AI technologies within communities?,"["" \n \n \n \nAPPENDIX\nPanelists discussed the benefits of AI-enabled systems and their potential to build better and more \ninnovative infrastructure. 
They individually noted that while AI technologies may be new, the process of \ntechnological diffusion is not, and that it was critical to have thoughtful and responsible development and \nintegration of technology within communities. Some panelists suggested that the integration of technology \ncould benefit from examining how technological diffusion has worked in the realm of urban planning: \nlessons learned from successes and failures there include the importance of balancing ownership rights, use \nrights, and community health, safety and welfare, as well ensuring better representation of all voices, \nespecially those traditionally marginalized by technological advances. Some panelists also raised the issue of \npower structures ā€“ providing examples of how strong transparency requirements in smart city projects \nhelped to reshape power and give more voice to those lacking the financial or political power to effect change. \nIn discussion of technical and governance interventions that that are needed to protect against the harms \nof these technologies, various panelists emphasized the need for transparency, data collection, and \nflexible and reactive policy development, analogous to how software is continuously updated and deployed. \nSome panelists pointed out that companies need clear guidelines to have a consistent environment for \ninnovation, with principles and guardrails being the key to fostering responsible innovation. \nPanel 2: The Criminal Justice System. This event explored current and emergent uses of technology in \nthe criminal justice system and considered how they advance or undermine public safety, justice, and \ndemocratic values. \nWelcome: \nā€¢\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\nā€¢\nBen Winters, Counsel, Electronic Privacy Information Center\nModerator: Chiraag Bains, Deputy Assistant to the President on Racial Justice & Equity \nPanelists: \nā€¢\nSean Malinowski, Director of Policing Innovation and Reform, University of Chicago Crime Lab\nā€¢\nKristian Lum, Researcher\nā€¢\nJumana Musa, Director, Fourth Amendment Center, National Association of Criminal Defense Lawyers\nā€¢\nStanley Andrisse, Executive Director, From Prison Cells to PHD; Assistant Professor, Howard University\nCollege of Medicine\nā€¢\nMyaisha Hayes, Campaign Strategies Director, MediaJustice\nPanelists discussed uses of technology within the criminal justice system, including the use of predictive \npolicing, pretrial risk assessments, automated license plate readers, and prison communication tools. The \ndiscussion emphasized that communities deserve safety, and strategies need to be identified that lead to safety; \nsuch strategies might include data-driven approaches, but the focus on safety should be primary, and \ntechnology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. 
Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, various panelists emphasized that transparency is important but is not enough to achieve \naccountability. Some panelists discussed their individual views on additional system needs for validity, and \nagreed upon the importance of advisory boards and compensated community input early in the design process \n(before the technology is built and instituted). Various panelists also emphasized the importance of regulation \nthat includes limits to the type and cost of such technologies. \n56\n"", "" \n \n \n \n \nAPPENDIX\nPanel 4: Artificial Intelligence and Democratic Values. This event examined challenges and opportunities in \nthe design of technology that can help support a democratic vision for AI. It included discussion of the \ntechnical aspects \nof \ndesigning \nnon-discriminatory \ntechnology, \nexplainable \nAI, \nhuman-computer \ninteraction with an emphasis on community participation, and privacy-aware design. \nWelcome:\nā€¢\nSorelle Friedler, Assistant Director for Data and Democracy, White House Office of Science and\nTechnology Policy\nā€¢\nJ. Bob Alotta, Vice President for Global Programs, Mozilla Foundation\nā€¢\nNavrina Singh, Board Member, Mozilla Foundation\nModerator: Kathy Pham Evans, Deputy Chief Technology Officer for Product and Engineering, U.S \nFederal Trade Commission. \nPanelists: \nā€¢\nLiz Oā€™Sullivan, CEO, Parity AI\nā€¢\nTimnit Gebru, Independent Scholar\nā€¢\nJennifer Wortman Vaughan, Senior Principal Researcher, Microsoft Research, New York City\nā€¢\nPamela Wisniewski, Associate Professor of Computer Science, University of Central Florida; Director,\nSocio-technical Interaction Research (STIR) Lab\nā€¢\nSeny Kamara, Associate Professor of Computer Science, Brown University\nEach panelist individually emphasized the risks of using AI in high-stakes settings, including the potential for \nbiased data and discriminatory outcomes, opaque decision-making processes, and lack of public trust and \nunderstanding of the algorithmic systems. The interventions and key needs various panelists put forward as \nnecessary to the future design of critical AI systems included ongoing transparency, value sensitive and \nparticipatory design, explanations designed for relevant stakeholders, and public consultation. \nVarious \npanelists emphasized the importance of placing trust in people, not technologies, and in engaging with \nimpacted communities to understand the potential harms of technologies and build protection by design into \nfuture systems. \nPanel 5: Social Welfare and Development. This event explored current and emerging uses of technology to \nimplement or improve social welfare systems, social development programs, and other systems that can impact \nlife chances. 
\nWelcome:\nā€¢\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\nā€¢\nAnne-Marie Slaughter, CEO, New America\nModerator: Michele Evermore, Deputy Director for Policy, Office of Unemployment Insurance \nModernization, Office of the Secretary, Department of Labor \nPanelists:\nā€¢\nBlake Hall, CEO and Founder, ID.Me\nā€¢\nKarrie Karahalios, Professor of Computer Science, University of Illinois, Urbana-Champaign\nā€¢\nChristiaan van Veen, Director of Digital Welfare State and Human Rights Project, NYU School of Law's\nCenter for Human Rights and Global Justice\n58\n""]","Technological diffusion is significant in the context of integrating AI technologies within communities as it emphasizes the importance of thoughtful and responsible development and integration of technology. Panelists noted that examining how technological diffusion has worked in urban planning can provide lessons on balancing ownership rights, use rights, and community health, safety, and welfare, ensuring better representation of all voices, especially those traditionally marginalized by technological advances.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 55, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 57, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What is the purpose of ballot curing laws in the voting process?,"["" \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nHealthcare ā€œnavigatorsā€ help people find their way through online signup forms to choose \nand obtain healthcare. 
A Navigator is ā€œan individual or organization that's trained and able to help \nconsumers, small businesses, and their employees as they look for health coverage options through the \nMarketplace (a government web site), including completing eligibility and enrollment forms.ā€106 For \nthe 2022 plan year, the Biden-Harris Administration increased funding so that grantee organizations could \nā€œtrain and certify more than 1,500 Navigators to help uninsured consumers find affordable and comprehensive \nhealth coverage.ā€107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. Some federal \ncourts have found that such cure procedures are constitutionally required.110 \nBallot \ncuring processes vary among states, and include direct phone calls, emails, or mail contact by election \nofficials.111 Voters are asked to provide alternative information or a new signature to verify the validity of their \nballot. \n52\n""]","Ballot curing laws are designed to allow voters to correct their ballot and have it counted in cases where a voter signature matching algorithm incorrectly flags their ballot as invalid or when there are other issues with their ballot. These laws ensure that voters have a fallback system to verify the validity of their ballot, which may include direct contact from election officials.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 51, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What role does technology play in implementing or improving social welfare systems?,"["" \n \n \n \n \nAPPENDIX\nPanel 4: Artificial Intelligence and Democratic Values. This event examined challenges and opportunities in \nthe design of technology that can help support a democratic vision for AI. It included discussion of the \ntechnical aspects \nof \ndesigning \nnon-discriminatory \ntechnology, \nexplainable \nAI, \nhuman-computer \ninteraction with an emphasis on community participation, and privacy-aware design. \nWelcome:\nā€¢\nSorelle Friedler, Assistant Director for Data and Democracy, White House Office of Science and\nTechnology Policy\nā€¢\nJ. 
Bob Alotta, Vice President for Global Programs, Mozilla Foundation\n•\nNavrina Singh, Board Member, Mozilla Foundation\nModerator: Kathy Pham Evans, Deputy Chief Technology Officer for Product and Engineering, U.S \nFederal Trade Commission. \nPanelists: \n•\nLiz O’Sullivan, CEO, Parity AI\n•\nTimnit Gebru, Independent Scholar\n•\nJennifer Wortman Vaughan, Senior Principal Researcher, Microsoft Research, New York City\n•\nPamela Wisniewski, Associate Professor of Computer Science, University of Central Florida; Director,\nSocio-technical Interaction Research (STIR) Lab\n•\nSeny Kamara, Associate Professor of Computer Science, Brown University\nEach panelist individually emphasized the risks of using AI in high-stakes settings, including the potential for \nbiased data and discriminatory outcomes, opaque decision-making processes, and lack of public trust and \nunderstanding of the algorithmic systems. The interventions and key needs various panelists put forward as \nnecessary to the future design of critical AI systems included ongoing transparency, value sensitive and \nparticipatory design, explanations designed for relevant stakeholders, and public consultation. \nVarious \npanelists emphasized the importance of placing trust in people, not technologies, and in engaging with \nimpacted communities to understand the potential harms of technologies and build protection by design into \nfuture systems. \nPanel 5: Social Welfare and Development. This event explored current and emerging uses of technology to \nimplement or improve social welfare systems, social development programs, and other systems that can impact \nlife chances. \nWelcome:\n•\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n•\nAnne-Marie Slaughter, CEO, New America\nModerator: Michele Evermore, Deputy Director for Policy, Office of Unemployment Insurance \nModernization, Office of the Secretary, Department of Labor \nPanelists:\n•\nBlake Hall, CEO and Founder, ID.Me\n•\nKarrie Karahalios, Professor of Computer Science, University of Illinois, Urbana-Champaign\n•\nChristiaan van Veen, Director of Digital Welfare State and Human Rights Project, NYU School of Law's\nCenter for Human Rights and Global Justice\n58\n""]",The answer to given question is not present in context,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 57, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What actions are suggested to address risks associated with intellectual property infringement in organizational GAI systems?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain effective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability – as identified in the MAP function – are examined and \ndocumented. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Configuration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modified, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. \nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","The suggested action to address risks associated with intellectual property infringement in organizational GAI systems is to compile statistics on actual policy violations, take-down requests, and intellectual property infringement, and analyze transparency reports across demographic and language groups.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What problems does AI-enabled nudification technology seek to address and protect against?,"[' \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nAI-enabled “nudification” technology that creates images where people appear to be nude—including apps that\nenable non-technical users to create or alter images of individuals without their consent—has proliferated at an\nalarming rate. Such technology is becoming a common form of image-based abuse that disproportionately\nimpacts women. As these tools become more sophisticated, they are producing altered images that are increasing\xad\nly realistic and are difficult for both humans and AI to detect as inauthentic. Regardless of authenticity, the expe\xad\nrience of harm to victims of non-consensual intimate images can be devastatingly real—affecting their personal\nand professional lives, and impacting their mental and physical health.10\n•\nA company installed AI-powered cameras in its delivery vans in order to evaluate the road safety habits of its driv\xad\ners, but the system incorrectly penalized drivers when other cars cut them off or when other events beyond\ntheir control took place on the road. 
As a result, drivers were incorrectly ineligible to receive a bonus.11\n17\n']","AI-enabled nudification technology seeks to address and protect against image-based abuse, particularly the creation of non-consensual intimate images that disproportionately impact women. It aims to combat the proliferation of apps that allow users to create or alter images of individuals without their consent, which can lead to devastating harm to victims.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 16, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What precautions should be taken when using derived data sources in automated systems?,"[' \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be care\xad\nfully validated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indi\xad\ncating adverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law. Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examina\xad\ntion, has benefits for those impacted by the system that outweigh identified risks and, as appropriate, reason\xad\nable measures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g., \nvia application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations. 
Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. \nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the \norganization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency, \nresults, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20\n']","Precautions that should be taken when using derived data sources in automated systems include careful tracking and validation of derived data, as it may be high-risk and could lead to feedback loops, compounded harm, or inaccurate results. Such data should be validated against the risk of collateral consequences.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What are indirect prompt injection attacks and how do they exploit vulnerabilities in GAI-integrated applications?,"[' \n11 \nvalue chain (e.g., data inputs, processing, GAI training, or deployment environments), conventional \ncybersecurity practices may need to adapt or evolve. \nFor instance, prompt injection involves modifying what input is provided to a GAI system so that it \nbehaves in unintended ways. In direct prompt injections, attackers might craft malicious prompts and \ninput them directly to a GAI system, with a variety of downstream negative consequences to \ninterconnected systems. Indirect prompt injection attacks occur when adversaries remotely (i.e., without \na direct interface) exploit LLM-integrated applications by injecting prompts into data likely to be \nretrieved. Security researchers have already demonstrated how indirect prompt injections can exploit \nvulnerabilities by stealing proprietary data or running malicious code remotely on a machine. 
Merely \nquerying a closed production model can elicit previously undisclosed information about that model. \nAnother cybersecurity risk to GAI is data poisoning, in which an adversary compromises a training \ndataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts \nof the model could exacerbate risks associated with GAI system outputs. \nTrustworthy AI Characteristics: Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n2.10. \nIntellectual Property \nIntellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair \nuse under the fair use doctrine. If a GAI system’s training data included copyrighted material, GAI \noutputs displaying instances of training data memorization (see Data Privacy above) could infringe on \ncopyright. \nHow GAI relates to copyright, including the status of generated content that is similar to but does not \nstrictly copy work protected by copyright, is currently being debated in legal fora. Similar discussions are \ntaking place regarding the use or emulation of personal identity, likeness, or voice without permission. \nTrustworthy AI Characteristics: Accountable and Transparent, Fair with Harmful Bias Managed, Privacy \nEnhanced \n2.11. \nObscene, Degrading, and/or Abusive Content \nGAI can ease the production of and access to illegal non-consensual intimate imagery (NCII) of adults, \nand/or child sexual abuse material (CSAM). GAI-generated obscene, abusive or degrading content can \ncreate privacy, psychological and emotional, and even physical harms, and in some cases may be illegal. \nGenerated explicit or obscene AI content may include highly realistic “deepfakes” of real individuals, \nincluding children. The spread of this kind of material can have downstream negative consequences: in \nthe context of CSAM, even if the generated images do not resemble specific individuals, the prevalence \nof such images can divert time and resources from efforts to find real-world victims. Outside of CSAM, \nthe creation and spread of NCII disproportionately impacts women and sexual minorities, and can have \nsubsequent negative consequences including decline in overall mental health, substance abuse, and \neven suicidal thoughts. \nData used for training GAI models may unintentionally include CSAM and NCII. A recent report noted \nthat several commonly used GAI training datasets were found to contain hundreds of known images of \n']",Indirect prompt injection attacks occur when adversaries remotely exploit LLM-integrated applications by injecting prompts into data likely to be retrieved. 
These attacks can exploit vulnerabilities by stealing proprietary data or running malicious code remotely on a machine.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 14, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What is the significance of digital content transparency in relation to the societal impacts of AI?,"["" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Configuration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (“go”/“no-go” \ndecisions), monitoring, and decommission decisions. 
\nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","The significance of digital content transparency in relation to the societal impacts of AI lies in providing input for training materials about the capabilities and limitations of GAI systems. This transparency is crucial for AI actors, professionals, and the public to understand the societal impacts of AI and the role of diverse and inclusive content generation.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the purpose of engaging in threat modeling for GAI systems?,"[' \n18 \nGOVERN 3.2: Policies and procedures are in place to deļ¬ne and diļ¬€erentiate roles and responsibilities for human-AI conļ¬gurations \nand oversight of AI systems. \nAction ID \nSuggested Action \nGAI Risks \nGV-3.2-001 \nPolicies are in place to bolster oversight of GAI systems with independent \nevaluations or assessments of GAI models or systems where the type and \nrobustness of evaluations are proportional to the identiļ¬ed risks. \nCBRN Information or Capabilities; \nHarmful Bias and Homogenization \nGV-3.2-002 \nConsider adjustment of organizational roles and components across lifecycle \nstages of large or complex GAI systems, including: Test and evaluation, validation, \nand red-teaming of GAI systems; GAI content moderation; GAI system \ndevelopment and engineering; Increased accessibility of GAI tools, interfaces, and \nsystems, Incident response and containment. \nHuman-AI Conļ¬guration; \nInformation Security; Harmful Bias \nand Homogenization \nGV-3.2-003 \nDeļ¬ne acceptable use policies for GAI interfaces, modalities, and human-AI \nconļ¬gurations (i.e., for chatbots and decision-making tasks), including criteria for \nthe kinds of queries GAI applications should refuse to respond to. \nHuman-AI Conļ¬guration \nGV-3.2-004 \nEstablish policies for user feedback mechanisms for GAI systems which include \nthorough instructions and any mechanisms for recourse. \nHuman-AI Conļ¬guration \nGV-3.2-005 \nEngage in threat modeling to anticipate potential risks from GAI systems. \nCBRN Information or Capabilities; \nInformation Security \nAI Actors: AI Design \n \nGOVERN 4.1: Organizational policies and practices are in place to foster a critical thinking and safety-ļ¬rst mindset in the design, \ndevelopment, deployment, and uses of AI systems to minimize potential negative impacts. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.1-001 \nEstablish policies and procedures that address continual improvement processes \nfor GAI risk measurement. Address general risks associated with a lack of \nexplainability and transparency in GAI systems by using ample documentation and \ntechniques such as: application of gradient-based attributions, occlusion/term \nreduction, counterfactual prompts and prompt engineering, and analysis of \nembeddings; Assess and update risk measurement approaches at regular \ncadences. 
\nConfabulation \nGV-4.1-002 \nEstablish policies, procedures, and processes detailing risk measurement in \ncontext of use with standardized measurement protocols and structured public \nfeedback exercises such as AI red-teaming or independent external evaluations. \nCBRN Information and Capability; \nValue Chain and Component \nIntegration \n']",Engaging in threat modeling for GAI systems is intended to anticipate potential risks from these systems.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 21, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What role do GAI systems play in augmenting cybersecurity attacks?,"[' \n10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards specific demographics. Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic “deepfakes” – that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities. \nDisinformation and misinformation – both of which may be facilitated by GAI – may erode public trust in \ntrue or valid evidence and information, with downstream effects. For example, a synthetic image of a \nPentagon blast went viral and briefly caused a drop in the stock market. Generative AI models can also \nassist malicious actors in creating compelling imagery and propaganda to support disinformation \ncampaigns, which may not be photorealistic, but could enable these campaigns to gain more reach and \nengagement on social media platforms. Additionally, generative AI models can assist malicious actors in \ncreating fraudulent content intended to impersonate others. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe, Valid and Reliable, Interpretable and \nExplainable \n2.9. Information Security \nInformation security for computer systems and data is a mature field with widely accepted and \nstandardized practices for offensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of offensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. 
\nOffensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of \nthe attack chain, including informing attackers on how to proactively evade threat detection and escalate \nprivileges after gaining system access. \nInformation security for GAI models and systems also includes maintaining availability of the GAI system \nand the integrity and (when applicable) the confidentiality of the GAI code, training data, and model \nweights. To identify and secure potential attack points in AI systems or specific components of the AI \n \n \n12 See also https://doi.org/10.6028/NIST.AI.100-4, to be published. \n']","GAI systems may augment cybersecurity attacks by advancing offensive cyber capabilities such as hacking, malware, and phishing. Reports indicate that large language models (LLMs) can discover vulnerabilities in systems and write code to exploit them. Sophisticated threat actors might develop GAI-powered security co-pilots to inform attackers on how to evade threat detection and escalate privileges after gaining system access.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 13, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What role does user consent play in the collection and use of personal data?,"['You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, de\xad\nvelopers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and de\xad\nletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfus\xad\ncate user choice or burden users with defaults that are privacy invasive. Con\xad\nsent should only be used to justify collection of data in cases where it can be \nappropriately and meaningfully given. Any consent requests should be brief, \nbe understandable in plain language, and give you agency over data collection \nand the specific context of use; current hard-to-understand no\xad\ntice-and-choice practices for broad uses of data should be changed. 
Enhanced \nprotections and restrictions for data and inferences related to sensitive do\xad\nmains, including health, work, education, criminal justice, and finance, and \nfor data pertaining to youth should put you first. In sensitive domains, your \ndata and related inferences should only be used for necessary functions, and \nyou should be protected by ethical review and use prohibitions. You and your \ncommunities should be free from unchecked surveillance; surveillance tech\xad\nnologies should be subject to heightened oversight that includes at least \npre-deployment assessment of their potential harms and scope limits to pro\xad\ntect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the \nuse of such surveillance technologies is likely to limit rights, opportunities, or \naccess. Whenever possible, you should have access to reporting that confirms \nyour data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or \naccess. \nDATA PRIVACY\n30\n']","User consent plays a crucial role in the collection and use of personal data, as it should only be used to justify data collection in cases where it can be appropriately and meaningfully given. Consent requests should be brief, understandable in plain language, and provide individuals with agency over data collection and its specific context of use.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 29, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What role do algorithmic impact assessments play in the expectations for automated systems?,"["" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAn automated system should provide demonstrably clear, timely, understandable, and accessible notice of use, and \nexplanations as to how and why a decision was made or an action was taken by the system. These expectations are \nexplained below. \nProvide clear, timely, understandable, and accessible notice of use and explanations \xad\nGenerally accessible plain language documentation. The entity responsible for using the automated \nsystem should ensure that documentation describing the overall system (including any human components) is \npublic and easy to find. The documentation should describe, in plain language, how the system works and how \nany automated component is used to determine an action or decision. It should also include expectations about \nreporting described throughout this framework, such as the algorithmic impact assessments described as \npart of Algorithmic Discrimination Protections. \nAccountable. Notices should clearly identify the entity responsible for designing each component of the \nsystem and the entity using it. \nTimely and up-to-date. 
Users should receive notice of the use of automated systems in advance of using or \nwhile being impacted by the technology. An explanation should be available with the decision itself, or soon \nthereafter. Notice should be kept up-to-date and people impacted by the system should be notified of use case \nor key functionality changes. \nBrief and clear. Notices and explanations should be assessed, such as by research on users’ experiences, \nincluding user testing, to ensure that the people using or impacted by the automated system are able to easily \nfind notices and explanations, read them quickly, and understand and act on them. This includes ensuring that \nnotices and explanations are accessible to users with disabilities and are available in the language(s) and read-\ning level appropriate for the audience. Notices and explanations may need to be available in multiple forms, \n(e.g., on paper, on a physical sign, or online), in order to meet these expectations and to be accessible to the \nAmerican public. \nProvide explanations as to how and why a decision was made or an action was taken by an \nautomated system \nTailored to the purpose. Explanations should be tailored to the specific purpose for which the user is \nexpected to use the explanation, and should clearly state that purpose. An informational explanation might \ndiffer from an explanation provided to allow for the possibility of recourse, an appeal, or one provided in the \ncontext of a dispute or contestation process. For the purposes of this framework, 'explanation' should be \nconstrued broadly. An explanation need not be a plain-language statement about causality but could consist of \nany mechanism that allows the recipient to build the necessary understanding and intuitions to achieve the \nstated purpose. Tailoring should be assessed (e.g., via user experience research). \nTailored to the target of the explanation. Explanations should be targeted to specific audiences and \nclearly state that audience. An explanation provided to the subject of a decision might differ from one provided \nto an advocate, or to a domain expert or decision maker. Tailoring should be assessed (e.g., via user experience \nresearch). \n43\n""]",The answer to given question is not present in context,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 42, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the purpose of establishing transparency policies for GAI applications?,"[' \n14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. 
\nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organization’s risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or defining risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, offensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces significant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. \nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval (“go”/“no-go”) policies, procedures, and processes, \nwith reviewed processes and approval thresholds reflecting measurement of GAI \ncapabilities and risks. \nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or offensive cyber capabilities. \nCBRN Information or Capabilities; \nInformation Security \n']","The purpose of establishing transparency policies for GAI applications is to document the origin and history of training data and generated data, which advances digital content transparency while balancing the proprietary nature of training approaches.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 17, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What is the purpose of the NIST AI Risk Management Framework?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. 
\xad\xad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principles—while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AI—require that AI is: (a) lawful and \nrespectful of our Nation’s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulations—and \nmeasures to address harms when they occur—can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components. \nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturers’ ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companies’ reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-\nBased Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. 
The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful uses. The NIST framework will consider and encompass principles such as transparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. \n21\n']","The purpose of the NIST AI Risk Management Framework is to help manage risks posed to individuals, organizations, and society by AI. It aims to incorporate trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What issues related to bias and discrimination are associated with the use of automated systems in decision-making?,"[' \nSECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent. \nThese outcomes are deeply harmful—but they are not inevitable. Automated systems have brought about extraor-\ndinary benefits, from technology that helps farmers grow food more efficiently and computers that predict storm \npaths, to algorithms that can identify diseases in patients. These tools now drive important decisions across \nsectors, while data is helping to revolutionize global industries. Fueled by the power of American innovation, \nthese tools hold the potential to redefine every part of our society and make life better for everyone. \nThis important progress must not come at the price of civil rights or democratic values, foundational American \nprinciples that President Biden has affirmed as a cornerstone of his Administration. 
On his first day in office, the \nPresident ordered the full Federal government to work to root out inequity, embed fairness in decision-\nmaking processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.1 The \nPresident has spoken forcefully about the urgent challenges posed to democracy today and has regularly called \non people of conscience to act to preserve civil rights—including the right to privacy, which he has called “the \nbasis for so many more rights that we have come to take for granted that are ingrained in the fabric of this \ncountry.”2\nTo advance President Biden’s vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threats—and uses technologies in ways that reinforce our highest values. \nResponding to the experiences of the American public, and informed by insights from researchers, \ntechnologists, advocates, journalists, and policymakers, this framework is accompanied by a technical \ncompanion—a handbook for anyone seeking to incorporate these protections into policy and practice, including \ndetailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3\n']","Automated systems in decision-making have been associated with issues such as reflecting and reproducing existing unwanted inequities, embedding new harmful bias and discrimination, and being unsafe or ineffective in areas like patient care, hiring, and credit decisions.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 2, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the importance of pre-deployment testing in the AI lifecycle?,"[' \n48 \n• Data protection \n• Data retention \n• Consistency in use of defining key terms \n• Decommissioning \n• Discouraging anonymous use \n• Education \n• Impact assessments \n• Incident response \n• Monitoring \n• Opt-outs \n• Risk-based controls \n• Risk mapping and measurement \n• Science-backed TEVV practices \n• Secure software development practices \n• Stakeholder engagement \n• Synthetic content detection and \nlabeling tools and techniques \n• Whistleblower protections \n• Workforce diversity and \ninterdisciplinary teams\nEstablishing acceptable use policies and guidance for the use of GAI in formal human-AI teaming settings \nas well as different levels of human-AI configurations can help to decrease risks arising from misuse, \nabuse, inappropriate repurpose, and misalignment between systems and users. These practices are just \none example of adapting existing governance protocols for GAI contexts. \nA.1.3. 
Third-Party Considerations \nOrganizations may seek to acquire, embed, incorporate, or use open-source or proprietary third-party \nGAI models, systems, or generated data for various applications across an enterprise. Use of these GAI \ntools and inputs has implications for all functions of the organization – including but not limited to \nacquisition, human resources, legal, compliance, and IT services – regardless of whether they are carried \nout by employees or third parties. Many of the actions cited above are relevant and options for \naddressing third-party considerations. \nThird party GAI integrations may give rise to increased intellectual property, data privacy, or information \nsecurity risks, pointing to the need for clear guidelines for transparency and risk management regarding \nthe collection and use of third-party data for model inputs. Organizations may consider varying risk \ncontrols for foundation models, fine-tuned models, and embedded tools, enhanced processes for \ninteracting with external GAI technologies or service providers. Organizations can apply standard or \nexisting risk controls and processes to proprietary or open-source GAI technologies, data, and third-party \nservice providers, including acquisition and procurement due diligence, requests for software bills of \nmaterials (SBOMs), application of service level agreements (SLAs), and statement on standards for \nattestation engagement (SSAE) reports to help with third-party transparency and risk management for \nGAI systems. \nA.1.4. Pre-Deployment Testing \nOverview \nThe diverse ways and contexts in which GAI systems may be developed, used, and repurposed \ncomplicates risk mapping and pre-deployment measurement efforts. Robust test, evaluation, validation, \nand verification (TEVV) processes can be iteratively applied – and documented – in early stages of the AI \nlifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous \n']","The importance of pre-deployment testing in the AI lifecycle lies in its ability to complicate risk mapping and pre-deployment measurement efforts due to the diverse ways and contexts in which GAI systems may be developed, used, and repurposed. Robust test, evaluation, validation, and verification (TEVV) processes can be iteratively applied and documented in the early stages of the AI lifecycle, ensuring that the systems are properly assessed before deployment.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 51, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What role do civil liberties play in the context of surveillance systems?,"[' \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nProtect the public from unchecked surveillance \nHeightened oversight of surveillance. 
Surveillance or monitoring systems should be subject to \nheightened oversight that includes at a minimum assessment of potential harms during design (before deploy\xad\nment) and in an ongoing manner, to ensure that the American public’s rights, opportunities, and access are \nprotected. This assessment should be done before deployment and should give special attention to ensure \nthere is not algorithmic discrimination, especially based on community membership, when deployed in a \nspecific real-world context. Such assessment should then be reaffirmed in an ongoing manner as long as the \nsystem is in use. \nLimited and proportionate surveillance. Surveillance should be avoided unless it is strictly necessary \nto achieve a legitimate purpose and it is proportionate to the need. Designers, developers, and deployers of \nsurveillance systems should use the least invasive means of monitoring available and restrict monitoring to the \nminimum number of subjects possible. To the greatest extent possible consistent with law enforcement and \nnational security needs, individuals subject to monitoring should be provided with clear and specific notice \nbefore it occurs and be informed about how the data gathered through surveillance will be used. \nScope limits on surveillance to protect rights and democratic values. Civil liberties and civil \nrights must not be limited by the threat of surveillance or harassment facilitated or aided by an automated \nsystem. Surveillance systems should not be used to monitor the exercise of democratic rights, such as voting, \nprivacy, peaceful assembly, speech, or association, in a way that limits the exercise of civil rights or civil liber\xad\nties. Information about or algorithmically-determined assumptions related to identity should be carefully \nlimited if used to target or guide surveillance systems in order to avoid algorithmic discrimination; such iden\xad\ntity-related information includes group characteristics or affiliations, geographic designations, location-based \nand association-based inferences, social networks, and biometrics. Continuous surveillance and monitoring \nsystems should not be used in physical or digital workplaces (regardless of employment status), public educa\xad\ntional institutions, and public accommodations. Continuous surveillance and monitoring systems should not \nbe used in a way that has the effect of limiting access to critical resources or services or suppressing the exer\xad\ncise of rights, even where the organization is not under a particular duty to protect those rights. \nProvide the public with mechanisms for appropriate and meaningful consent, access, and \ncontrol over their data \nUse-specific consent. Consent practices should not allow for abusive surveillance practices. Where data \ncollectors or automated systems seek consent, they should seek it for specific, narrow use contexts, for specif\xad\nic time durations, and for use by specific entities. Consent should not extend if any of these conditions change; \nconsent should be re-acquired before using data if the use case changes, a time limit elapses, or data is trans\xad\nferred to another entity (including being shared or sold). Consent requested should be limited in scope and \nshould not request consent beyond what is required. Refusal to provide consent should be allowed, without \nadverse effects, to the greatest extent possible based on the needs of the use case. \nBrief and direct consent requests. 
When seeking consent from users short, plain language consent \nrequests should be used so that users understand for what use contexts, time span, and entities they are \nproviding data and metadata consent. User experience research should be performed to ensure these consent \nrequests meet performance standards for readability and comprehension. This includes ensuring that consent \nrequests are accessible to users with disabilities and are available in the language(s) and reading level appropriate for the audience. User experience design choices that intentionally obfuscate or manipulate user \nchoice (i.e., “dark patterns”) should not be used. \n34\n']","Civil liberties play a crucial role in the context of surveillance systems by ensuring that civil rights are not limited by the threat of surveillance or harassment facilitated by automated systems. Surveillance systems should not monitor the exercise of democratic rights, such as voting, privacy, peaceful assembly, speech, or association, in a way that restricts these civil liberties. Additionally, information related to identity should be carefully limited to avoid algorithmic discrimination, and continuous surveillance should not be used in ways that suppress the exercise of rights.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 33, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What measures are suggested to assess the environmental impact of AI model training and management activities?,"[' \n37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities – as identified in the MAP \nfunction – are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. \nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, fine tuning, and deploying models: Verify tradeoffs \nbetween resources used at inference time versus additional resources required \nat training time. \nEnvironmental \nMS-2.12-004 Verify effectiveness of carbon capture or offset programs for GAI training and \napplications, and address green-washing concerns. 
\nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","The suggested measures to assess the environmental impact of AI model training and management activities include: 1) Assessing safety to physical environments when deploying GAI systems, 2) Documenting anticipated environmental impacts of model development, maintenance, and deployment in product design decisions, 3) Measuring or estimating environmental impacts such as energy and water consumption for training, fine-tuning, and deploying models, and verifying trade-offs between resources used at inference time versus additional resources required at training time, and 4) Verifying the effectiveness of carbon capture or offset programs for GAI training and applications, while addressing green-washing concerns.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 40, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What should designers and developers provide to ensure clear understanding of system functioning in automated systems?,"[' \nYou should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality changes. You should know how and why an outcome impacting you was determined by an automated \nsystem, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. 
\nNOTICE AND EXPLANATION\n40\n']","Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation that includes clear descriptions of the overall system functioning and the role automation plays.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 39, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What is the role of the National Institute of Standards and Technology in advancing artificial intelligence?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. 
\n \n']","The National Institute of Standards and Technology (NIST) develops measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) to realize its full commercial and societal benefits without harm to people or the planet. NIST has conducted both fundamental and applied work on AI for more than a decade and is helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the purpose of using structured feedback mechanisms in relation to AI-generated content?,"[' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Conļ¬guration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentiļ¬able information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identiļ¬ed. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notiļ¬cation format. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. 
\nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a speciļ¬c GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Conļ¬guration \n']",The purpose of using structured feedback mechanisms in relation to AI-generated content is to solicit and capture user input about the content to detect subtle shifts in quality or alignment with community and societal values.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What measures are suggested to ensure information integrity in the deployment of GAI systems?,"[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Conļ¬guration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, ļ¬ne-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Conļ¬guration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Conļ¬guration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that ļ¬ne-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. \nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n']","Suggested measures to ensure information integrity in the deployment of GAI systems include verifying GAI system training data and TEVV data provenance, and ensuring that fine-tuning or retrieval-augmented generation data is grounded. 
Additionally, it is recommended to review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What steps should automated systems take to avoid bias and support equity for marginalized groups?,"[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. 
The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and intersex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. 
\n26\nAlgorithmic \nDiscrimination \nProtections \n']","Automated systems should take several steps to avoid bias and support equity for marginalized groups, including conducting proactive equity assessments during the design phase to identify potential discrimination, using representative and robust data that reflects local communities, and guarding against the use of demographic proxies that could lead to algorithmic discrimination. These steps should be integrated throughout the design, development, and deployment processes to ensure ongoing protection against algorithmic discrimination.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
Why is user consent important for protecting personal data?,"[' \n \n \n \n \nSECTION TITLE\nDATA PRIVACY\nYou should be protected from abusive data practices via built-in protections and you \nshould have agency over how data about you is used. You should be protected from violations of \nprivacy through design choices that ensure such protections are included by default, including ensuring that \ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \nusers with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases \nwhere it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable \nin plain language, and give you agency over data collection and the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \nrestrictions for data and inferences related to sensitive domains, including health, work, education, criminal \njustice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \nrelated inferences should only be used for necessary functions, and you should be protected by ethical review \nand use prohibitions. 
You and your communities should be free from unchecked surveillance; surveillance \ntechnologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \npotential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the use of such surveillance \ntechnologies is likely to limit rights, opportunities, or access. Whenever possible, you should have access to \nreporting that confirms your data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or access. \nNOTICE AND EXPLANATION\nYou should know that an automated system is being used and understand how and why it \ncontributes to outcomes that impact you. Designers, developers, and deployers of automated systems \nshould provide generally accessible plain language documentation including clear descriptions of the overall \nsystem functioning and the role automation plays, notice that such systems are in use, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice \nshould be kept up-to-date and people impacted by the system should be notified of significant use case or key \nfunctionality changes. You should know how and why an outcome impacting you was determined by an \nautomated system, including when the automated system is not the sole input determining the outcome. \nAutomated systems should provide explanations that are technically valid, meaningful and useful to you and to \nany operators or others who need to understand the system, and calibrated to the level of risk based on the \ncontext. Reporting that includes summary information about these automated systems in plain language and \nassessments of the clarity and quality of the notice and explanations should be made public whenever possible. \n6\n', 'You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases where it can be \nappropriately and meaningfully given. Any consent requests should be brief, \nbe understandable in plain language, and give you agency over data collection \nand the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. 
Enhanced \nprotections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and \nfor data pertaining to youth should put you first. In sensitive domains, your \ndata and related inferences should only be used for necessary functions, and \nyou should be protected by ethical review and use prohibitions. You and your \ncommunities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least \npre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the \nuse of such surveillance technologies is likely to limit rights, opportunities, or \naccess. Whenever possible, you should have access to reporting that confirms \nyour data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or \naccess. \nDATA PRIVACY\n30\n']","User consent is important for protecting personal data because it ensures that data collection is justified only in cases where consent can be appropriately and meaningfully given. Consent requests should be brief, understandable in plain language, and provide individuals with agency over their data collection and its specific context of use. This approach helps to prevent abusive data practices and ensures that individuals have control over how their data is used.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 5, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 29, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What steps, like bias testing, ensure fair automated systems?,"["" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. 
Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts—basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women applicants for spurious and discriminatory reasons; resumes with the word “women’s,” such as “women’s\nchess club captain,” were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. 
The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n"", ' \nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable \nway. \nAlgorithmic \ndiscrimination \noccurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex \n(including \npregnancy, \nchildbirth, \nand \nrelated \nmedical \nconditions, \ngender \nidentity, \nintersex \nstatus, \nand \nsexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic information, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23\n']","Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to products being changed or not launched to prevent harm. Federal government agencies are developing standards and guidance for the use of automated systems to help prevent bias. 
Non-profits and companies have developed best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 22, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How does the NAACP impact civil rights in tech governance?,"['APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew America’s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation \nStephanie Dinkins and the Future \nHistories Studio at Stony Brook \nUniversity \nTechNet \nThe Alliance for Media Arts and \nCulture, MIT Open Documentary \nLab and Co-Creation Studio, and \nImmerse \nThe International Brotherhood of \nTeamsters \nThe Leadership Conference on \nCivil and Human Rights \nThorn \nU.S. Chamber of Commerce’s \nTechnology Engagement Center \nUber Technologies \nUniversity of Pittsburgh \nUndergraduate Student \nCollaborative \nUpturn \nUS Technology Policy Committee \nof the Association of Computing \nMachinery \nVirginia Puccio \nVisar Berisha and Julie Liss \nXR Association \nXR Safety Initiative \n• As an additional effort to reach out to stakeholders regarding the RFI, OSTP conducted two listening sessions\nfor members of the public. The listening sessions together drew upwards of 300 participants. 
The Science and\nTechnology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61\n', 'APPENDIX\nSummaries of Additional Engagements: \n• OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of\nartificial intelligence and other data-driven technologies in their lives.\n• OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The\npurpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or\nplanned use; the domains in which these technologies are being used; the entities making use of them; current\nprinciples, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their\nuse or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below\nlisted organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L. Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium \nat Computing Research Association \nConnected Health Initiative \nConsumer Technology Association \nCourtney Radsch \nCoworker \nCyber Farm Labs \nData & Society Research Institute \nData for Black Lives \nData to Actionable Knowledge Lab \nat Harvard University \nDeloitte \nDev Technology Group \nDigital Therapeutics Alliance \nDigital Welfare State & Human \nRights Project and Center for \nHuman Rights and Global Justice at \nNew York University School of \nLaw, and Temple University \nInstitute for Law, Innovation & \nTechnology \nDignari \nDouglas Goddard \nEdgar Dworsky \nElectronic Frontier Foundation \nElectronic Privacy Information \nCenter, Center for Digital \nDemocracy, and Consumer \nFederation of America \nFaceTec \nFight for the Future \nGanesh Mani \nGeorgia Tech Research Institute \nGoogle \nHealth Information Technology \nResearch and Development \nInteragency Working Group \nHireVue \nHR Policy Association \nID.me \nIdentity and Data Sciences \nLaboratory at Science Applications \nInternational Corporation \nInformation Technology and \nInnovation Foundation \nInformation Technology Industry \nCouncil \nInnocence Project \nInstitute for Human-Centered \nArtificial Intelligence at Stanford \nUniversity \nIntegrated Justice Information \nSystems Institute \nInternational Association of Chiefs \nof Police \nInternational Biometrics + Identity \nAssociation \nInternational Business 
Machines \nCorporation \nInternational Committee of the Red \nCross \nInventionphysics \niProov \nJacob Boudreau \nJennifer K. Wagner, Dan Berger, \nMargaret Hu, and Sara Katsanis \nJonathan Barry-Blocker \nJoseph Turow \nJoy Buolamwini \nJoy Mack \nKaren Bureau \nLamont Gholston \nLawyers’ Committee for Civil \nRights Under Law \n60\n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 60, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 59, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How does DARPA's XAI tackle opaque AI decision-making challenges?,"["" \n \nENDNOTES\n85. Mick Dumke and Frank Main. A look inside the watch list Chicago police fought to keep secret. The\nChicago Sun Times. May 18, 2017.\nhttps://chicago.suntimes.com/2017/5/18/18386116/a-look-inside-the-watch-list-chicago-police-fought-to-keep-secret\n86. Jay Stanley. Pitfalls of Artificial Intelligence Decisionmaking Highlighted In Idaho ACLU Case.\nACLU. Jun. 2, 2017.\nhttps://www.aclu.org/blog/privacy-technology/pitfalls-artificial-intelligence-decisionmaking-highlighted-idaho-aclu-case\n87. Illinois General Assembly. Biometric Information Privacy Act. Effective Oct. 3, 2008.\nhttps://www.ilga.gov/legislation/ilcs/ilcs3.asp?ActID=3004&ChapterID=57\n88. Partnership on AI. ABOUT ML Reference Document. Accessed May 2, 2022.\nhttps://partnershiponai.org/paper/about-ml-reference-document/1/\n89. See, e.g., the model cards framework: Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker\nBarnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru.\nModel Cards for Model Reporting. In Proceedings of the Conference on Fairness, Accountability, and\nTransparency (FAT* '19). Association for Computing Machinery, New York, NY, USA, 220–229. https://\ndl.acm.org/doi/10.1145/3287560.3287596\n90. Sarah Ammermann. Adverse Action Notice Requirements Under the ECOA and the FCRA. Consumer\nCompliance Outlook. Second Quarter 2013.\nhttps://consumercomplianceoutlook.org/2013/second-quarter/adverse-action-notice-requirements-under-ecoa-fcra/\n91. Federal Trade Commission. Using Consumer Reports for Credit Decisions: What to Know About\nAdverse Action and Risk-Based Pricing Notices. Accessed May 2, 2022.\nhttps://www.ftc.gov/business-guidance/resources/using-consumer-reports-credit-decisions-what-know-about-adverse-action-risk-based-pricing-notices#risk\n92. Consumer Financial Protection Bureau. CFPB Acts to Protect the Public from Black-Box Credit\nModels Using Complex Algorithms. 
May 26, 2022.\nhttps://www.consumerfinance.gov/about-us/newsroom/cfpb-acts-to-protect-the-public-from-black-box-credit-models-using-complex-algorithms/\n93. Anthony Zaller. California Passes Law Regulating Quotas In Warehouses – What Employers Need to\nKnow About AB 701. Zaller Law Group California Employment Law Report. Sept. 24, 2021.\nhttps://www.californiaemploymentlawreport.com/2021/09/california-passes-law-regulating-quotas-in-warehouses-what-employers-need-to-know-about-ab-701/\n94. National Institute of Standards and Technology. AI Fundamental Research – Explainability.\nAccessed Jun. 4, 2022.\nhttps://www.nist.gov/artificial-intelligence/ai-fundamental-research-explainability\n95. DARPA. Explainable Artificial Intelligence (XAI). Accessed July 20, 2022.\nhttps://www.darpa.mil/program/explainable-artificial-intelligence\n71\n""]",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 70, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What concerns did panelists raise about AI in policing and its impact on safety and democracy?,"["" \n \n \n \nAPPENDIX\nPanelists discussed the benefits of AI-enabled systems and their potential to build better and more \ninnovative infrastructure. They individually noted that while AI technologies may be new, the process of \ntechnological diffusion is not, and that it was critical to have thoughtful and responsible development and \nintegration of technology within communities. Some panelists suggested that the integration of technology \ncould benefit from examining how technological diffusion has worked in the realm of urban planning: \nlessons learned from successes and failures there include the importance of balancing ownership rights, use \nrights, and community health, safety and welfare, as well ensuring better representation of all voices, \nespecially those traditionally marginalized by technological advances. Some panelists also raised the issue of \npower structures – providing examples of how strong transparency requirements in smart city projects \nhelped to reshape power and give more voice to those lacking the financial or political power to effect change. \nIn discussion of technical and governance interventions that are needed to protect against the harms \nof these technologies, various panelists emphasized the need for transparency, data collection, and \nflexible and reactive policy development, analogous to how software is continuously updated and deployed. \nSome panelists pointed out that companies need clear guidelines to have a consistent environment for \ninnovation, with principles and guardrails being the key to fostering responsible innovation. \nPanel 2: The Criminal Justice System. This event explored current and emergent uses of technology in \nthe criminal justice system and considered how they advance or undermine public safety, justice, and \ndemocratic values. 
\nWelcome: \n•\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n•\nBen Winters, Counsel, Electronic Privacy Information Center\nModerator: Chiraag Bains, Deputy Assistant to the President on Racial Justice & Equity \nPanelists: \n•\nSean Malinowski, Director of Policing Innovation and Reform, University of Chicago Crime Lab\n•\nKristian Lum, Researcher\n•\nJumana Musa, Director, Fourth Amendment Center, National Association of Criminal Defense Lawyers\n•\nStanley Andrisse, Executive Director, From Prison Cells to PHD; Assistant Professor, Howard University\nCollege of Medicine\n•\nMyaisha Hayes, Campaign Strategies Director, MediaJustice\nPanelists discussed uses of technology within the criminal justice system, including the use of predictive \npolicing, pretrial risk assessments, automated license plate readers, and prison communication tools. The \ndiscussion emphasized that communities deserve safety, and strategies need to be identified that lead to safety; \nsuch strategies might include data-driven approaches, but the focus on safety should be primary, and \ntechnology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that are needed to protect against the harms of \nthese technologies, various panelists emphasized that transparency is important but is not enough to achieve \naccountability. Some panelists discussed their individual views on additional system needs for validity, and \nagreed upon the importance of advisory boards and compensated community input early in the design process \n(before the technology is built and instituted). Various panelists also emphasized the importance of regulation \nthat includes limits to the type and cost of such technologies. \n56\n"", "" \n \n \n \n \nAPPENDIX\nPanel 4: Artificial Intelligence and Democratic Values. This event examined challenges and opportunities in \nthe design of technology that can help support a democratic vision for AI. It included discussion of the \ntechnical aspects \nof \ndesigning \nnon-discriminatory \ntechnology, \nexplainable \nAI, \nhuman-computer \ninteraction with an emphasis on community participation, and privacy-aware design. \nWelcome:\n•\nSorelle Friedler, Assistant Director for Data and Democracy, White House Office of Science and\nTechnology Policy\n•\nJ. Bob Alotta, Vice President for Global Programs, Mozilla Foundation\n•\nNavrina Singh, Board Member, Mozilla Foundation\nModerator: Kathy Pham Evans, Deputy Chief Technology Officer for Product and Engineering, U.S. \nFederal Trade Commission. 
\nPanelists: \n•\nLiz O’Sullivan, CEO, Parity AI\n•\nTimnit Gebru, Independent Scholar\n•\nJennifer Wortman Vaughan, Senior Principal Researcher, Microsoft Research, New York City\n•\nPamela Wisniewski, Associate Professor of Computer Science, University of Central Florida; Director,\nSocio-technical Interaction Research (STIR) Lab\n•\nSeny Kamara, Associate Professor of Computer Science, Brown University\nEach panelist individually emphasized the risks of using AI in high-stakes settings, including the potential for \nbiased data and discriminatory outcomes, opaque decision-making processes, and lack of public trust and \nunderstanding of the algorithmic systems. The interventions and key needs various panelists put forward as \nnecessary to the future design of critical AI systems included ongoing transparency, value sensitive and \nparticipatory design, explanations designed for relevant stakeholders, and public consultation. \nVarious \npanelists emphasized the importance of placing trust in people, not technologies, and in engaging with \nimpacted communities to understand the potential harms of technologies and build protection by design into \nfuture systems. \nPanel 5: Social Welfare and Development. This event explored current and emerging uses of technology to \nimplement or improve social welfare systems, social development programs, and other systems that can impact \nlife chances. \nWelcome:\n•\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n•\nAnne-Marie Slaughter, CEO, New America\nModerator: Michele Evermore, Deputy Director for Policy, Office of Unemployment Insurance \nModernization, Office of the Secretary, Department of Labor \nPanelists:\n•\nBlake Hall, CEO and Founder, ID.Me\n•\nKarrie Karahalios, Professor of Computer Science, University of Illinois, Urbana-Champaign\n•\nChristiaan van Veen, Director of Digital Welfare State and Human Rights Project, NYU School of Law's\nCenter for Human Rights and Global Justice\n58\n""]","Panelists raised concerns about the validity of AI systems used in policing, noting that adverse or irrelevant data can lead to a replication of unjust outcomes. They highlighted issues such as confirmation bias and the tendency to defer to potentially inaccurate automated systems. The impact of these systems on individuals and communities is seen as potentially severe, with concerns that they lack individualization, undermine the belief in people's ability to change for the better, and can lead to job loss and custody issues. Additionally, surveillance technologies can create chilling effects in communities and send negative signals about how community members are viewed. 
Panelists emphasized that while transparency is important, it is not sufficient for achieving accountability.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 55, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 57, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What role does the OSTP play in the AI Bill of Rights regarding public input and civil liberties?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nAbout this Document \nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People was \npublished by the White House Office of Science and Technology Policy in October 2022. This framework was \nreleased one year after OSTP announced the launch of a process to develop “a bill of rights for an AI-powered \nworld.” Its release follows a year of public engagement to inform this initiative. The framework is available \nonline at: https://www.whitehouse.gov/ostp/ai-bill-of-rights \nAbout the Office of Science and Technology Policy \nThe Office of Science and Technology Policy (OSTP) was established by the National Science and Technology \nPolicy, Organization, and Priorities Act of 1976 to provide the President and others within the Executive Office \nof the President with advice on the scientific, engineering, and technological aspects of the economy, national \nsecurity, health, foreign relations, the environment, and the technological recovery and use of resources, among \nother topics. OSTP leads interagency science and technology policy coordination efforts, assists the Office of \nManagement and Budget (OMB) with an annual review and analysis of Federal research and development in \nbudgets, and serves as a source of scientific and technological analysis and judgment for the President with \nrespect to major policies, plans, and programs of the Federal Government. \nLegal Disclaimer \nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People is a white paper \npublished by the White House Office of Science and Technology Policy. It is intended to support the \ndevelopment of policies and practices that protect civil rights and promote democratic values in the building, \ndeployment, and governance of automated systems. \nThe Blueprint for an AI Bill of Rights is non-binding and does not constitute U.S. government policy. It \ndoes not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. 
It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles \nin whole or in part may not be appropriate given the intended use of automated systems to achieve government \nagency missions. Future sector-specific guidance will likely be necessary and important for guiding the use of \nautomated systems in certain settings such as AI systems used as part of school building security or automated \nhealth diagnostic systems. \nThe Blueprint for an AI Bill of Rights recognizes that law enforcement activities require a balancing of \nequities, for example, between the protection of sensitive law enforcement information and the principle of \nnotice; as such, notice may not be appropriate, or may need to be adjusted to protect sources, methods, and \nother law enforcement equities. Even in contexts where these principles may not apply in whole or in part, \nfederal departments and agencies remain subject to judicial, privacy, and civil liberties oversight as well as \nexisting policies and safeguards that govern automated systems, including, for example, Executive Order 13960, \nPromoting the Use of Trustworthy Artificial Intelligence in the Federal Government (December 2020). \nThis white paper recognizes that national security (which includes certain law enforcement and \nhomeland security activities) and defense activities are of increased sensitivity and interest to our nation’s \nadversaries and are often subject to special requirements, such as those governing classified information and \nother protected data. Such activities require alternative, compatible safeguards through existing policies that \ngovern automated systems and AI, such as the Department of Defense (DOD) AI Ethical Principles and \nResponsible AI Implementation Pathway and the Intelligence Community (IC) AI Ethics Principles and \nFramework. The implementation of these policies to national security and defense activities can be informed by \nthe Blueprint for an AI Bill of Rights where feasible. \nThe Blueprint for an AI Bill of Rights is not intended to, and does not, create any legal right, benefit, or \n', ' \n \n \nABOUT THIS FRAMEWORK\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the \ndesign, use, and deployment of automated systems to protect the rights of the American public in the age of \nartificial intelligence. Developed through extensive consultation with the American public, these principles are \na blueprint for building and deploying automated systems that are aligned with democratic values and protect \ncivil rights, civil liberties, and privacy. 
The Blueprint for an AI Bill of Rights includes this Foreword, the five \nprinciples, notes on Applying the Blueprint for an AI Bill of Rights, and a Technical Companion that gives \nconcrete steps that can be taken by many kinds of organizations—from governments at all levels to companies of \nall sizes—to uphold these values. Experts from across the private sector, governments, and international \nconsortia have published principles and frameworks to guide the responsible use of automated systems; this \nframework provides a national values statement and toolkit that is sector-agnostic to inform building these \nprotections into policy, practice, or the technological design process. Where existing law or policy—such as \nsector-specific privacy laws and oversight requirements—do not already provide guidance, the Blueprint for an \nAI Bill of Rights should be used to inform policy decisions.\nLISTENING TO THE AMERICAN PUBLIC\nThe White House Office of Science and Technology Policy has led a year-long process to seek and distill input \nfrom people across the country—from impacted communities and industry stakeholders to technology develop-\ners and other experts across fields and sectors, as well as policymakers throughout the Federal government—on \nthe issue of algorithmic and data-driven harms and potential remedies. Through panel discussions, public listen-\ning sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4\n']","The Office of Science and Technology Policy (OSTP) plays a crucial role in the AI Bill of Rights by leading a year-long process to seek and distill input from various stakeholders, including impacted communities, industry stakeholders, technology developers, and policymakers. 
This engagement informs the development of policies and practices that protect civil rights and promote democratic values in the governance of automated systems.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 1, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 3, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+How do Model Cards enhance AI transparency and accountability amid privacy issues?,"["" \n \nENDNOTES\n85. Mick Dumke and Frank Main. A look inside the watch list Chicago police fought to keep secret. The\nChicago Sun Times. May 18, 2017.\nhttps://chicago.suntimes.com/2017/5/18/18386116/a-look-inside-the-watch-list-chicago-police-fought-\nto-keep-secret\n86. Jay Stanley. Pitfalls of Artificial Intelligence Decisionmaking Highlighted In Idaho ACLU Case.\nACLU. Jun. 2, 2017.\nhttps://www.aclu.org/blog/privacy-technology/pitfalls-artificial-intelligence-decisionmaking-\nhighlighted-idaho-aclu-case\n87. Illinois General Assembly. Biometric Information Privacy Act. Effective Oct. 3, 2008.\nhttps://www.ilga.gov/legislation/ilcs/ilcs3.asp?ActID=3004&ChapterID=57\n88. Partnership on AI. ABOUT ML Reference Document. Accessed May 2, 2022.\nhttps://partnershiponai.org/paper/about-ml-reference-document/1/\n89. See, e.g., the model cards framework: Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker\nBarnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru.\nModel Cards for Model Reporting. In Proceedings of the Conference on Fairness, Accountability, and\nTransparency (FAT* '19). Association for Computing Machinery, New York, NY, USA, 220–229. https://\ndl.acm.org/doi/10.1145/3287560.3287596\n90. Sarah Ammermann. Adverse Action Notice Requirements Under the ECOA and the FCRA. Consumer\nCompliance Outlook. Second Quarter 2013.\nhttps://consumercomplianceoutlook.org/2013/second-quarter/adverse-action-notice-requirements-\nunder-ecoa-fcra/\n91. Federal Trade Commission. Using Consumer Reports for Credit Decisions: What to Know About\nAdverse Action and Risk-Based Pricing Notices. Accessed May 2, 2022.\nhttps://www.ftc.gov/business-guidance/resources/using-consumer-reports-credit-decisions-what-\nknow-about-adverse-action-risk-based-pricing-notices#risk\n92. Consumer Financial Protection Bureau. CFPB Acts to Protect the Public from Black-Box Credit\nModels Using Complex Algorithms. May 26, 2022.\nhttps://www.consumerfinance.gov/about-us/newsroom/cfpb-acts-to-protect-the-public-from-black-\nbox-credit-models-using-complex-algorithms/\n93. Anthony Zaller. 
California Passes Law Regulating Quotas In Warehouses – What Employers Need to\nKnow About AB 701. Zaller Law Group California Employment Law Report. Sept. 24, 2021.\nhttps://www.californiaemploymentlawreport.com/2021/09/california-passes-law-regulating-quotas-\nin-warehouses-what-employers-need-to-know-about-ab-701/\n94. National Institute of Standards and Technology. AI Fundamental Research – Explainability.\nAccessed Jun. 4, 2022.\nhttps://www.nist.gov/artificial-intelligence/ai-fundamental-research-explainability\n95. DARPA. Explainable Artificial Intelligence (XAI). Accessed July 20, 2022.\nhttps://www.darpa.mil/program/explainable-artificial-intelligence\n71\n"", "" \n65. See, e.g., Scott Ikeda. Major Data Broker Exposes 235 Million Social Media Profiles in Data Leak: Info\nAppears to Have Been Scraped Without Permission. CPO Magazine. Aug. 28, 2020. https://\nwww.cpomagazine.com/cyber-security/major-data-broker-exposes-235-million-social-media-profiles-\nin-data-leak/; Lily Hay Newman. 1.2 Billion Records Found Exposed Online in a Single Server. WIRED,\nNov. 22, 2019. https://www.wired.com/story/billion-records-exposed-online/\n66. Lola Fadulu. Facial Recognition Technology in Public Housing Prompts Backlash. New York Times.\nSept. 24, 2019.\nhttps://www.nytimes.com/2019/09/24/us/politics/facial-recognition-technology-housing.html\n67. Jo Constantz. ‘They Were Spying On Us’: Amazon, Walmart, Use Surveillance Technology to Bust\nUnions. Newsweek. Dec. 13, 2021.\nhttps://www.newsweek.com/they-were-spying-us-amazon-walmart-use-surveillance-technology-bust-\nunions-1658603\n68. See, e.g., enforcement actions by the FTC against the photo storage app Everalbaum\n(https://www.ftc.gov/legal-library/browse/cases-proceedings/192-3172-everalbum-inc-matter), and\nagainst Weight Watchers and their subsidiary Kurbo\n(https://www.ftc.gov/legal-library/browse/cases-proceedings/1923228-weight-watchersww)\n69. See, e.g., HIPAA, Pub. L 104-191 (1996); Fair Debt Collection Practices Act (FDCPA), Pub. L. 95-109\n(1977); Family Educational Rights and Privacy Act (FERPA) (20 U.S.C. § 1232g), Children's Online\nPrivacy Protection Act of 1998, 15 U.S.C. 6501–6505, and Confidential Information Protection and\nStatistical Efficiency Act (CIPSEA) (116 Stat. 2899)\n70. Marshall Allen. You Snooze, You Lose: Insurers Make The Old Adage Literally True. ProPublica. Nov.\n21, 2018.\nhttps://www.propublica.org/article/you-snooze-you-lose-insurers-make-the-old-adage-literally-true\n71. Charles Duhigg. How Companies Learn Your Secrets. The New York Times. Feb. 16, 2012.\nhttps://www.nytimes.com/2012/02/19/magazine/shopping-habits.html\n72. Jack Gillum and Jeff Kao. Aggression Detectors: The Unproven, Invasive Surveillance Technology\nSchools are Using to Monitor Students. ProPublica. Jun. 25, 2019.\nhttps://features.propublica.org/aggression-detector/the-unproven-invasive-surveillance-technology-\nschools-are-using-to-monitor-students/\n73. Drew Harwell. Cheating-detection companies made millions during the pandemic. Now students are\nfighting back. Washington Post. Nov. 12, 2020.\nhttps://www.washingtonpost.com/technology/2020/11/12/test-monitoring-student-revolt/\n74. See, e.g., Heather Morrison. Virtual Testing Puts Disabled Students at a Disadvantage. Government\nTechnology. May 24, 2022.\nhttps://www.govtech.com/education/k-12/virtual-testing-puts-disabled-students-at-a-disadvantage;\nLydia X. Z. Brown, Ridhi Shetty, Matt Scherer, and Andrew Crawford. 
Ableism And Disability\nDiscrimination In New Surveillance Technologies: How new surveillance technologies in education,\npolicing, health care, and the workplace disproportionately harm disabled people. Center for Democracy\nand Technology Report. May 24, 2022.\nhttps://cdt.org/insights/ableism-and-disability-discrimination-in-new-surveillance-technologies-how-\nnew-surveillance-technologies-in-education-policing-health-care-and-the-workplace-\ndisproportionately-harm-disabled-people/\n69\n""]",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 70, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 68, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What does the AI Bill of Rights suggest for protecting civil rights in tech?,"[' \nSECTION TITLE\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent. \nThese outcomes are deeply harmful—but they are not inevitable. Automated systems have brought about extraor-\ndinary benefits, from technology that helps farmers grow food more efficiently and computers that predict storm \npaths, to algorithms that can identify diseases in patients. These tools now drive important decisions across \nsectors, while data is helping to revolutionize global industries. Fueled by the power of American innovation, \nthese tools hold the potential to redefine every part of our society and make life better for everyone. \nThis important progress must not come at the price of civil rights or democratic values, foundational American \nprinciples that President Biden has affirmed as a cornerstone of his Administration. 
On his first day in office, the \nPresident ordered the full Federal government to work to root out inequity, embed fairness in decision-\nmaking processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.1 The \nPresident has spoken forcefully about the urgent challenges posed to democracy today and has regularly called \non people of conscience to act to preserve civil rights—including the right to privacy, which he has called “the \nbasis for so many more rights that we have come to take for granted that are ingrained in the fabric of this \ncountry.”2\nTo advance President Biden’s vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threats—and uses technologies in ways that reinforce our highest values. \nResponding to the experiences of the American public, and informed by insights from researchers, \ntechnologists, advocates, journalists, and policymakers, this framework is accompanied by a technical \ncompanion—a handbook for anyone seeking to incorporate these protections into policy and practice, including \ndetailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3\n', ' \n \n \n \n \n \n \n \n \n \nBLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022 \n']","The AI Bill of Rights suggests guiding the design, use, and deployment of automated systems to protect the American public, ensuring that these technologies reinforce civil rights and democratic values. 
It emphasizes the need to root out inequity, embed fairness in decision-making processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 2, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 0, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What steps are taken to ensure fair use of automated systems?,"["" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. 
The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts—basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women appli-\ncants for spurious and discriminatory reasons; resumes with the word “women’s,” such as “women’s\nchess club captain,” were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n"", ' \nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable \nway. \nAlgorithmic \ndiscrimination \noccurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex \n(including \npregnancy, \nchildbirth, \nand \nrelated \nmedical \nconditions, \ngender \nidentity, \nintersex \nstatus, \nand \nsexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic infor-mation, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. 
This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23\n']","Many companies, non-profits, and federal government agencies are taking steps to ensure the public is protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product quality assessment and launch procedures, which has led to changes or prevented harmful product launches. Federal agencies are developing standards and guidance for the use of automated systems to help prevent bias. Non-profits and companies have developed best practices for audits and impact assessments to identify potential algorithmic discrimination and provide transparency in mitigating such biases.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 22, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the AI ethics for intel and their alignment with NIST standards?,"[' \nENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public\nAdministration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. 
The US Department of Transportation has publicly described the health and other benefits of these\n“traffic calming” measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow-\nVehicle-Speeds\n17. Karen Hao. Worried about your firm’s AI ethics? These startups are here to help.\nA growing ecosystem of “responsible AI” ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan 15., 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://\nwww.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for-\nin-2021/\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985-\nImplementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. Department of Energy Artificial Intelligence and Technology Office. April 18, 2022. https://\nwww.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S. Department of Defense Responsible Artificial Intelligence Strategy and\nImplementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation-\nPathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for-\nthe-intelligence-community\n64\n', ' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. 
\nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 63, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What's the role of incident response plans in assessing GAI performance and AI Actor communication during incidents?,"[' \n45 \nMG-4.1-007 \nVerify that AI Actors responsible for monitoring reported issues can effectively \nevaluate GAI system performance including the application of content \nprovenance data tracking techniques, and promptly escalate issues for response. \nHuman-AI Configuration; \nInformation Integrity \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, Domain Experts, End-Users, Human Factors, Operation and \nMonitoring \n \nMANAGE 4.2: Measurable activities for continual improvements are integrated into AI system updates and include regular \nengagement with interested parties, including relevant AI Actors. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.2-001 Conduct regular monitoring of GAI systems and publish reports detailing the \nperformance, feedback received, and improvements made. 
\nHarmful Bias and Homogenization \nMG-4.2-002 \nPractice and follow incident response plans for addressing the generation of \ninappropriate or harmful content and adapt processes based on findings to \nprevent future occurrences. Conduct post-mortem analyses of incidents with \nrelevant AI Actors, to understand the root causes and implement preventive \nmeasures. \nHuman-AI Configuration; \nDangerous, Violent, or Hateful \nContent \nMG-4.2-003 Use visualizations or other methods to represent GAI model behavior to ease \nnon-technical stakeholders understanding of GAI system functionality. \nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Affected Individuals and Communities, End-Users, Operation and \nMonitoring, TEVV \n \nMANAGE 4.3: Incidents and errors are communicated to relevant AI Actors, including affected communities. Processes for tracking, \nresponding to, and recovering from incidents and errors are followed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.3-001 \nConduct after-action assessments for GAI system incidents to verify incident \nresponse and recovery processes are followed and effective, including to follow \nprocedures for communicating incidents to relevant AI Actors and where \napplicable, relevant legal and regulatory bodies. \nInformation Security \nMG-4.3-002 Establish and maintain policies and procedures to record and track GAI system \nreported errors, near-misses, and negative impacts. \nConfabulation; Information \nIntegrity \n', ' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Configuration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentifiable information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identified. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notification format. 
\nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a specific GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Configuration \n']","Incident response plans play a crucial role in assessing GAI performance by providing structured procedures for addressing the generation of inappropriate or harmful content. They ensure that incidents are communicated to relevant AI Actors, including affected communities, and that processes for tracking, responding to, and recovering from incidents are followed and documented. This structured approach helps in understanding the root causes of incidents and implementing preventive measures, thereby enhancing overall AI Actor communication during such events.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 48, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+How do GAI incident docs help AI Actors assess and manage system performance?,"[' \n53 \nDocumenting, reporting, and sharing information about GAI incidents can help mitigate and prevent \nharmful outcomes by assisting relevant AI Actors in tracing impacts to their source. Greater awareness \nand standardization of GAI incident reporting could promote this transparency and improve GAI risk \nmanagement across the AI ecosystem. \nDocumentation and Involvement of AI Actors \nAI Actors should be aware of their roles in reporting AI incidents. To better understand previous incidents \nand implement measures to prevent similar ones in the future, organizations could consider developing \nguidelines for publicly available incident reporting which include information about AI actor \nresponsibilities. These guidelines would help AI system operators identify GAI incidents across the AI \nlifecycle and with AI Actors regardless of role. 
Documentation and review of third-party inputs and \nplugins for GAI systems is especially important for AI Actors in the context of incident disclosure; LLM \ninputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents. \n \n', ' \n45 \nMG-4.1-007 \nVerify that AI Actors responsible for monitoring reported issues can effectively \nevaluate GAI system performance including the application of content \nprovenance data tracking techniques, and promptly escalate issues for response. \nHuman-AI Configuration; \nInformation Integrity \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, Domain Experts, End-Users, Human Factors, Operation and \nMonitoring \n \nMANAGE 4.2: Measurable activities for continual improvements are integrated into AI system updates and include regular \nengagement with interested parties, including relevant AI Actors. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.2-001 Conduct regular monitoring of GAI systems and publish reports detailing the \nperformance, feedback received, and improvements made. \nHarmful Bias and Homogenization \nMG-4.2-002 \nPractice and follow incident response plans for addressing the generation of \ninappropriate or harmful content and adapt processes based on findings to \nprevent future occurrences. Conduct post-mortem analyses of incidents with \nrelevant AI Actors, to understand the root causes and implement preventive \nmeasures. \nHuman-AI Configuration; \nDangerous, Violent, or Hateful \nContent \nMG-4.2-003 Use visualizations or other methods to represent GAI model behavior to ease \nnon-technical stakeholders understanding of GAI system functionality. \nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Affected Individuals and Communities, End-Users, Operation and \nMonitoring, TEVV \n \nMANAGE 4.3: Incidents and errors are communicated to relevant AI Actors, including affected communities. Processes for tracking, \nresponding to, and recovering from incidents and errors are followed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.3-001 \nConduct after-action assessments for GAI system incidents to verify incident \nresponse and recovery processes are followed and effective, including to follow \nprocedures for communicating incidents to relevant AI Actors and where \napplicable, relevant legal and regulatory bodies. \nInformation Security \nMG-4.3-002 Establish and maintain policies and procedures to record and track GAI system \nreported errors, near-misses, and negative impacts. \nConfabulation; Information \nIntegrity \n']","GAI incident documentation helps AI Actors assess and manage system performance by facilitating smoother sharing of information regarding incidents, which includes logging, recording, and analyzing GAI incidents. This documentation allows AI Actors to trace impacts to their source, understand previous incidents, and implement measures to prevent similar occurrences in the future. 
Additionally, regular information sharing and maintaining change management records empower AI Actors in responding to and managing AI incidents effectively.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 56, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 48, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+"What principles did the White House OSTP set for civil rights in automated systems, and how was public input involved?","[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nAbout this Document \nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People was \npublished by the White House Office of Science and Technology Policy in October 2022. This framework was \nreleased one year after OSTP announced the launch of a process to develop “a bill of rights for an AI-powered \nworld.” Its release follows a year of public engagement to inform this initiative. The framework is available \nonline at: https://www.whitehouse.gov/ostp/ai-bill-of-rights \nAbout the Office of Science and Technology Policy \nThe Office of Science and Technology Policy (OSTP) was established by the National Science and Technology \nPolicy, Organization, and Priorities Act of 1976 to provide the President and others within the Executive Office \nof the President with advice on the scientific, engineering, and technological aspects of the economy, national \nsecurity, health, foreign relations, the environment, and the technological recovery and use of resources, among \nother topics. OSTP leads interagency science and technology policy coordination efforts, assists the Office of \nManagement and Budget (OMB) with an annual review and analysis of Federal research and development in \nbudgets, and serves as a source of scientific and technological analysis and judgment for the President with \nrespect to major policies, plans, and programs of the Federal Government. \nLegal Disclaimer \nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People is a white paper \npublished by the White House Office of Science and Technology Policy. It is intended to support the \ndevelopment of policies and practices that protect civil rights and promote democratic values in the building, \ndeployment, and governance of automated systems. \nThe Blueprint for an AI Bill of Rights is non-binding and does not constitute U.S. government policy. 
It \ndoes not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles \nin whole or in part may not be appropriate given the intended use of automated systems to achieve government \nagency missions. Future sector-specific guidance will likely be necessary and important for guiding the use of \nautomated systems in certain settings such as AI systems used as part of school building security or automated \nhealth diagnostic systems. \nThe Blueprint for an AI Bill of Rights recognizes that law enforcement activities require a balancing of \nequities, for example, between the protection of sensitive law enforcement information and the principle of \nnotice; as such, notice may not be appropriate, or may need to be adjusted to protect sources, methods, and \nother law enforcement equities. Even in contexts where these principles may not apply in whole or in part, \nfederal departments and agencies remain subject to judicial, privacy, and civil liberties oversight as well as \nexisting policies and safeguards that govern automated systems, including, for example, Executive Order 13960, \nPromoting the Use of Trustworthy Artificial Intelligence in the Federal Government (December 2020). \nThis white paper recognizes that national security (which includes certain law enforcement and \nhomeland security activities) and defense activities are of increased sensitivity and interest to our nation’s \nadversaries and are often subject to special requirements, such as those governing classified information and \nother protected data. Such activities require alternative, compatible safeguards through existing policies that \ngovern automated systems and AI, such as the Department of Defense (DOD) AI Ethical Principles and \nResponsible AI Implementation Pathway and the Intelligence Community (IC) AI Ethics Principles and \nFramework. The implementation of these policies to national security and defense activities can be informed by \nthe Blueprint for an AI Bill of Rights where feasible. \nThe Blueprint for an AI Bill of Rights is not intended to, and does not, create any legal right, benefit, or \n', ' \n \n \nABOUT THIS FRAMEWORK\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the \ndesign, use, and deployment of automated systems to protect the rights of the American public in the age of \nartificial intelligence. 
Developed through extensive consultation with the American public, these principles are \na blueprint for building and deploying automated systems that are aligned with democratic values and protect \ncivil rights, civil liberties, and privacy. The Blueprint for an AI Bill of Rights includes this Foreword, the five \nprinciples, notes on Applying the Blueprint for an AI Bill of Rights, and a Technical Companion that gives \nconcrete steps that can be taken by many kinds of organizations—from governments at all levels to companies of \nall sizes—to uphold these values. Experts from across the private sector, governments, and international \nconsortia have published principles and frameworks to guide the responsible use of automated systems; this \nframework provides a national values statement and toolkit that is sector-agnostic to inform building these \nprotections into policy, practice, or the technological design process. Where existing law or policy—such as \nsector-specific privacy laws and oversight requirements—do not already provide guidance, the Blueprint for an \nAI Bill of Rights should be used to inform policy decisions.\nLISTENING TO THE AMERICAN PUBLIC\nThe White House Office of Science and Technology Policy has led a year-long process to seek and distill input \nfrom people across the country—from impacted communities and industry stakeholders to technology develop-\ners and other experts across fields and sectors, as well as policymakers throughout the Federal government—on \nthe issue of algorithmic and data-driven harms and potential remedies. Through panel discussions, public listen-\ning sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4\n']","The Blueprint for an AI Bill of Rights includes five principles and associated practices to guide the design, use, and deployment of automated systems to protect the rights of the American public. It was developed through extensive consultation with the American public, which involved a year-long process of seeking and distilling input from impacted communities, industry stakeholders, technology developers, and policymakers. 
This public engagement included panel discussions, public listening sessions, and a formal request for information, allowing various voices to shape the principles aimed at preventing algorithmic and data-driven harms.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 1, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 3, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do training and feedback improve understanding of digital content transparency in GAI systems?,"[' \n25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at different stages of AI life cycle. \nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. \nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner proficiency with AI system performance and trustworthiness – and relevant \ntechnical standards and certifications – are defined, assessed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certification programs that test proficiency in managing GAI risks and \ninterpreting content provenance, relevant to specific industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proficiency tests from tests of GAI capabilities. \nHuman-AI Configuration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconfigurations for future refinement and improvements. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping \nand testing activities.
Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \n \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Configuration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (“go”/“no-go” \ndecisions), monitoring, and decommission decisions. \nHuman-AI Configuration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","Training and feedback improve understanding of digital content transparency in GAI systems by providing input for training materials about the capabilities and limitations of GAI systems related to digital content transparency.
This includes actively seeking feedback on generated content quality and potential biases, as well as assessing the general awareness among end users and impacted communities about the availability of feedback channels.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What leads to model collapse in AI, especially with synthetic data and biases?,"[' \n9 \nand reduced content diversity). Overly homogenized outputs can themselves be incorrect, or they may \nlead to unreliable decision-making or amplify harmful biases. These phenomena can flow from \nfoundation models to downstream models and systems, with the foundation models acting as \n“bottlenecks,” or single points of failure. \nOverly homogenized content can contribute to “model collapse.” Model collapse can occur when model \ntraining over-relies on synthetic data, resulting in data points disappearing from the distribution of the \nnew model’s outputs. In addition to threatening the robustness of the model overall, model collapse \ncould lead to homogenized outputs, including by amplifying any homogenization from the model used to \ngenerate the synthetic training data. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Valid and Reliable \n2.7. Human-AI Configuration \nGAI system use can involve varying risks of misconfigurations and poor interactions between a system \nand a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily “averse” to GAI systems, and thus \ndeprive themselves or others of GAI’s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.
\nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Valid and Reliable \n2.8. Information Integrity \nInformation integrity describes the “spectrum of information and associated patterns of its creation, \nexchange, and consumption in society.” High-integrity information can be trusted; “distinguishes fact \nfrom fiction, opinion, and inference; acknowledges uncertainties; and is transparent about its level of \nvetting. This information can be linked to the original source(s) with appropriate evidence. High-integrity \ninformation is also accurate and reliable, can be verified and authenticated, has a clear chain of custody, \nand creates reasonable expectations about when its validity may expire.”11 \n \n \n11 This definition of information integrity is derived from the 2022 White House Roadmap for Researchers on \nPriorities Related to Information Integrity Research and Development. \n', ' \n8 \nTrustworthy AI Characteristics: Accountable and Transparent, Privacy Enhanced, Safe, Secure and \nResilient \n2.5. Environmental Impacts \nTraining, maintaining, and operating (running inference on) GAI systems are resource-intensive activities, \nwith potentially large energy and environmental footprints. Energy and carbon emissions vary based on \nwhat is being done with the GAI model (i.e., pre-training, fine-tuning, inference), the modality of the \ncontent, hardware used, and type of task or application. \nCurrent estimates suggest that training a single transformer LLM can emit as much carbon as 300 round-\ntrip flights between San Francisco and New York. In a study comparing energy consumption and carbon \nemissions for LLM inference, generative tasks (e.g., text summarization) were found to be more energy- \nand carbon-intensive than discriminative or non-generative tasks (e.g., text classification). \nMethods for creating smaller versions of trained models, such as model distillation or compression, \ncould reduce environmental impacts at inference time, but training and tuning such models may still \ncontribute to their environmental impacts. Currently there is no agreed upon method to estimate \nenvironmental impacts from GAI. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe \n2.6. Harmful Bias and Homogenization \nBias exists in many forms and can become ingrained in automated systems. AI systems, including GAI \nsystems, can increase the speed and scale at which harmful biases manifest and are acted upon, \npotentially perpetuating and amplifying harms to individuals, groups, communities, organizations, and \nsociety. For example, when prompted to generate images of CEOs, doctors, lawyers, and judges, current \ntext-to-image models underrepresent women and/or racial minorities, and people with disabilities. \nImage generator models have also produced biased or stereotyped output for various demographic \ngroups and have difficulty producing non-stereotyped content even when the prompt specifically \nrequests image features that are inconsistent with the stereotypes. Harmful bias in GAI models, which \nmay stem from their training data, can also cause representational harms or perpetuate or exacerbate \nbias based on race, gender, disability, or other protected classes.
\nHarmful bias in GAI systems can also lead to harms via disparities between how a model performs for \ndifferent subgroups or languages (e.g., an LLM may perform less well for non-English languages or \ncertain dialects). Such disparities can contribute to discriminatory decision-making or amplification of \nexisting societal biases. In addition, GAI systems may be inappropriately trusted to perform similarly \nacross all subgroups, which could leave the groups facing underperformance with worse outcomes than \nif no GAI system were used. Disparate or reduced performance for lower-resource languages also \npresents challenges to model adoption, inclusion, and accessibility, and may make preservation of \nendangered languages more difficult if GAI systems become embedded in everyday processes that would \notherwise have been opportunities to use these languages. \nBias is mutually reinforcing with the problem of undesired homogenization, in which GAI systems \nproduce skewed distributions of outputs that are overly uniform (for example, repetitive aesthetic styles \n']","Model collapse in AI can occur when model training over-relies on synthetic data, resulting in data points disappearing from the distribution of the new model's outputs. This phenomenon threatens the robustness of the model overall and can lead to homogenized outputs, amplifying any homogenization from the model used to generate the synthetic training data.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 12, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 11, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are Idaho's rules on pretrial risk assessment transparency and their alignment with federal ethical AI standards?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nSome U.S. government agencies have developed specific frameworks for ethical use of AI \nsystems.
The Department of Energy (DOE) has activated the AI Advancement Council that oversees coordination \nand advises on implementation of the DOE AI Strategy and addresses issues and/or escalations on the \nethical use and development of AI systems.20 The Department of Defense has adopted Artificial Intelligence \nEthical Principles, and tenets for Responsible Artificial Intelligence specifically tailored to its national \nsecurity and defense activities.21 Similarly, the U.S. Intelligence Community (IC) has developed the Principles \nof Artificial Intelligence Ethics for the Intelligence Community to guide personnel on whether and how to \ndevelop and use AI in furtherance of the IC\'s mission, as well as an AI Ethics Framework to help implement \nthese principles.22\nThe National Science Foundation (NSF) funds extensive research to help foster the \ndevelopment of automated systems that adhere to and advance their safety, security and \neffectiveness. Multiple NSF programs support research that directly addresses many of these principles: \nthe National AI Research Institutes23 support research on all aspects of safe, trustworthy, fair, and explainable \nAI algorithms and systems; the Cyber Physical Systems24 program supports research on developing safe \nautonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace25 \nprogram supports research on cybersecurity and privacy enhancing technologies in automated systems; the \nFormal Methods in the Field26 program supports research on rigorous formal verification and analysis of \nautomated systems and machine learning, and the Designing Accountable Software Systems27 program supports \nresearch on rigorous and reproducible methodologies for developing software systems with legal and regulatory \ncompliance in mind. \nSome state legislatures have placed strong transparency and validity requirements on \nthe use of pretrial risk assessments. The use of algorithmic pretrial risk assessments has been a \ncause of concern for civil rights groups.28 Idaho Code Section 19-1910, enacted in 2019,29 requires that any \npretrial risk assessment, before use in the state, first be ""shown to be free of bias against any class of \nindividuals protected from discrimination by state or federal law"", that any locality using a pretrial risk \nassessment must first formally validate the claim of its being free of bias, that ""all documents, records, and \ninformation used to build or validate the risk assessment shall be open to public inspection,"" and that assertions \nof trade secrets cannot be used ""to quash discovery in a criminal matter by a party to a criminal case."" \n22\n']","Idaho's rules on pretrial risk assessment transparency require that any pretrial risk assessment be shown to be free of bias against any class of individuals protected from discrimination by state or federal law. Additionally, any locality using a pretrial risk assessment must formally validate the claim of it being free of bias, and all documents, records, and information used to build or validate the risk assessment must be open to public inspection.
However, the context does not provide specific information on how these rules align with federal ethical AI standards.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 21, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What strategies help engage AI Actors to assess GAI impacts while maintaining AI content integrity?,"[' \n28 \nMAP 5.2: Practices and personnel for supporting regular engagement with relevant AI Actors and integrating feedback about \npositive, negative, and unanticipated impacts are in place and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.2-001 \nDetermine context-based measures to identify if new impacts are present due to \nthe GAI system, including regular engagements with downstream AI Actors to \nidentify and quantify new contexts of unanticipated impacts of GAI systems. \nHuman-AI Configuration; Value \nChain and Component Integration \nMP-5.2-002 \nPlan regular engagements with AI Actors responsible for inputs to GAI systems, \nincluding third-party data and algorithms, to review and evaluate unanticipated \nimpacts. \nHuman-AI Configuration; Value \nChain and Component Integration \nAI Actor Tasks: AI Deployment, AI Design, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-\nUsers, Human Factors, Operation and Monitoring \n \nMEASURE 1.1: Approaches and metrics for measurement of AI risks enumerated during the MAP function are selected for \nimplementation starting with the most significant AI risks. The risks or trustworthiness characteristics that will not – or cannot – be \nmeasured are properly documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.1-001 Employ methods to trace the origin and modifications of digital content. \nInformation Integrity \nMS-1.1-002 \nIntegrate tools designed to analyze content provenance and detect data \nanomalies, verify the authenticity of digital signatures, and identify patterns \nassociated with misinformation or manipulation. \nInformation Integrity \nMS-1.1-003 \nDisaggregate evaluation metrics by demographic factors to identify any \ndiscrepancies in how content provenance mechanisms work across diverse \npopulations. \nInformation Integrity; Harmful \nBias and Homogenization \nMS-1.1-004 Develop a suite of metrics to evaluate structured public feedback exercises \ninformed by representative AI Actors. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.1-005 \nEvaluate novel methods and technologies for the measurement of GAI-related \nrisks including in content provenance, offensive cyber, and CBRN, while \nmaintaining the models’ ability to produce valid, reliable, and factually accurate \noutputs. \nInformation Integrity; CBRN \nInformation or Capabilities; \nObscene, Degrading, and/or \nAbusive Content \n', ' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content.
\nHuman-AI Configuration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentifiable information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identified. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notification format. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a specific GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc.
\nHuman-AI Configuration \n']","Strategies to engage AI Actors to assess GAI impacts while maintaining AI content integrity include determining context-based measures to identify new impacts, planning regular engagements with AI Actors responsible for inputs to GAI systems, employing methods to trace the origin and modifications of digital content, integrating tools to analyze content provenance, and using structured feedback mechanisms to capture user input about AI-generated content.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 31, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What strategies are best for managing GAI systems and their lifecycle risks?,"[' \n16 \nGOVERN 1.5: Ongoing monitoring and periodic review of the risk management process and its outcomes are planned, and \norganizational roles and responsibilities are clearly defined, including determining the frequency of periodic review. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.5-001 Define organizational responsibilities for periodic review of content provenance \nand incident monitoring for GAI systems. \nInformation Integrity \nGV-1.5-002 \nEstablish organizational policies and procedures for after action reviews of GAI \nsystem incident response and incident disclosures, to identify gaps; Update \nincident response and incident disclosure processes as required. \nHuman-AI Configuration; \nInformation Security \nGV-1.5-003 \nMaintain a document retention policy to keep history for test, evaluation, \nvalidation, and verification (TEVV), and digital content transparency methods for \nGAI. \nInformation Integrity; Intellectual \nProperty \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring \n \nGOVERN 1.6: Mechanisms are in place to inventory AI systems and are resourced according to organizational risk priorities. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.6-001 Enumerate organizational GAI systems for incorporation into AI system inventory \nand adjust AI system inventory requirements to account for GAI risks. \nInformation Security \nGV-1.6-002 Define any inventory exemptions in organizational policies for GAI systems \nembedded into application software.
\nValue Chain and Component \nIntegration \nGV-1.6-003 \nIn addition to general model, governance, and risk information, consider the \nfollowing items in GAI system inventory entries: Data provenance information \n(e.g., source, signatures, versioning, watermarks); Known issues reported from \ninternal bug tracking or external information sharing resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor); Human oversight roles \nand responsibilities; Special rights and considerations for intellectual property, \nlicensed works, or personal, privileged, proprietary or sensitive data; Underlying \nfoundation models, versions of underlying models, and access modes. \nData Privacy; Human-AI \nConfiguration; Information \nIntegrity; Intellectual Property; \nValue Chain and Component \nIntegration \nAI Actor Tasks: Governance and Oversight \n \n', ' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identification process. \nHuman-AI Configuration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identification of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.3-001 \nEstablish policies for measuring the effectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted.
\nInformation Security \n']",The context does not provide specific strategies for managing GAI systems and their lifecycle risks.,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 19, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What confabulation might mislead users about CBRN info or capabilities?,"[' \n4 \n1. CBRN Information or Capabilities: Eased access to or synthesis of materially nefarious \ninformation or design capabilities related to chemical, biological, radiological, or nuclear (CBRN) \nweapons or other dangerous materials or agents. \n2. Confabulation: The production of confidently stated but erroneous or false content (known \ncolloquially as “hallucinations” or “fabrications”) by which users may be misled or deceived.6 \n3. Dangerous, Violent, or Hateful Content: Eased production of and access to violent, inciting, \nradicalizing, or threatening content as well as recommendations to carry out self-harm or \nconduct illegal activities. Includes difficulty controlling public exposure to hateful and disparaging \nor stereotyping content. \n4. Data Privacy: Impacts due to leakage and unauthorized use, disclosure, or de-anonymization of \nbiometric, health, location, or other personally identifiable information or sensitive data.7 \n5. Environmental Impacts: Impacts due to high compute resource utilization in training or \noperating GAI models, and related outcomes that may adversely impact ecosystems. \n6. Harmful Bias or Homogenization: Amplification and exacerbation of historical, societal, and \nsystemic biases; performance disparities8 between sub-groups or languages, possibly due to \nnon-representative training data, that result in discrimination, amplification of biases, or \nincorrect presumptions about performance; undesired homogeneity that skews system or model \noutputs, which may be erroneous, lead to ill-founded decision-making, or amplify harmful \nbiases. \n7. Human-AI Configuration: Arrangements of or interactions between a human and an AI system \nwhich can result in the human inappropriately anthropomorphizing GAI systems or experiencing \nalgorithmic aversion, automation bias, over-reliance, or emotional entanglement with GAI \nsystems. \n8.
Information Integrity: Lowered barrier to entry to generate and support the exchange and \nconsumption of content which may not distinguish fact from opinion or fiction or acknowledge \nuncertainties, or could be leveraged for large-scale dis- and mis-information campaigns. \n9. Information Security: Lowered barriers for offensive cyber capabilities, including via automated \ndiscovery and exploitation of vulnerabilities to ease hacking, malware, phishing, offensive cyber \n \n \n6 Some commenters have noted that the terms “hallucination” and “fabrication” anthropomorphize GAI, which \nitself is a risk related to GAI systems as it can inappropriately attribute human characteristics to non-human \nentities. \n7 What is categorized as sensitive data or sensitive PII can be highly contextual based on the nature of the \ninformation, but examples of sensitive information include information that relates to an information subject’s \nmost intimate sphere, including political opinions, sex life, or criminal convictions. \n8 The notion of harm presumes some baseline scenario that the harmful factor (e.g., a GAI model) makes worse. \nWhen the mechanism for potential harm is a disparity between groups, it can be difficult to establish what the \nmost appropriate baseline is to compare against, which can result in divergent views on when a disparity between \nAI behaviors for different subgroups constitutes a harm. In discussing harms from disparities such as biased \nbehavior, this document highlights examples where someone’s situation is worsened relative to what it would have \nbeen in the absence of any AI system, making the outcome unambiguously a harm of the system. \n', ' \n5 \noperations, or other cyberattacks; increased attack surface for targeted cyberattacks, which may \ncompromise a system’s availability or the confidentiality or integrity of training data, code, or \nmodel weights. \n10. Intellectual Property: Eased production or replication of alleged copyrighted, trademarked, or \nlicensed content without authorization (possibly in situations which do not fall under fair use); \neased exposure of trade secrets; or plagiarism or illegal replication. \n11. Obscene, Degrading, and/or Abusive Content: Eased production of and access to obscene, \ndegrading, and/or abusive imagery which can cause harm, including synthetic child sexual abuse \nmaterial (CSAM), and nonconsensual intimate images (NCII) of adults. \n12. Value Chain and Component Integration: Non-transparent or untraceable integration of \nupstream third-party components, including data that has been improperly obtained or not \nprocessed and cleaned due to increased automation from GAI; improper supplier vetting across \nthe AI lifecycle; or other issues that diminish transparency or accountability for downstream \nusers. \n2.1. CBRN Information or Capabilities \nIn the future, GAI may enable malicious actors to more easily access CBRN weapons and/or relevant \nknowledge, information, materials, tools, or technologies that could be misused to assist in the design, \ndevelopment, production, or use of CBRN weapons or other dangerous materials or agents. While \nrelevant biological and chemical threat knowledge and information is often publicly accessible, LLMs \ncould facilitate its analysis or synthesis, particularly by individuals without formal scientific training or \nexpertise.
\nRecent research on this topic found that LLM outputs regarding biological threat creation and attack \nplanning provided minimal assistance beyond traditional search engine queries, suggesting that state-of-\nthe-art LLMs at the time these studies were conducted do not substantially increase the operational \nlikelihood of such an attack. The physical synthesis development, production, and use of chemical or \nbiological agents will continue to require both applicable expertise and supporting materials and \ninfrastructure. The impact of GAI on chemical or biological agent misuse will depend on what the key \nbarriers for malicious actors are (e.g., whether information access is one such barrier), and how well GAI \ncan help actors address those barriers. \nFurthermore, chemical and biological design tools (BDTs) – highly specialized AI systems trained on \nscientific data that aid in chemical and biological design – may augment design capabilities in chemistry \nand biology beyond what text-based LLMs are able to provide. As these models become more \nefficacious, including for beneficial uses, it will be important to assess their potential to be used for \nharm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems’ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable \n']",Confabulation in the context of CBRN information or capabilities refers to the production of confidently stated but erroneous or false content that may mislead or deceive users regarding the access to or synthesis of nefarious information or design capabilities related to CBRN weapons or other dangerous materials.,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 7, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 8, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What insights did OSTP seek from the biometric tech RFI, and who provided feedback?,"['APPENDIX\nSummaries of Additional Engagements: \n• OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of\nartificial intelligence and other data-driven technologies in their lives.\n• OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113
The\npurpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or\nplanned use; the domains in which these technologies are being used; the entities making use of them; current\nprinciples, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their\nuse or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below\nlisted organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L. Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium \nat Computing Research Association \nConnected Health Initiative \nConsumer Technology Association \nCourtney Radsch \nCoworker \nCyber Farm Labs \nData & Society Research Institute \nData for Black Lives \nData to Actionable Knowledge Lab \nat Harvard University \nDeloitte \nDev Technology Group \nDigital Therapeutics Alliance \nDigital Welfare State & Human \nRights Project and Center for \nHuman Rights and Global Justice at \nNew York University School of \nLaw, and Temple University \nInstitute for Law, Innovation & \nTechnology \nDignari \nDouglas Goddard \nEdgar Dworsky \nElectronic Frontier Foundation \nElectronic Privacy Information \nCenter, Center for Digital \nDemocracy, and Consumer \nFederation of America \nFaceTec \nFight for the Future \nGanesh Mani \nGeorgia Tech Research Institute \nGoogle \nHealth Information Technology \nResearch and Development \nInteragency Working Group \nHireVue \nHR Policy Association \nID.me \nIdentity and Data Sciences \nLaboratory at Science Applications \nInternational Corporation \nInformation Technology and \nInnovation Foundation \nInformation Technology Industry \nCouncil \nInnocence Project \nInstitute for Human-Centered \nArtificial Intelligence at Stanford \nUniversity \nIntegrated Justice Information \nSystems Institute \nInternational Association of Chiefs \nof Police \nInternational Biometrics + Identity \nAssociation \nInternational Business Machines \nCorporation \nInternational Committee of the Red \nCross \nInventionphysics \niProov \nJacob Boudreau \nJennifer K. 
Wagner, Dan Berger, \nMargaret Hu, and Sara Katsanis \nJonathan Barry-Blocker \nJoseph Turow \nJoy Buolamwini \nJoy Mack \nKaren Bureau \nLamont Gholston \nLawyers’ Committee for Civil \nRights Under Law \n60\n', 'APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew America’s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation \nStephanie Dinkins and the Future \nHistories Studio at Stony Brook \nUniversity \nTechNet \nThe Alliance for Media Arts and \nCulture, MIT Open Documentary \nLab and Co-Creation Studio, and \nImmerse \nThe International Brotherhood of \nTeamsters \nThe Leadership Conference on \nCivil and Human Rights \nThorn \nU.S. Chamber of Commerce’s \nTechnology Engagement Center \nUber Technologies \nUniversity of Pittsburgh \nUndergraduate Student \nCollaborative \nUpturn \nUS Technology Policy Committee \nof the Association of Computing \nMachinery \nVirginia Puccio \nVisar Berisha and Julie Liss \nXR Association \nXR Safety Initiative \n• As an additional effort to reach out to stakeholders regarding the RFI, OSTP conducted two listening sessions\nfor members of the public. The listening sessions together drew upwards of 300 participants. The Science and\nTechnology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61\n']","OSTP sought insights on the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation.
Feedback was provided by 130 organizations and individuals, including Accenture, ACLU, Google, Microsoft Corporation, and many others.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 59, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 60, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What causes model collapse in AI, especially with synthetic data and biases?,"[' \n9 \nand reduced content diversity). Overly homogenized outputs can themselves be incorrect, or they may \nlead to unreliable decision-making or amplify harmful biases. These phenomena can flow from \nfoundation models to downstream models and systems, with the foundation models acting as \n“bottlenecks,” or single points of failure. \nOverly homogenized content can contribute to “model collapse.” Model collapse can occur when model \ntraining over-relies on synthetic data, resulting in data points disappearing from the distribution of the \nnew model’s outputs. In addition to threatening the robustness of the model overall, model collapse \ncould lead to homogenized outputs, including by amplifying any homogenization from the model used to \ngenerate the synthetic training data. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Valid and Reliable \n2.7. Human-AI Configuration \nGAI system use can involve varying risks of misconfigurations and poor interactions between a system \nand a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily “averse” to GAI systems, and thus \ndeprive themselves or others of GAI’s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts. \nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Valid and Reliable \n2.8.
Information Integrity \nInformation integrity describes the “spectrum of information and associated patterns of its creation, \nexchange, and consumption in society.” High-integrity information can be trusted; “distinguishes fact \nfrom fiction, opinion, and inference; acknowledges uncertainties; and is transparent about its level of \nvetting. This information can be linked to the original source(s) with appropriate evidence. High-integrity \ninformation is also accurate and reliable, can be verified and authenticated, has a clear chain of custody, \nand creates reasonable expectations about when its validity may expire.”11 \n \n \n11 This definition of information integrity is derived from the 2022 White House Roadmap for Researchers on \nPriorities Related to Information Integrity Research and Development. \n', ' \n8 \nTrustworthy AI Characteristics: Accountable and Transparent, Privacy Enhanced, Safe, Secure and \nResilient \n2.5. Environmental Impacts \nTraining, maintaining, and operating (running inference on) GAI systems are resource-intensive activities, \nwith potentially large energy and environmental footprints. Energy and carbon emissions vary based on \nwhat is being done with the GAI model (i.e., pre-training, fine-tuning, inference), the modality of the \ncontent, hardware used, and type of task or application. \nCurrent estimates suggest that training a single transformer LLM can emit as much carbon as 300 round-\ntrip flights between San Francisco and New York. In a study comparing energy consumption and carbon \nemissions for LLM inference, generative tasks (e.g., text summarization) were found to be more energy- \nand carbon-intensive than discriminative or non-generative tasks (e.g., text classification). \nMethods for creating smaller versions of trained models, such as model distillation or compression, \ncould reduce environmental impacts at inference time, but training and tuning such models may still \ncontribute to their environmental impacts. Currently there is no agreed upon method to estimate \nenvironmental impacts from GAI. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe \n2.6. Harmful Bias and Homogenization \nBias exists in many forms and can become ingrained in automated systems. AI systems, including GAI \nsystems, can increase the speed and scale at which harmful biases manifest and are acted upon, \npotentially perpetuating and amplifying harms to individuals, groups, communities, organizations, and \nsociety. For example, when prompted to generate images of CEOs, doctors, lawyers, and judges, current \ntext-to-image models underrepresent women and/or racial minorities, and people with disabilities. \nImage generator models have also produced biased or stereotyped output for various demographic \ngroups and have difficulty producing non-stereotyped content even when the prompt specifically \nrequests image features that are inconsistent with the stereotypes. Harmful bias in GAI models, which \nmay stem from their training data, can also cause representational harms or perpetuate or exacerbate \nbias based on race, gender, disability, or other protected classes. \nHarmful bias in GAI systems can also lead to harms via disparities between how a model performs for \ndifferent subgroups or languages (e.g., an LLM may perform less well for non-English languages or \ncertain dialects). Such disparities can contribute to discriminatory decision-making or amplification of \nexisting societal biases.
In addition, GAI systems may be inappropriately trusted to perform similarly \nacross all subgroups, which could leave the groups facing underperformance with worse outcomes than \nif no GAI system were used. Disparate or reduced performance for lower-resource languages also \npresents challenges to model adoption, inclusion, and accessibility, and may make preservation of \nendangered languages more difficult if GAI systems become embedded in everyday processes that would \notherwise have been opportunities to use these languages. \nBias is mutually reinforcing with the problem of undesired homogenization, in which GAI systems \nproduce skewed distributions of outputs that are overly uniform (for example, repetitive aesthetic styles \n']","Model collapse in AI can occur when model training over-relies on synthetic data, leading to data points disappearing from the distribution of the new model's outputs. This threatens the robustness of the model overall and can result in homogenized outputs, amplifying any homogenization from the model used to generate the synthetic training data.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 12, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 11, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What standards should automated systems follow for safety and fairness, and how to assess them?,"[' \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be carefully \nvalidated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indicating \nadverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law.
Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examina\xad\ntion, has benefits for those impacted by the system that outweigh identified risks and, as appropriate, reason\xad\nable measures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g., \nvia application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations. Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. \nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the \norganization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency, \nresults, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20\n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. 
In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n']","Automated systems should follow standards that include independent evaluation, regular reporting, and protections against algorithmic discrimination. They should be designed to allow independent evaluators access to assess safety and effectiveness, with regular updates on system performance, data usage, risk management, and independent evaluations. Additionally, entities should conduct algorithmic impact assessments to evaluate potential discrimination and ensure transparency in reporting these assessments.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What strategies help with privacy and IP risks in AI content?,"[' \n26 \nMAP 4.1: Approaches for mapping AI technology and legal risks of its components – including the use of third-party data or \nsoftware – are in place, followed, and documented, as are risks of infringement of a third-party's intellectual property or other \nrights. \nAction ID \nSuggested Action \nGAI Risks \nMP-4.1-001 Conduct periodic monitoring of AI-generated content for privacy risks; address any \npossible instances of PII or sensitive data exposure. 
\nData Privacy \nMP-4.1-002 Implement processes for responding to potential intellectual property infringement \nclaims or other rights. \nIntellectual Property \nMP-4.1-003 \nConnect new GAI policies, procedures, and processes to existing model, data, \nsoftware development, and IT governance and to legal, compliance, and risk \nmanagement activities. \nInformation Security; Data Privacy \nMP-4.1-004 Document training data curation policies, to the extent possible and according to \napplicable laws and policies. \nIntellectual Property; Data Privacy; \nObscene, Degrading, and/or \nAbusive Content \nMP-4.1-005 \nEstablish policies for collection, retention, and minimum quality of data, in \nconsideration of the following risks: Disclosure of inappropriate CBRN information; \nUse of Illegal or dangerous content; Offensive cyber capabilities; Training data \nimbalances that could give rise to harmful biases; Leak of personally identifiable \ninformation, including facial likenesses of individuals. \nCBRN Information or Capabilities; \nIntellectual Property; Information \nSecurity; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-006 Implement policies and practices defining how third-party intellectual property and \ntraining data will be used, stored, and protected. \nIntellectual Property; Value Chain \nand Component Integration \nMP-4.1-007 Re-evaluate models that were fine-tuned or enhanced on top of third-party \nmodels. \nValue Chain and Component \nIntegration \nMP-4.1-008 \nRe-evaluate risks when adapting GAI models to new domains. Additionally, \nestablish warning systems to determine if a GAI system is being used in a new \ndomain where previous assumptions (relating to context of use or mapped risks \nsuch as security, and safety) may no longer hold. \nCBRN Information or Capabilities; \nIntellectual Property; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-009 Leverage approaches to detect the presence of PII or sensitive data in generated \noutput text, image, video, or audio. \nData Privacy \n', "" \n27 \nMP-4.1-010 \nConduct appropriate diligence on training data use to assess intellectual property, \nand privacy, risks, including to examine whether use of proprietary or sensitive \ntraining data is consistent with applicable laws. \nIntellectual Property; Data Privacy \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities \n \nMAP 5.1: Likelihood and magnitude of each identified impact (both potentially beneficial and harmful) based on expected use, past \nuses of AI systems in similar contexts, public incident reports, feedback from those external to the team that developed or deployed \nthe AI system, or other data are identified and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.1-001 Apply TEVV practices for content provenance (e.g., probing a system's synthetic \ndata generation capabilities for potential misuse or vulnerabilities. \nInformation Integrity; Information \nSecurity \nMP-5.1-002 \nIdentify potential content provenance harms of GAI, such as misinformation or \ndisinformation, deepfakes, including NCII, or tampered content. Enumerate and \nrank risks based on their likelihood and potential impact, and determine how well \nprovenance solutions address specific risks and/or harms. 
\nInformation Integrity; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMP-5.1-003 \nConsider disclosing use of GAI to end users in relevant contexts, while considering \nthe objective of disclosure, the context of use, the likelihood and magnitude of the \nrisk posed, the audience of the disclosure, as well as the frequency of the \ndisclosures. \nHuman-AI Configuration \nMP-5.1-004 Prioritize GAI structured public feedback processes based on risk assessment \nestimates. \nInformation Integrity; CBRN \nInformation or Capabilities; \nDangerous, Violent, or Hateful \nContent; Harmful Bias and \nHomogenization \nMP-5.1-005 Conduct adversarial role-playing exercises, GAI red-teaming, or chaos testing to \nidentify anomalous or unforeseen failure modes. \nInformation Security \nMP-5.1-006 \nProfile threats and negative impacts arising from GAI systems interacting with, \nmanipulating, or generating content, and outlining known and potential \nvulnerabilities and the likelihood of their occurrence. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Design, AI Development, AI Impact Assessment, Affected Individuals and Communities, End-\nUsers, Operation and Monitoring \n \n""]","Strategies to help with privacy and intellectual property (IP) risks in AI content include conducting periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, documenting training data curation policies, establishing policies for collection and retention of data, and conducting appropriate diligence on training data use to assess intellectual property and privacy risks.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 29, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 30, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
"How does risk documentation aid compliance and governance in GAI systems, especially with external feedback?","[' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. 
\nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identification process. \nHuman-AI Configuration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identification of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the effectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. \nInformation Security \n', ' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. \nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Affected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more significant risks. \nHuman-AI Configuration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Affected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-party's intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize different types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. 
\nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management efforts with third parties (e.g., incidents detected and \nresponse times). \nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-defined contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property \n']","The context does not provide specific information on how risk documentation aids compliance and governance in GAI systems, particularly regarding external feedback.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
"How does testing ensure the safety of automated systems before deployment, especially regarding community input and risk?","[' \n \n \nSAFE AND EFFECTIVE SYSTEMS \nYou should be protected from unsafe or ineffective sys\xad\ntems. Automated systems should be developed with consultation \nfrom diverse communities, stakeholders, and domain experts to iden\xad\ntify concerns, risks, and potential impacts of the system. Systems \nshould undergo pre-deployment testing, risk identification and miti\xad\ngation, and ongoing monitoring that demonstrate they are safe and \neffective based on their intended use, mitigation of unsafe outcomes \nincluding those beyond the intended use, and adherence to do\xad\nmain-specific standards. Outcomes of these protective measures \nshould include the possibility of not deploying the system or remov\xad\ning a system from use. Automated systems should not be designed \nwith an intent or reasonably foreseeable possibility of endangering \nyour safety or the safety of your community. They should be designed \nto proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. 
\n15\n', ' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\xad\ntial risks of the automated system should be identified and mitigated. 
Identified risks should focus on the \npotential for meaningful impact on people's rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, mea\xad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. \n18\n']","Testing ensures the safety of automated systems before deployment by requiring extensive testing that follows domain-specific best practices, taking into account the specific technology used and the roles of human operators. This testing should mirror real-world conditions and include both automated and human-led testing. Additionally, community input is gathered through consultation during the design and implementation phases, allowing for the identification and mitigation of potential risks that may impact rights and access, particularly for affected communities. Concerns raised during this consultation should be documented and considered in the development process, ensuring that the system is safe and effective based on community feedback.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 14, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
"What standards should automated systems follow for safety and fairness, and how to assess them?","[' \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. 
Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be care\xad\nfully validated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indi\xad\ncating adverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law. Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examina\xad\ntion, has benefits for those impacted by the system that outweigh identified risks and, as appropriate, reason\xad\nable measures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g., \nvia application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations. Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. \nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the \norganization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency, \nresults, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. 
\n20\n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n']","Automated systems should follow standards that include independent evaluation to ensure safety and effectiveness, regular reporting on system performance and data usage, and protections against algorithmic discrimination. 
Assessments should involve algorithmic impact assessments that detail consultation results, equity assessments, and any disparities, with findings made public whenever possible.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What federal steps are being taken to tackle algorithmic bias in mortgage lending for communities of color?,"["" \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe federal government is working to combat discrimination in mortgage lending. The Depart\xad\nment of Justice has launched a nationwide initiative to combat redlining, which includes reviewing how \nlenders who may be avoiding serving communities of color are conducting targeted marketing and advertising.51 \nThis initiative will draw upon strong partnerships across federal agencies, including the Consumer Financial \nProtection Bureau and prudential regulators. The Action Plan to Advance Property Appraisal and Valuation \nEquity includes a commitment from the agencies that oversee mortgage lending to include a \nnondiscrimination standard in the proposed rules for Automated Valuation Models.52\nThe Equal Employment Opportunity Commission and the Department of Justice have clearly \nlaid out how employersā€™ use of AI and other automated systems can result in \ndiscrimination against job applicants and employees with disabilities.53 The documents explain \nhow employersā€™ use of software that relies on algorithmic decision-making may violate existing requirements \nunder Title I of the Americans with Disabilities Act (ā€œADAā€). This technical assistance also provides practical \ntips to employers on how to comply with the ADA, and to job applicants and employees who think that their \nrights may have been violated. \nDisparity assessments identified harms to Black patients' healthcare access. A widely \nused healthcare algorithm relied on the cost of each patientā€™s past medical care to predict future medical needs, \nrecommending early interventions for the patients deemed most at risk. This process discriminated \nagainst Black patients, who generally have less access to medical care and therefore have generated less cost \nthan white patients with similar illness and need. 
A landmark study documented this pattern and proposed \npractical ways that were shown to reduce this bias, such as focusing specifically on active chronic health \nconditions or avoidable future costs related to emergency visits and hospitalization.54 \nLarge employers have developed best practices to scrutinize the data and models used \nfor hiring. An industry initiative has developed Algorithmic Bias Safeguards for the Workforce, a structured \nquestionnaire that businesses can use proactively when procuring software to evaluate workers. It covers \nspecific technical questions such as the training data used, model training process, biases identified, and \nmitigation steps employed.55 \nStandards organizations have developed guidelines to incorporate accessibility criteria \ninto technology design processes. The most prevalent in the United States is the Access Board's Section \n508 regulations,56 which are the technical standards for federal information communication technology (software, \nhardware, and web). Other standards include those issued by the International Organization for \nStandardization,57 and the World Wide Web Consortium Web Content Accessibility Guidelines,58 a globally \nrecognized voluntary consensus standard for web content and other information and communications \ntechnology. \nNIST has released Special Publication 1270, Towards a Standard for Identifying and Managing Bias \nin Artificial Intelligence.59 The special publication: describes the stakes and challenges of bias in artificial \nintelligence and provides examples of how and why it can chip away at public trust; identifies three categories \nof bias in AI – systemic, statistical, and human – and describes how and where they contribute to harms; and \ndescribes three broad challenges for mitigating bias – datasets, testing and evaluation, and human factors – and \nintroduces preliminary guidance for addressing them. Throughout, the special publication takes a socio-\ntechnical perspective to identifying and managing AI bias. \n29\nAlgorithmic \nDiscrimination \nProtections \n"", "" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. 
Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts—basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women appli\xad\ncants for spurious and discriminatory reasons; resumes with the word "women's," such as "women's\nchess club captain," were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n""]","The federal government is working to combat discrimination in mortgage lending through initiatives such as the Department of Justice's nationwide initiative to combat redlining. 
This includes reviewing how lenders may be avoiding serving communities of color and conducting targeted marketing and advertising. Additionally, the Action Plan to Advance Property Appraisal and Valuation Equity includes a commitment from agencies overseeing mortgage lending to include a nondiscrimination standard in proposed rules for Automated Valuation Models.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 28, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What strategies can reduce bias in GAI while maintaining data accuracy?,"["" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. 
\nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. \nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n"", ' \n25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at diļ¬€erent stages of AI life cycle. \nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. \nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner proļ¬ciency with AI system performance and trustworthiness ā€“ and relevant \ntechnical standards and certiļ¬cations ā€“ are deļ¬ned, assessed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certiļ¬cation programs that test proļ¬ciency in managing GAI risks and \ninterpreting content provenance, relevant to speciļ¬c industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proļ¬ciency tests from tests of GAI capabilities. \nHuman-AI Conļ¬guration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconļ¬gurations for future reļ¬nement and improvements. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping \nand testing activities. Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. 
\nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \n \n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What strategies help manage IP risks in GAI while ensuring transparency?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain eļ¬€ective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability ā€“ as identiļ¬ed in the MAP function ā€“ are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Conļ¬guration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modiļ¬ed, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. \nHuman-AI Conļ¬guration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. 
\nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. 
\nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]",The context does not provide specific strategies for managing IP risks in GAI while ensuring transparency.,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How do automated customer service systems meet complex needs with human oversight?,"["" \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nHealthcare ā€œnavigatorsā€ help people find their way through online signup forms to choose \nand obtain healthcare. A Navigator is ā€œan individual or organization that's trained and able to help \nconsumers, small businesses, and their employees as they look for health coverage options through the \nMarketplace (a government web site), including completing eligibility and enrollment forms.ā€106 For \nthe 2022 plan year, the Biden-Harris Administration increased funding so that grantee organizations could \nā€œtrain and certify more than 1,500 Navigators to help uninsured consumers find affordable and comprehensive \nhealth coverage.ā€107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. 
Some federal \ncourts have found that such cure procedures are constitutionally required.110 \nBallot \ncuring processes vary among states, and include direct phone calls, emails, or mail contact by election \nofficials.111 Voters are asked to provide alternative information or a new signature to verify the validity of their \nballot. \n52\n""]","Automated customer service systems meet complex needs with human oversight by integrating automated services such as chat-bots and AI-driven call response systems, which can escalate issues to a human support team when necessary. This allows companies to provide faster customer care while ensuring that human agents are available to handle complicated requests.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 51, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How does diverse data help prevent algorithmic bias in automated systems?,"[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. 
The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. 
Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 \nwith clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n']","Diverse data helps prevent algorithmic bias in automated systems by ensuring that any data used in system development or assessment is representative of local communities based on the planned deployment setting. This data should be reviewed for bias considering the historical and societal context, and it should be sufficiently robust to identify and mitigate biases and potential harms.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What factors should be considered for assessing GAI systems' socio-cultural impacts and data integrity?,"[' \n23 \nMP-1.1-002 \nDetermine and document the expected and acceptable GAI system context of \nuse in collaboration with socio-cultural and other domain experts, by assessing: \nAssumptions and limitations; Direct value to the organization; Intended \noperational environment and observed usage patterns; Potential positive and \nnegative impacts to individuals, public safety, groups, communities, \norganizations, democratic institutions, and the physical environment; Social \nnorms and expectations. \nHarmful Bias and Homogenization \nMP-1.1-003 \nDocument risk measurement plans to address identified risks. 
Plans may \ninclude, as applicable: Individual and group cognitive biases (e.g., confirmation \nbias, funding bias, groupthink) for AI Actors involved in the design, \nimplementation, and use of GAI systems; Known past GAI system incidents and \nfailure modes; In-context use and foreseeable misuse, abuse, and off-label use; \nOver reliance on quantitative metrics and methodologies without sufficient \nawareness of their limitations in the context(s) of use; Standard measurement \nand structured human feedback approaches; Anticipated human-AI \nconfigurations. \nHuman-AI Configuration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent \nMP-1.1-004 \nIdentify and document foreseeable illegal uses or applications of the GAI system \nthat surpass organizational risk tolerances. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Obscene, Degrading, \nand/or Abusive Content \nAI Actor Tasks: AI Deployment \n \nMAP 1.2: Interdisciplinary AI Actors, competencies, skills, and capacities for establishing context reflect demographic diversity and \nbroad domain and user experience expertise, and their participation is documented. Opportunities for interdisciplinary \ncollaboration are prioritized. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.2-001 \nEstablish and empower interdisciplinary teams that reflect a wide range of \ncapabilities, competencies, demographic groups, domain expertise, educational \nbackgrounds, lived experiences, professions, and skills across the enterprise to \ninform and conduct risk measurement and management functions. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nMP-1.2-002 \nVerify that data or benchmarks used in risk measurement, and users, \nparticipants, or subjects involved in structured GAI public feedback exercises \nare representative of diverse in-context user populations. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nAI Actor Tasks: AI Deployment \n \n', ' \n29 \nMS-1.1-006 \nImplement continuous monitoring of GAI system impacts to identify whether GAI \noutputs are equitable across various sub-populations. Seek active and direct \nfeedback from affected communities via structured feedback mechanisms or red-\nteaming to monitor and improve outputs. \nHarmful Bias and Homogenization \nMS-1.1-007 \nEvaluate the quality and integrity of data used in training and the provenance of \nAI-generated content, for example by employing techniques like chaos \nengineering and seeking stakeholder feedback. \nInformation Integrity \nMS-1.1-008 \nDefine use cases, contexts of use, capabilities, and negative impacts where \nstructured human feedback exercises, e.g., GAI red-teaming, would be most \nbeneficial for GAI risk measurement and management based on the context of \nuse. \nHarmful Bias and \nHomogenization; CBRN \nInformation or Capabilities \nMS-1.1-009 \nTrack and document risks or opportunities related to all GAI risks that cannot be \nmeasured quantitatively, including explanations as to why some risks cannot be \nmeasured (e.g., due to technological limitations, resource constraints, or \ntrustworthy considerations). Include unmeasured risks in marginal risks. \nInformation Integrity \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMEASURE 1.3: Internal experts who did not serve as front-line developers for the system and/or independent assessors are \ninvolved in regular assessments and updates. 
Domain experts, users, AI Actors external to the team that developed or deployed the \nAI system, and affected communities are consulted in support of assessments as necessary per organizational risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.3-001 \nDefine relevant groups of interest (e.g., demographic groups, subject matter \nexperts, experience with GAI technology) within the context of use as part of \nplans for gathering structured public feedback. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-002 \nEngage in internal and external evaluations, GAI red-teaming, impact \nassessments, or other structured human feedback exercises in consultation \nwith representative AI Actors with expertise and familiarity in the context of \nuse, and/or who are representative of the populations associated with the \ncontext of use. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-003 \nVerify those conducting structured human feedback exercises are not directly \ninvolved in system development tasks for the same GAI model. \nHuman-AI Configuration; Data \nPrivacy \nAI Actor Tasks: AI Deployment, AI Development, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, \nEnd-Users, Operation and Monitoring, TEVV \n \n']","Factors to consider for assessing GAI systems' socio-cultural impacts include assumptions and limitations, direct value to the organization, intended operational environment, observed usage patterns, potential positive and negative impacts to individuals and communities, and social norms and expectations. For data integrity, factors include evaluating the quality and integrity of data used in training, the provenance of AI-generated content, and ensuring that data or benchmarks used in risk measurement are representative of diverse in-context user populations.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 26, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 32, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What risks come from human use of generative AI, both emotionally and socially?","[' \n3 \nthe abuse, misuse, and unsafe repurposing by humans (adversarial or not), and others result \nfrom interactions between a human and an AI system. \n• \nTime scale: GAI risks may materialize abruptly or across extended periods. 
Examples include \nimmediate (and/or prolonged) emotional harm and potential risks to physical safety due to the \ndistribution of harmful deepfake images, or the long-term effect of disinformation on societal \ntrust in public institutions. \nThe presence of risks and where they fall along the dimensions above will vary depending on the \ncharacteristics of the GAI model, system, or use case at hand. These characteristics include but are not \nlimited to GAI model or system architecture, training mechanisms and libraries, data types used for \ntraining or fine-tuning, levels of model access or availability of model weights, and application or use \ncase context. \nOrganizations may choose to tailor how they measure GAI risks based on these characteristics. They may \nadditionally wish to allocate risk management resources relative to the severity and likelihood of \nnegative impacts, including where and how these risks manifest, and their direct and material impacts \nharms in the context of GAI use. Mitigations for model or system level risks may differ from mitigations \nfor use-case or ecosystem level risks. \nImportantly, some GAI risks are unknown, and are therefore difficult to properly scope or evaluate given \nthe uncertainty about potential GAI scale, complexity, and capabilities. Other risks may be known but \ndifficult to estimate given the wide range of GAI stakeholders, uses, inputs, and outputs. Challenges with \nrisk estimation are aggravated by a lack of visibility into GAI training data, and the generally immature \nstate of the science of AI measurement and safety today. This document focuses on risks for which there \nis an existing empirical evidence base at the time this profile was written; for example, speculative risks \nthat may potentially arise in more advanced, future GAI systems are not considered. Future updates may \nincorporate additional risks or provide further details on the risks identified below. \nTo guide organizations in identifying and managing GAI risks, a set of risks unique to or exacerbated by \nthe development and use of GAI are defined below.5 Each risk is labeled according to the outcome, \nobject, or source of the risk (i.e., some are risks "to" a subject or domain and others are risks "of" or \n"from" an issue or theme). These risks provide a lens through which organizations can frame and execute \nrisk management efforts. To help streamline risk management efforts, each risk is mapped in Section 3 \n(as well as in tables in Appendix B) to relevant Trustworthy AI Characteristics identified in the AI RMF. \n \n \n5 These risks can be further categorized by organizations depending on their unique approaches to risk definition \nand management. One possible way to further categorize these risks, derived in part from the UK's International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories. 
\n', ' \n2 \nThis work was informed by public feedback and consultations with diverse stakeholder groups as part of NIST's \nGenerative AI Public Working Group (GAI PWG). The GAI PWG was an open, transparent, and collaborative \nprocess, facilitated via a virtual workspace, to obtain multistakeholder input on GAI risk management and to \ninform NIST's approach. \nThe focus of the GAI PWG was limited to four primary considerations relevant to GAI: Governance, Content \nProvenance, Pre-deployment Testing, and Incident Disclosure (further described in Appendix A). As such, the \nsuggested actions in this document primarily address these considerations. \nFuture revisions of this profile will include additional AI RMF subcategories, risks, and suggested actions based \non additional considerations of GAI as the space evolves and empirical evidence indicates additional risks. A \nglossary of terms pertinent to GAI risk management will be developed and hosted on NIST's Trustworthy & \nResponsible AI Resource Center (AIRC), and added to The Language of Trustworthy AI: An In-Depth Glossary of \nTerms. \nThis document was also informed by public comments and consultations from several Requests for Information. \n \n2. \nOverview of Risks Unique to or Exacerbated by GAI \nIn the context of the AI RMF, risk refers to the composite measure of an event's probability (or \nlikelihood) of occurring and the magnitude or degree of the consequences of the corresponding event. \nSome risks can be assessed as likely to materialize in a given context, particularly those that have been \nempirically demonstrated in similar contexts. Other risks may be unlikely to materialize in a given \ncontext, or may be more speculative and therefore uncertain. \nAI risks can differ from or intensify traditional software risks. Likewise, GAI can exacerbate existing AI \nrisks, and creates unique risks. GAI risks can vary along many dimensions: \n• \nStage of the AI lifecycle: Risks can arise during design, development, deployment, operation, \nand/or decommissioning. \n• \nScope: Risks may exist at individual model or system levels, at the application or implementation \nlevels (i.e., for a specific use case), or at the ecosystem level – that is, beyond a single system or \norganizational context. Examples of the latter include the expansion of "algorithmic \nmonocultures,3" resulting from repeated use of the same model, or impacts on access to \nopportunity, labor markets, and the creative economies.4 \n• \nSource of risk: Risks may emerge from factors related to the design, training, or operation of the \nGAI model itself, stemming in some cases from GAI model or system inputs, and in other cases, \nfrom GAI system outputs. Many GAI risks, however, originate from human behavior, including \n \n \n3 "Algorithmic monocultures" refers to the phenomenon in which repeated use of the same model or algorithm in \nconsequential decision-making settings like employment and lending can result in increased susceptibility by \nsystems to correlated failures (like unexpected shocks), due to multiple actors relying on the same algorithm. \n4 Many studies have projected the impact of AI on the workforce and labor markets. Fewer studies have examined \nthe impact of GAI on the labor market, though some industry surveys indicate that that both employees and \nemployers are pondering this disruption. 
\n']","The risks that come from human use of generative AI (GAI) include immediate and prolonged emotional harm, potential risks to physical safety due to the distribution of harmful deepfake images, and the long-term effect of disinformation on societal trust in public institutions.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 6, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 5, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What problems does AI nudification tech address, and how do they connect to wider concerns about automated harm?","[' \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nAI-enabled "nudification" technology that creates images where people appear to be nude—including apps that\nenable non-technical users to create or alter images of individuals without their consent—has proliferated at an\nalarming rate. Such technology is becoming a common form of image-based abuse that disproportionately\nimpacts women. As these tools become more sophisticated, they are producing altered images that are increasingly realistic and are difficult for both humans and AI to detect as inauthentic. Regardless of authenticity, the experience of harm to victims of non-consensual intimate images can be devastatingly real—affecting their personal\nand professional lives, and impacting their mental and physical health.10\n•\nA company installed AI-powered cameras in its delivery vans in order to evaluate the road safety habits of its drivers, but the system incorrectly penalized drivers when other cars cut them off or when other events beyond\ntheir control took place on the road. As a result, drivers were incorrectly ineligible to receive a bonus.11\n17\n', ' \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nWhile technologies are being deployed to solve problems across a wide array of issues, our reliance on technology can \nalso lead to its use in situations where it has not yet been proven to work—either at all or within an acceptable range \nof error. In other cases, technologies do not work as intended or as promised, causing substantial and unjustified harm. 
\nAutomated systems sometimes rely on data from other systems, including historical data, allowing irrelevant information from past decisions to infect decision-making in unrelated situations. In some cases, technologies are purposefully designed to violate the safety of others, such as technologies designed to facilitate stalking; in other cases, intended \nor unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consultation processes that may be applied when considering the use of new automated systems, and existing product development and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Innovators deserve clear rules of the road that allow new ideas to flourish, and the American public deserves protections \nfrom unsafe outcomes. All can benefit from assurances that automated systems will be designed, tested, and consistently confirmed to work as intended, and that they will be proactively protected from foreseeable unintended harmful outcomes. \n•\nA proprietary model was developed to predict the likelihood of sepsis in hospitalized patients and was implemented at hundreds of hospitals around the country. An independent study showed that the model predictions\nunderperformed relative to the designer's claims while also causing 'alert fatigue' by falsely alerting\nlikelihood of sepsis.6\n•\nOn social media, Black people who quote and criticize racist messages have had their own speech silenced when\na platform's automated moderation system failed to distinguish this "counter speech" (or other critique\nand journalism) from the original hateful messages to which such speech responded.7\n•\nA device originally developed to help people track and find lost items has been used as a tool by stalkers to track\nvictims' locations in violation of their privacy and safety. The device manufacturer took steps after release to\nprotect people from unwanted tracking by alerting people on their phones when a device is found to be moving\nwith them over time and also by having the device make an occasional noise, but not all phones are able\nto receive the notification and the devices remain a safety concern due to their misuse.8 \n•\nAn algorithm used to deploy police was found to repeatedly send police to neighborhoods they regularly visit,\neven if those neighborhoods were not the ones with the highest crime rates. These incorrect crime predictions\nwere the result of a feedback loop generated from the reuse of data from previous arrests and algorithm\npredictions.9\n16\n']","AI nudification technology addresses the problem of creating non-consensual intimate images that can lead to image-based abuse, particularly impacting women. 
This technology raises wider concerns about automated harm as it exemplifies how advanced tools can be misused, leading to devastating effects on victims' personal and professional lives, as well as their mental and physical health. Additionally, the reliance on automated systems can result in unintended consequences, such as incorrect penalization of drivers or biased decision-making based on flawed historical data, highlighting the need for safeguards and ethical reviews in technology deployment.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 16, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 15, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +"What strategies ensure safe, fair automated systems for underserved communities?","[' AI BILL OF RIGHTS\nFFECTIVE SYSTEMS\nineffective systems. Automated systems should be \ncommunities, stakeholders, and domain experts to identify \nSystems should undergo pre-deployment testing, risk \nthat demonstrate they are safe and effective based on \nincluding those beyond the intended use, and adherence to \nprotective measures should include the possibility of not \nAutomated systems should not be designed with an intent \nreasonably foreseeable possibility of endangering your safety or the safety of your community. They should \nstemming from unintended, yet foreseeable, uses or \n \n \n \n \n \n \n \nSECTION TITLE\nBLUEPRINT FOR AN\nSAFE AND E \nYou should be protected from unsafe or \ndeveloped with consultation from diverse \nconcerns, risks, and potential impacts of the system. \nidentification and mitigation, and ongoing monitoring \ntheir intended use, mitigation of unsafe outcomes \ndomain-specific standards. Outcomes of these \ndeploying the system or removing a system from use. \nor \nbe designed to proactively protect you from harms \nimpacts of automated systems. You should be protected from inappropriate or irrelevant data use in the \ndesign, development, and deployment of automated systems, and from the compounded harm of its reuse. \nIndependent evaluation and reporting that confirms that the system is safe and effective, including reporting of \nsteps taken to mitigate potential harms, should be performed and the results made public whenever possible. \nALGORITHMIC DISCRIMINATION PROTECTIONS\nYou should not face discrimination by algorithms and systems should be used and designed in \nan equitable way. 
Algorithmic discrimination occurs when automated systems contribute to unjustified \ndifferent treatment or impacts disfavoring people based on their race, color, ethnicity, sex (including \npregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other \nclassification protected by law. Depending on the specific circumstances, such algorithmic discrimination \nmay violate legal protections. Designers, developers, and deployers of automated systems should take \nproactive \nand \ncontinuous \nmeasures \nto \nprotect \nindividuals \nand \ncommunities \nfrom algorithmic \ndiscrimination and to use and design systems in an equitable way. This protection should include proactive \nequity assessments as part of the system design, use of representative data and protection against proxies \nfor demographic features, ensuring accessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent \nevaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5\n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. 
The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']","Strategies to ensure safe and fair automated systems for underserved communities include conducting proactive equity assessments during the design phase, using representative and robust data, guarding against proxies that may lead to algorithmic discrimination, and implementing ongoing monitoring and evaluation to confirm protections against algorithmic discrimination. 
These strategies aim to identify potential discrimination and effects on equity, ensuring that the systems are designed and deployed in an equitable manner.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 4, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What's the role of oversight and feedback in managing GAI risks and communicating their societal effects?,"[' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. \nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more signiļ¬cant risks. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-partyā€™s intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize diļ¬€erent types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management eļ¬€orts with third parties (e.g., incidents detected and \nresponse times). 
\nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-deļ¬ned contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property \n', ' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identiļ¬cation process. \nHuman-AI Conļ¬guration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identiļ¬cation of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the eļ¬€ectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. \nInformation Security \n']","Oversight and feedback play a crucial role in managing GAI risks by ensuring that organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from external sources regarding the potential individual and societal impacts related to AI risks. 
This includes establishing oversight functions across the GAI lifecycle and documenting the risks and potential impacts of the AI technology, which facilitates broader communication about these impacts.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +Which framework aims to boost AI trustworthiness while upholding civil rights and privacy laws?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principlesā€”while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AIā€”require that AI is: (a) lawful and \nrespectful of our Nationā€™s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f ) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulationsā€”and \nmeasures to address harms when they occurā€”can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components. 
\nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturersā€™ ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companiesā€™ reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-\nBased Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful \nuses. \nThe \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. \n21\n', 'SECTION TITLE\n \n \n \n \n \n \nApplying The Blueprint for an AI Bill of Rights \nRELATIONSHIP TO EXISTING LAW AND POLICY\nThere are regulatory safety requirements for medical devices, as well as sector-, population-, or technology-spe\xad\ncific privacy and security protections. Ensuring some of the additional protections proposed in this framework \nwould require new laws to be enacted or new policies and practices to be adopted. In some cases, exceptions to \nthe principles described in the Blueprint for an AI Bill of Rights may be necessary to comply with existing law, \nconform to the practicalities of a specific use case, or balance competing public interests. 
In particular, law \nenforcement, and other regulatory contexts may require government actors to protect civil rights, civil liberties, \nand privacy in a manner consistent with, but using alternate mechanisms to, the specific principles discussed in \nthis framework. The Blueprint for an AI Bill of Rights is meant to assist governments and the private sector in \nmoving principles into practice. \nThe expectations given in the Technical Companion are meant to serve as a blueprint for the development of \nadditional technical standards and practices that should be tailored for particular sectors and contexts. While \nexisting laws informed the development of the Blueprint for an AI Bill of Rights, this framework does not detail \nthose laws beyond providing them as examples, where appropriate, of existing protective measures. This \nframework instead shares a broad, forward-leaning vision of recommended principles for automated system \ndevelopment and use to inform private and public involvement with these systems where they have the poten\xad\ntial to meaningfully impact rights, opportunities, or access. Additionally, this framework does not analyze or \ntake a position on legislative and regulatory proposals in municipal, state, and federal government, or those in \nother countries. \nWe have seen modest progress in recent years, with some state and local governments responding to these prob\xad\nlems with legislation, and some courts extending longstanding statutory protections to new and emerging tech\xad\nnologies. There are companies working to incorporate additional protections in their design and use of auto\xad\nmated systems, and researchers developing innovative guardrails. Advocates, researchers, and government \norganizations have proposed principles for the ethical use of AI and other automated systems. These include \nthe Organization for Economic Co-operation and Developmentā€™s (OECDā€™s) 2019 Recommendation on Artificial \nIntelligence, which includes principles for responsible stewardship of trustworthy AI and which the United \nStates adopted, and Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government, which sets out principles that govern the federal governmentā€™s use of AI. The Blueprint \nfor an AI Bill of Rights is fully consistent with these principles and with the direction in Executive Order 13985 \non Advancing Racial Equity and Support for Underserved Communities Through the Federal Government. \nThese principles find kinship in the Fair Information Practice Principles (FIPPs), derived from the 1973 report \nof an advisory committee to the U.S. Department of Health, Education, and Welfare, Records, Computers, \nand the Rights of Citizens.4 While there is no single, universal articulation of the FIPPs, these core \nprinciples for managing information about individuals have been incorporated into data privacy laws and \npolicies across the globe.5 The Blueprint for an AI Bill of Rights embraces elements of the FIPPs that are \nparticularly relevant to automated systems, without articulating a specific set of FIPPs or scoping \napplicability or the interests served to a single particular domain, like privacy, civil rights and civil liberties, \nethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. 
\n9\n']",The NIST AI Risk Management Framework aims to boost AI trustworthiness while upholding civil rights and privacy laws.,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 8, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What factors ensure effective oversight in automated systems for critical fields like justice and healthcare?,"[' \nSECTION TITLE\nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nYou should be able to opt out, where appropriate, and have access to a person who can quickly \nconsider and remedy problems you encounter. You should be able to opt out from automated systems in \nfavor of a human alternative, where appropriate. Appropriateness should be determined based on reasonable \nexpectations in a given context and with a focus on ensuring broad accessibility and protecting the public from \nespecially harmful impacts. In some cases, a human or other alternative may be required by law. You should have \naccess to timely human consideration and remedy by a fallback and escalation process if an automated system \nfails, it produces an error, or you would like to appeal or contest its impacts on you. Human consideration and \nfallback should be accessible, equitable, effective, maintained, accompanied by appropriate operator training, and \nshould not impose an unreasonable burden on the public. Automated systems with an intended use within sensi\xad\ntive domains, including, but not limited to, criminal justice, employment, education, and health, should additional\xad\nly be tailored to the purpose, provide meaningful access for oversight, include training for any people interacting \nwith the system, and incorporate human consideration for adverse or high-risk decisions. Reporting that includes \na description of these human governance processes and assessment of their timeliness, accessibility, outcomes, \nand effectiveness should be made public whenever possible. \nDefinitions for key terms in The Blueprint for an AI Bill of Rights can be found in Applying the Blueprint for an AI Bill of Rights. \nAccompanying analysis and tools for actualizing each principle can be found in the Technical Companion. \n7\n', 'You should be able to opt out, where appropriate, and \nhave access to a person who can quickly consider and \nremedy problems you encounter. You should be able to opt \nout from automated systems in favor of a human alternative, where \nappropriate. 
Appropriateness should be determined based on rea\xad\nsonable expectations in a given context and with a focus on ensuring \nbroad accessibility and protecting the public from especially harm\xad\nful impacts. In some cases, a human or other alternative may be re\xad\nquired by law. You should have access to timely human consider\xad\nation and remedy by a fallback and escalation process if an automat\xad\ned system fails, it produces an error, or you would like to appeal or \ncontest its impacts on you. Human consideration and fallback \nshould be accessible, equitable, effective, maintained, accompanied \nby appropriate operator training, and should not impose an unrea\xad\nsonable burden on the public. Automated systems with an intended \nuse within sensitive domains, including, but not limited to, criminal \njustice, employment, education, and health, should additionally be \ntailored to the purpose, provide meaningful access for oversight, \ninclude training for any people interacting with the system, and in\xad\ncorporate human consideration for adverse or high-risk decisions. \nReporting that includes a description of these human governance \nprocesses and assessment of their timeliness, accessibility, out\xad\ncomes, and effectiveness should be made public whenever possible. \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK \n46\n']","Effective oversight in automated systems for critical fields like justice and healthcare is ensured by tailoring the systems to their intended purpose, providing meaningful access for oversight, including training for individuals interacting with the system, and incorporating human consideration for adverse or high-risk decisions. Additionally, reporting on human governance processes and assessing their timeliness, accessibility, outcomes, and effectiveness should be made public whenever possible.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 6, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 45, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
"What impact do automated systems have on rights, and how are transparency needs met by current laws?","["" \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nAutomated systems now determine opportunities, from employment to credit, and directly shape the American \npublic’s experiences, from the courtroom to online classrooms, in ways that profoundly impact people’s lives. 
But this \nexpansive impact is not always visible. An applicant might not know whether a person rejected their resume or a \nhiring algorithm moved them to the bottom of the list. A defendant in the courtroom might not know if a judge deny\xad\ning their bail is informed by an automated system that labeled them “high risk.” From correcting errors to contesting \ndecisions, people are often denied the knowledge they need to address the impact of automated systems on their lives. \nNotice and explanations also serve an important safety and efficacy purpose, allowing experts to verify the reasonable\xad\nness of a recommendation before enacting it. \nIn order to guard against potential harms, the American public needs to know if an automated system is being used. \nClear, brief, and understandable notice is a prerequisite for achieving the other protections in this framework. Like\xad\nwise, the public is often unable to ascertain how or why an automated system has made a decision or contributed to a \nparticular outcome. The decision-making processes of automated systems tend to be opaque, complex, and, therefore, \nunaccountable, whether by design or by omission. These factors can make explanations both more challenging and \nmore important, and should not be used as a pretext to avoid explaining important decisions to the people impacted \nby those choices. In the context of automated systems, clear and valid explanations should be recognized as a baseline \nrequirement. \nProviding notice has long been a standard practice, and in many cases is a legal requirement, when, for example, \nmaking a video recording of someone (outside of a law enforcement or national security context). In some cases, such \nas credit, lenders are required to provide notice and explanation to consumers. Techniques used to automate the \nprocess of explaining such systems are under active research and improvement and such explanations can take many \nforms. Innovative companies and researchers are rising to the challenge and creating and deploying explanatory \nsystems that can help the public better understand decisions that impact them. \nWhile notice and explanation requirements are already in place in some sectors or situations, the American public \ndeserve to know consistently and across sectors if an automated system is being used in a way that impacts their rights, \nopportunities, or access. This knowledge should provide confidence in how the public is being treated, and trust in the \nvalidity and reasonable use of automated systems. \n•\nA lawyer representing an older client with disabilities who had been cut off from Medicaid-funded home\nhealth-care assistance couldn't determine why, especially since the decision went against historical access\npractices. 
In a court hearing, the lawyer learned from a witness that the state in which the older client\nlived had recently adopted a new algorithm to determine eligibility.83 The lack of a timely explanation made it\nharder to understand and contest the decision.\nā€¢\nA formal child welfare investigation is opened against a parent based on an algorithm and without the parent\never being notified that data was being collected and used as part of an algorithmic child maltreatment\nrisk assessment.84 The lack of notice or an explanation makes it harder for those performing child\nmaltreatment assessments to validate the risk assessment and denies parents knowledge that could help them\ncontest a decision.\n41\n"", ' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nNOTICE & \nEXPLANATION \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\xad\xad\xad\nPeople in Illinois are given written notice by the private sector if their biometric informa-\ntion is used. The Biometric Information Privacy Act enacted by the state contains a number of provisions \nconcerning the use of individual biometric data and identifiers. Included among them is a provision that no private \nentity may ""collect, capture, purchase, receive through trade, or otherwise obtain"" such information about an \nindividual, unless written notice is provided to that individual or their legally appointed representative. 87\nMajor technology companies are piloting new ways to communicate with the public about \ntheir automated technologies. For example, a collection of non-profit organizations and companies have \nworked together to develop a framework that defines operational approaches to transparency for machine \nlearning systems.88 This framework, and others like it,89 inform the public about the use of these tools, going \nbeyond simple notice to include reporting elements such as safety evaluations, disparity assessments, and \nexplanations of how the systems work. \nLenders are required by federal law to notify consumers about certain decisions made about \nthem. Both the Fair Credit Reporting Act and the Equal Credit Opportunity Act require in certain circumstances \nthat consumers who are denied credit receive ""adverse action"" notices. Anyone who relies on the information in a \ncredit report to deny a consumer credit must, under the Fair Credit Reporting Act, provide an ""adverse action"" \nnotice to the consumer, which includes ""notice of the reasons a creditor took adverse action on the application \nor on an existing credit account.""90 In addition, under the risk-based pricing rule,91 lenders must either inform \nborrowers of their credit score, or else tell consumers when ""they are getting worse terms because of \ninformation in their credit report."" The CFPB has also asserted that ""[t]he law gives every applicant the right to \na specific explanation if their application for credit was denied, and that right is not diminished simply because \na company uses a complex algorithm that it doesn\'t understand.""92 Such explanations illustrate a shared value \nthat certain decisions need to be explained. \nA California law requires that warehouse employees are provided with notice and explana-\ntion about quotas, potentially facilitated by automated systems, that apply to them. 
Warehous-\ning employers in California that use quota systems (often facilitated by algorithmic monitoring systems) are \nrequired to provide employees with a written description of each quota that applies to the employee, including \nā€œquantified number of tasks to be performed or materials to be produced or handled, within the defined \ntime period, and any potential adverse employment action that could result from failure to meet the quota.ā€93\nAcross the federal government, agencies are conducting and supporting research on explain-\nable AI systems. The NIST is conducting fundamental research on the explainability of AI systems. A multidis-\nciplinary team of researchers aims to develop measurement methods and best practices to support the \nimplementation of core tenets of explainable AI.94 The Defense Advanced Research Projects Agency has a \nprogram on Explainable Artificial Intelligence that aims to create a suite of machine learning techniques that \nproduce more explainable models, while maintaining a high level of learning performance (prediction \naccuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging \ngeneration of artificially intelligent partners.95 The National Science Foundationā€™s program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45\n']","Automated systems significantly impact rights by determining opportunities in various areas such as employment and credit, often without individuals being aware of the algorithms influencing decisions. Current laws, such as the Biometric Information Privacy Act in Illinois, require written notice when biometric information is used, and federal laws like the Fair Credit Reporting Act mandate that consumers receive adverse action notices when credit is denied. These laws aim to ensure transparency and provide individuals with the knowledge necessary to contest decisions made by automated systems.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 40, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 44, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What criteria ensure human subject protection in AI evaluations?,"[' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output ļ¬lters; Removing any personally \nidentiļ¬able information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConļ¬guration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConļ¬guration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, diļ¬€erential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConļ¬guration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for ļ¬ne tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Conļ¬guration \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. \nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","Human subject protection in AI evaluations is ensured through several criteria, including: 1) evaluations involving human subjects must meet applicable requirements and be representative of the relevant population; 2) options must be provided for human subjects to withdraw participation or revoke consent for the use of their data; 3) techniques such as anonymization and differential privacy should be used to minimize risks associated with linking AI-generated content back to individual human subjects; 4) documentation of how content provenance data is tracked and how it interacts with privacy and security is necessary, including the removal of personally identifiable information (PII).",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"How does data provenance support ethical AI governance, especially for human protection and bias?","[' \n35 \nMEASURE 2.9: The AI model is explained, validated, 
and documented, and AI system output is interpreted within its context ā€“ as \nidentiļ¬ed in the MAP function ā€“ to inform responsible use and governance. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.9-001 \nApply and document ML explanation results such as: Analysis of embeddings, \nCounterfactual prompts, Gradient-based attributions, Model \ncompression/surrogate models, Occlusion/term reduction. \nConfabulation \nMS-2.9-002 \nDocument GAI model details including: Proposed use and organizational value; \nAssumptions and limitations, Data collection methodologies; Data provenance; \nData quality; Model architecture (e.g., convolutional neural network, \ntransformers, etc.); Optimization objectives; Training algorithms; RLHF \napproaches; Fine-tuning or retrieval-augmented generation approaches; \nEvaluation data; Ethical considerations; Legal and regulatory requirements. \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 2.10: Privacy risk of the AI system ā€“ as identiļ¬ed in the MAP function ā€“ is examined and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.10-001 \nConduct AI red-teaming to assess issues such as: Outputting of training data \nsamples, and subsequent reverse engineering, model extraction, and \nmembership inference risks; Revealing biometric, conļ¬dential, copyrighted, \nlicensed, patented, personal, proprietary, sensitive, or trade-marked information; \nTracking or revealing location information of users or members of training \ndatasets. \nHuman-AI Conļ¬guration; \nInformation Integrity; Intellectual \nProperty \nMS-2.10-002 \nEngage directly with end-users and other stakeholders to understand their \nexpectations and concerns regarding content provenance. Use this feedback to \nguide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n', ' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output ļ¬lters; Removing any personally \nidentiļ¬able information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConļ¬guration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. 
\nData Privacy; Human-AI \nConļ¬guration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, diļ¬€erential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConļ¬guration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for ļ¬ne tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Conļ¬guration \n']","The context does not explicitly mention how data provenance supports ethical AI governance, particularly regarding human protection and bias.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 38, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What factors on data privacy and content integrity should be considered for a GAI system, especially regarding user feedback and transparency?","[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. 
\nHuman-AI Conļ¬guration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, ļ¬ne-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Conļ¬guration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Conļ¬guration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that ļ¬ne-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. \nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. 
Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. \nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","Factors on data privacy and content integrity for a GAI system include documenting the extent to which human domain knowledge is employed to improve GAI system performance, reviewing and verifying sources and citations in GAI system outputs, tracking instances of anthropomorphization in GAI system interfaces, verifying GAI system training data and TEVV data provenance, and regularly reviewing security and safety guardrails. Additionally, structured feedback about content provenance should be recorded and integrated from operators, users, and impacted communities, and there should be an emphasis on digital content transparency regarding the societal impacts of AI and the role of diverse and inclusive content generation.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What challenges did panelists see at the tech-healthcare equity intersection?,"["" \n \n \n \n \nAPPENDIX\nā€¢\nJulia Simon-Mishel, Supervising Attorney, Philadelphia Legal Assistance\nā€¢\nDr. Zachary Mahafza, Research & Data Analyst, Southern Poverty Law Center\nā€¢\nJ. Khadijah Abdurahman, Tech Impact Network Research Fellow, AI Now Institute, UCLA C2I1, and\nUWA Law School\nPanelists separately described the increasing scope of technology use in providing for social welfare, including \nin fraud detection, digital ID systems, and other methods focused on improving efficiency and reducing cost. \nHowever, various panelists individually cautioned that these systems may reduce burden for government \nagencies by increasing the burden and agency of people using and interacting with these technologies. \nAdditionally, these systems can produce feedback loops and compounded harm, collecting data from \ncommunities and using it to reinforce inequality. 
Various panelists suggested that these harms could be \nmitigated by ensuring community input at the beginning of the design process, providing ways to opt out of \nthese systems and use associated human-driven mechanisms instead, ensuring timeliness of benefit payments, \nand providing clear notice about the use of these systems and clear explanations of how and what the \ntechnologies are doing. Some panelists suggested that technology should be used to help people receive \nbenefits, e.g., by pushing benefits to those in need and ensuring automated decision-making systems are only \nused to provide a positive outcome; technology shouldn't be used to take supports away from people who need \nthem. \nPanel 6: The Healthcare System. This event explored current and emerging uses of technology in the \nhealthcare system and consumer products related to health. \nWelcome:\n•\nAlondra Nelson, Deputy Director for Science and Society, White House Office of Science and Technology\nPolicy\n•\nPatrick Gaspard, President and CEO, Center for American Progress\nModerator: Micky Tripathi, National Coordinator for Health Information Technology, U.S Department of \nHealth and Human Services. \nPanelists: \n•\nMark Schneider, Health Innovation Advisor, ChristianaCare\n•\nZiad Obermeyer, Blue Cross of California Distinguished Associate Professor of Policy and Management,\nUniversity of California, Berkeley School of Public Health\n•\nDorothy Roberts, George A. Weiss University Professor of Law and Sociology and the Raymond Pace and\nSadie Tanner Mossell Alexander Professor of Civil Rights, University of Pennsylvania\n•\nDavid Jones, A. Bernard Ackerman Professor of the Culture of Medicine, Harvard University\n•\nJamila Michener, Associate Professor of Government, Cornell University; Co-Director, Cornell Center for\nHealth Equity\xad\nPanelists discussed the impact of new technologies on health disparities; healthcare access, delivery, and \noutcomes; and areas ripe for research and policymaking. Panelists discussed the increasing importance of tech-\nnology as both a vehicle to deliver healthcare and a tool to enhance the quality of care. On the issue of \ndelivery, various panelists pointed to a number of concerns including access to and expense of broadband \nservice, the privacy concerns associated with telehealth systems, the expense associated with health \nmonitoring devices, and how this can exacerbate equity issues. On the issue of technology enhanced care, \nsome panelists spoke extensively about the way in which racial biases and the use of race in medicine \nperpetuate harms and embed prior discrimination, and the importance of ensuring that the technologies used \nin medical care were accountable to the relevant stakeholders. Various panelists emphasized the importance \nof having the voices of those subjected to these technologies be heard.\n59\n""]","Panelists identified several challenges at the tech-healthcare equity intersection, including access to and expense of broadband service, privacy concerns associated with telehealth systems, and the expense of health monitoring devices, which can exacerbate equity issues. 
Additionally, they discussed how racial biases and the use of race in medicine perpetuate harms and embed prior discrimination, emphasizing the need for accountability of the technologies used in medical care and the importance of hearing the voices of those subjected to these technologies.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 58, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What strategies can help reduce IP and privacy risks in AI training data?,"[' \n26 \nMAP 4.1: Approaches for mapping AI technology and legal risks of its components ā€“ including the use of third-party data or \nsoftware ā€“ are in place, followed, and documented, as are risks of infringement of a third-partyā€™s intellectual property or other \nrights. \nAction ID \nSuggested Action \nGAI Risks \nMP-4.1-001 Conduct periodic monitoring of AI-generated content for privacy risks; address any \npossible instances of PII or sensitive data exposure. \nData Privacy \nMP-4.1-002 Implement processes for responding to potential intellectual property infringement \nclaims or other rights. \nIntellectual Property \nMP-4.1-003 \nConnect new GAI policies, procedures, and processes to existing model, data, \nsoftware development, and IT governance and to legal, compliance, and risk \nmanagement activities. \nInformation Security; Data Privacy \nMP-4.1-004 Document training data curation policies, to the extent possible and according to \napplicable laws and policies. \nIntellectual Property; Data Privacy; \nObscene, Degrading, and/or \nAbusive Content \nMP-4.1-005 \nEstablish policies for collection, retention, and minimum quality of data, in \nconsideration of the following risks: Disclosure of inappropriate CBRN information; \nUse of Illegal or dangerous content; Oļ¬€ensive cyber capabilities; Training data \nimbalances that could give rise to harmful biases; Leak of personally identiļ¬able \ninformation, including facial likenesses of individuals. \nCBRN Information or Capabilities; \nIntellectual Property; Information \nSecurity; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-006 Implement policies and practices deļ¬ning how third-party intellectual property and \ntraining data will be used, stored, and protected. \nIntellectual Property; Value Chain \nand Component Integration \nMP-4.1-007 Re-evaluate models that were ļ¬ne-tuned or enhanced on top of third-party \nmodels. \nValue Chain and Component \nIntegration \nMP-4.1-008 \nRe-evaluate risks when adapting GAI models to new domains. Additionally, \nestablish warning systems to determine if a GAI system is being used in a new \ndomain where previous assumptions (relating to context of use or mapped risks \nsuch as security, and safety) may no longer hold. \nCBRN Information or Capabilities; \nIntellectual Property; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-009 Leverage approaches to detect the presence of PII or sensitive data in generated \noutput text, image, video, or audio. 
\nData Privacy \n', "" \n27 \nMP-4.1-010 \nConduct appropriate diligence on training data use to assess intellectual property, \nand privacy, risks, including to examine whether use of proprietary or sensitive \ntraining data is consistent with applicable laws. \nIntellectual Property; Data Privacy \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities \n \nMAP 5.1: Likelihood and magnitude of each identiļ¬ed impact (both potentially beneļ¬cial and harmful) based on expected use, past \nuses of AI systems in similar contexts, public incident reports, feedback from those external to the team that developed or deployed \nthe AI system, or other data are identiļ¬ed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.1-001 Apply TEVV practices for content provenance (e.g., probing a system's synthetic \ndata generation capabilities for potential misuse or vulnerabilities. \nInformation Integrity; Information \nSecurity \nMP-5.1-002 \nIdentify potential content provenance harms of GAI, such as misinformation or \ndisinformation, deepfakes, including NCII, or tampered content. Enumerate and \nrank risks based on their likelihood and potential impact, and determine how well \nprovenance solutions address speciļ¬c risks and/or harms. \nInformation Integrity; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMP-5.1-003 \nConsider disclosing use of GAI to end users in relevant contexts, while considering \nthe objective of disclosure, the context of use, the likelihood and magnitude of the \nrisk posed, the audience of the disclosure, as well as the frequency of the \ndisclosures. \nHuman-AI Conļ¬guration \nMP-5.1-004 Prioritize GAI structured public feedback processes based on risk assessment \nestimates. \nInformation Integrity; CBRN \nInformation or Capabilities; \nDangerous, Violent, or Hateful \nContent; Harmful Bias and \nHomogenization \nMP-5.1-005 Conduct adversarial role-playing exercises, GAI red-teaming, or chaos testing to \nidentify anomalous or unforeseen failure modes. \nInformation Security \nMP-5.1-006 \nProļ¬le threats and negative impacts arising from GAI systems interacting with, \nmanipulating, or generating content, and outlining known and potential \nvulnerabilities and the likelihood of their occurrence. 
\nInformation Security \nAI Actor Tasks: AI Deployment, AI Design, AI Development, AI Impact Assessment, Aļ¬€ected Individuals and Communities, End-\nUsers, Operation and Monitoring \n \n""]","Strategies to reduce IP and privacy risks in AI training data include conducting periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, documenting training data curation policies, establishing policies for collection and retention of data, and conducting appropriate diligence on training data use to assess intellectual property and privacy risks.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 29, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 30, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What goals does the NIST AI Risk Management Framework pursue for safe, equitable AI, especially in transparency and ethics?","[' \nENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public\nAdministration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. The US Department of Transportation has publicly described the health and other benefits of these\nā€œtraffic calmingā€ measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow\xad\nVehicle-Speeds\n17. Karen Hao. Worried about your firmā€™s AI ethics? 
These startups are here to help.\nA growing ecosystem of “responsible AI” ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan 15., 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://\nwww.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for\xad\nin-2021/\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985\xad\nImplementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. Department of Energy Artificial Intelligence and Technology Office. April 18, 2022. https://\nwww.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S Department of Defense Responsible Artificial Intelligence Strategy and\nImplementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation\xad\nPathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for\xad\nthe-intelligence-community\n64\n', ' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. 
\nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identiļ¬ed in this document in \norder to adequately describe an experimental procedure or concept. Such identiļ¬cation is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-proļ¬t, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']","The NIST AI Risk Management Framework aims to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) to realize its full commercial and societal benefits without harm to people or the planet. It also supports the development of safe, secure, and trustworthy AI, emphasizing transparency and ethical considerations in its implementation.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 63, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How do real-time auditing tools help with AI content authenticity and system monitoring?,"[' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Conļ¬guration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. 
\nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentiļ¬able information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identiļ¬ed. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notiļ¬cation format. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a speciļ¬c GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Conļ¬guration \n', ' \n45 \nMG-4.1-007 \nVerify that AI Actors responsible for monitoring reported issues can eļ¬€ectively \nevaluate GAI system performance including the application of content \nprovenance data tracking techniques, and promptly escalate issues for response. \nHuman-AI Conļ¬guration; \nInformation Integrity \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, Domain Experts, End-Users, Human Factors, Operation and \nMonitoring \n \nMANAGE 4.2: Measurable activities for continual improvements are integrated into AI system updates and include regular \nengagement with interested parties, including relevant AI Actors. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.2-001 Conduct regular monitoring of GAI systems and publish reports detailing the \nperformance, feedback received, and improvements made. \nHarmful Bias and Homogenization \nMG-4.2-002 \nPractice and follow incident response plans for addressing the generation of \ninappropriate or harmful content and adapt processes based on ļ¬ndings to \nprevent future occurrences. Conduct post-mortem analyses of incidents with \nrelevant AI Actors, to understand the root causes and implement preventive \nmeasures. \nHuman-AI Conļ¬guration; \nDangerous, Violent, or Hateful \nContent \nMG-4.2-003 Use visualizations or other methods to represent GAI model behavior to ease \nnon-technical stakeholders understanding of GAI system functionality. 
\nHuman-AI Conļ¬guration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Aļ¬€ected Individuals and Communities, End-Users, Operation and \nMonitoring, TEVV \n \nMANAGE 4.3: Incidents and errors are communicated to relevant AI Actors, including aļ¬€ected communities. Processes for tracking, \nresponding to, and recovering from incidents and errors are followed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.3-001 \nConduct after-action assessments for GAI system incidents to verify incident \nresponse and recovery processes are followed and eļ¬€ective, including to follow \nprocedures for communicating incidents to relevant AI Actors and where \napplicable, relevant legal and regulatory bodies. \nInformation Security \nMG-4.3-002 Establish and maintain policies and procedures to record and track GAI system \nreported errors, near-misses, and negative impacts. \nConfabulation; Information \nIntegrity \n']","Real-time auditing tools aid in the tracking and validation of the lineage and authenticity of AI-generated data, which is essential for ensuring the integrity and reliability of the content produced by AI systems.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 48, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What key processes and stakeholder interactions ensure automated systems' safety and effectiveness?,"[' \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nOngoing monitoring. Automated systems should have ongoing monitoring procedures, including recalibra\xad\ntion procedures, in place to ensure that their performance does not fall below an acceptable level over time, \nbased on changing real-world conditions or deployment contexts, post-deployment modification, or unexpect\xad\ned conditions. This ongoing monitoring should include continuous evaluation of performance metrics and \nharm assessments, updates of any systems, and retraining of any machine learning models as necessary, as well \nas ensuring that fallback mechanisms are in place to allow reversion to a previously working system. 
Monitoring should take into account the performance of both technical system components (the algorithm as well as \nany hardware components, data inputs, etc.) and human operators. It should include mechanisms for testing \nthe actual accuracy of any predictions or recommendations generated by a system, not just a human operator\'s \ndetermination of their accuracy. Ongoing monitoring procedures should include manual, human-led monitoring as a check in the event there are shortcomings in automated monitoring systems. These monitoring procedures should be in place for the lifespan of the deployed automated system. \nClear organizational oversight. Entities responsible for the development or use of automated systems \nshould lay out clear governance structures and procedures. This includes clearly-stated governance procedures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing \nassessment and mitigation. Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeople\'s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse \nRelevant and high-quality data. Data used as part of any automated system\'s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. 
\n19\n', ' \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impacted communities to consider concerns and risks that may be unique to those communities, or disproportionately prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakeholders may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, potential risks of the automated system should be identified and mitigated. 
Identified risks should focus on the \npotential for meaningful impact on people\'s rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, measurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unintended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessitate rollback or significant modification to a launched automated system. \n18\n']","Key processes and stakeholder interactions that ensure automated systems' safety and effectiveness include ongoing monitoring procedures, clear organizational oversight, consultation with the public during various phases of development, extensive testing before deployment, and proactive risk identification and mitigation. These processes involve continuous evaluation of performance metrics, involvement of organizational stakeholders, engagement with diverse impacted communities, and adherence to domain-specific best practices for testing.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 18, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What are the effects of bias and uniformity in GAI on data accuracy and user feedback?,"[' \n25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at different stages of AI life cycle. \nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. 
\nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner proficiency with AI system performance and trustworthiness – and relevant \ntechnical standards and certifications – are defined, assessed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certification programs that test proficiency in managing GAI risks and \ninterpreting content provenance, relevant to specific industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proficiency tests from tests of GAI capabilities. \nHuman-AI Configuration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconfigurations for future refinement and improvements. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping \nand testing activities. Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \n \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. 
\nHuman-AI Configuration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval ('go'/'no-go' \ndecisions), monitoring, and decommission decisions. \nHuman-AI Configuration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","The effects of bias and uniformity in GAI on data accuracy and user feedback are related to harmful bias and homogenization, which can compromise the representativeness and relevance of data used in AI systems. This can lead to inaccuracies in the information generated and may affect the quality of user feedback, as it may not accurately reflect diverse perspectives or experiences.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+Which NSF projects align with federal ethics for automated systems?,"[' \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nSome U.S. government agencies have developed specific frameworks for ethical use of AI \nsystems. The Department of Energy (DOE) has activated the AI Advancement Council that oversees coordina-\ntion and advises on implementation of the DOE AI Strategy and addresses issues and/or escalations on the \nethical use and development of AI systems.20 The Department of Defense has adopted Artificial Intelligence \nEthical Principles, and tenets for Responsible Artificial Intelligence specifically tailored to its national \nsecurity and defense activities.21 Similarly, the U.S. 
Intelligence Community (IC) has developed the Principles \nof Artificial Intelligence Ethics for the Intelligence Community to guide personnel on whether and how to \ndevelop and use AI in furtherance of the IC\'s mission, as well as an AI Ethics Framework to help implement \nthese principles.22\nThe National Science Foundation (NSF) funds extensive research to help foster the \ndevelopment of automated systems that adhere to and advance their safety, security and \neffectiveness. Multiple NSF programs support research that directly addresses many of these principles: \nthe National AI Research Institutes23 support research on all aspects of safe, trustworthy, fair, and explainable \nAI algorithms and systems; the Cyber Physical Systems24 program supports research on developing safe \nautonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace25 \nprogram supports research on cybersecurity and privacy enhancing technologies in automated systems; the \nFormal Methods in the Field26 program supports research on rigorous formal verification and analysis of \nautomated systems and machine learning, and the Designing Accountable Software Systems27 program supports \nresearch on rigorous and reproducible methodologies for developing software systems with legal and regulatory \ncompliance in mind. \nSome state legislatures have placed strong transparency and validity requirements on \nthe use of pretrial risk assessments. The use of algorithmic pretrial risk assessments has been a \ncause of concern for civil rights groups.28 Idaho Code Section 19-1910, enacted in 2019,29 requires that any \npretrial risk assessment, before use in the state, first be ""shown to be free of bias against any class of \nindividuals protected from discrimination by state or federal law"", that any locality using a pretrial risk \nassessment must first formally validate the claim of its being free of bias, that ""all documents, records, and \ninformation used to build or validate the risk assessment shall be open to public inspection,"" and that assertions \nof trade secrets cannot be used ""to quash discovery in a criminal matter by a party to a criminal case."" \n22\n', ' \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principles, while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AI, require that AI is: (a) lawful and \nrespectful of our Nation\'s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transparent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. 
\nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulations, and \nmeasures to address harms when they occur, can enhance innovation in the context of complex technologies. Cars, like automated digital systems, comprise a complex collection of components. \nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturers\' ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companies\' reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-Based Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-driven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful uses. The NIST framework will consider and encompass principles such as \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. \n21\n']","The National Science Foundation (NSF) funds extensive research to help foster the development of automated systems that adhere to and advance their safety, security, and effectiveness. 
Multiple NSF programs support research that directly addresses many of these principles, including the National AI Research Institutes, the Cyber Physical Systems program, the Secure and Trustworthy Cyberspace program, the Formal Methods in the Field program, and the Designing Accountable Software Systems program.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 21, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What concerns do panelists raise about AI in criminal justice and its effects on communities and democracy?,"["" \nAPPENDIX\nPanelists discussed the benefits of AI-enabled systems and their potential to build better and more \ninnovative infrastructure. They individually noted that while AI technologies may be new, the process of \ntechnological diffusion is not, and that it was critical to have thoughtful and responsible development and \nintegration of technology within communities. Some panelists suggested that the integration of technology \ncould benefit from examining how technological diffusion has worked in the realm of urban planning: \nlessons learned from successes and failures there include the importance of balancing ownership rights, use \nrights, and community health, safety and welfare, as well as ensuring better representation of all voices, \nespecially those traditionally marginalized by technological advances. Some panelists also raised the issue of \npower structures – providing examples of how strong transparency requirements in smart city projects \nhelped to reshape power and give more voice to those lacking the financial or political power to effect change. \nIn discussion of technical and governance interventions that are needed to protect against the harms \nof these technologies, various panelists emphasized the need for transparency, data collection, and \nflexible and reactive policy development, analogous to how software is continuously updated and deployed. \nSome panelists pointed out that companies need clear guidelines to have a consistent environment for \ninnovation, with principles and guardrails being the key to fostering responsible innovation. \nPanel 2: The Criminal Justice System. This event explored current and emergent uses of technology in \nthe criminal justice system and considered how they advance or undermine public safety, justice, and \ndemocratic values. 
\nWelcome: \n•\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n•\nBen Winters, Counsel, Electronic Privacy Information Center\nModerator: Chiraag Bains, Deputy Assistant to the President on Racial Justice & Equity \nPanelists: \n•\nSean Malinowski, Director of Policing Innovation and Reform, University of Chicago Crime Lab\n•\nKristian Lum, Researcher\n•\nJumana Musa, Director, Fourth Amendment Center, National Association of Criminal Defense Lawyers\n•\nStanley Andrisse, Executive Director, From Prison Cells to PHD; Assistant Professor, Howard University\nCollege of Medicine\n•\nMyaisha Hayes, Campaign Strategies Director, MediaJustice\nPanelists discussed uses of technology within the criminal justice system, including the use of predictive \npolicing, pretrial risk assessments, automated license plate readers, and prison communication tools. The \ndiscussion emphasized that communities deserve safety, and strategies need to be identified that lead to safety; \nsuch strategies might include data-driven approaches, but the focus on safety should be primary, and \ntechnology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that are needed to protect against the harms of \nthese technologies, various panelists emphasized that transparency is important but is not enough to achieve \naccountability. Some panelists discussed their individual views on additional system needs for validity, and \nagreed upon the importance of advisory boards and compensated community input early in the design process \n(before the technology is built and instituted). Various panelists also emphasized the importance of regulation \nthat includes limits to the type and cost of such technologies. \n56\n"", "" \nAPPENDIX\nPanel 4: Artificial Intelligence and Democratic Values. This event examined challenges and opportunities in \nthe design of technology that can help support a democratic vision for AI. It included discussion of the \ntechnical aspects of designing non-discriminatory technology, explainable AI, human-computer \ninteraction with an emphasis on community participation, and privacy-aware design. \nWelcome:\n•\nSorelle Friedler, Assistant Director for Data and Democracy, White House Office of Science and\nTechnology Policy\n•\nJ. Bob Alotta, Vice President for Global Programs, Mozilla Foundation\n•\nNavrina Singh, Board Member, Mozilla Foundation\nModerator: Kathy Pham Evans, Deputy Chief Technology Officer for Product and Engineering, U.S. \nFederal Trade Commission. 
\nPanelists: \n•\nLiz O'Sullivan, CEO, Parity AI\n•\nTimnit Gebru, Independent Scholar\n•\nJennifer Wortman Vaughan, Senior Principal Researcher, Microsoft Research, New York City\n•\nPamela Wisniewski, Associate Professor of Computer Science, University of Central Florida; Director,\nSocio-technical Interaction Research (STIR) Lab\n•\nSeny Kamara, Associate Professor of Computer Science, Brown University\nEach panelist individually emphasized the risks of using AI in high-stakes settings, including the potential for \nbiased data and discriminatory outcomes, opaque decision-making processes, and lack of public trust and \nunderstanding of the algorithmic systems. The interventions and key needs various panelists put forward as \nnecessary to the future design of critical AI systems included ongoing transparency, value sensitive and \nparticipatory design, explanations designed for relevant stakeholders, and public consultation. \nVarious \npanelists emphasized the importance of placing trust in people, not technologies, and in engaging with \nimpacted communities to understand the potential harms of technologies and build protection by design into \nfuture systems. \nPanel 5: Social Welfare and Development. This event explored current and emerging uses of technology to \nimplement or improve social welfare systems, social development programs, and other systems that can impact \nlife chances. \nWelcome:\n•\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n•\nAnne-Marie Slaughter, CEO, New America\nModerator: Michele Evermore, Deputy Director for Policy, Office of Unemployment Insurance \nModernization, Office of the Secretary, Department of Labor \nPanelists:\n•\nBlake Hall, CEO and Founder, ID.Me\n•\nKarrie Karahalios, Professor of Computer Science, University of Illinois, Urbana-Champaign\n•\nChristiaan van Veen, Director of Digital Welfare State and Human Rights Project, NYU School of Law's\nCenter for Human Rights and Global Justice\n58\n""]","Panelists raised concerns about the validity of AI systems used in the criminal justice system, noting that adverse or irrelevant data can lead to a replication of unjust outcomes. They highlighted issues such as confirmation bias and the tendency to defer to potentially inaccurate automated systems. The impact of these systems on individuals and communities is seen as potentially severe, with concerns that they lack individualization, work against the belief that people can change for the better, and can lead to job loss and custody issues. Additionally, surveillance can create chilling effects for communities and send negative signals about how they are viewed. 
Panelists emphasized that while transparency is important, it is not sufficient for achieving accountability, and they discussed the need for regulation that includes limits on the type and cost of such technologies.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 55, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 57, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What must lenders disclose to consumers about adverse actions from automated decisions, and how does this tie into the need for transparency in algorithms affecting rights?,"[' \nNOTICE & \nEXPLANATION \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nPeople in Illinois are given written notice by the private sector if their biometric information is used. The Biometric Information Privacy Act enacted by the state contains a number of provisions \nconcerning the use of individual biometric data and identifiers. Included among them is a provision that no private \nentity may ""collect, capture, purchase, receive through trade, or otherwise obtain"" such information about an \nindividual, unless written notice is provided to that individual or their legally appointed representative. 87\nMajor technology companies are piloting new ways to communicate with the public about \ntheir automated technologies. For example, a collection of non-profit organizations and companies have \nworked together to develop a framework that defines operational approaches to transparency for machine \nlearning systems.88 This framework, and others like it,89 inform the public about the use of these tools, going \nbeyond simple notice to include reporting elements such as safety evaluations, disparity assessments, and \nexplanations of how the systems work. \nLenders are required by federal law to notify consumers about certain decisions made about \nthem. Both the Fair Credit Reporting Act and the Equal Credit Opportunity Act require in certain circumstances \nthat consumers who are denied credit receive ""adverse action"" notices. 
Anyone who relies on the information in a \ncredit report to deny a consumer credit must, under the Fair Credit Reporting Act, provide an ""adverse action"" \nnotice to the consumer, which includes ""notice of the reasons a creditor took adverse action on the application \nor on an existing credit account.""90 In addition, under the risk-based pricing rule,91 lenders must either inform \nborrowers of their credit score, or else tell consumers when ""they are getting worse terms because of \ninformation in their credit report."" The CFPB has also asserted that ""[t]he law gives every applicant the right to \na specific explanation if their application for credit was denied, and that right is not diminished simply because \na company uses a complex algorithm that it doesn\'t understand.""92 Such explanations illustrate a shared value \nthat certain decisions need to be explained. \nA California law requires that warehouse employees are provided with notice and explanation about quotas, potentially facilitated by automated systems, that apply to them. Warehousing employers in California that use quota systems (often facilitated by algorithmic monitoring systems) are \nrequired to provide employees with a written description of each quota that applies to the employee, including \n""quantified number of tasks to be performed or materials to be produced or handled, within the defined \ntime period, and any potential adverse employment action that could result from failure to meet the quota.""93\nAcross the federal government, agencies are conducting and supporting research on explainable AI systems. The NIST is conducting fundamental research on the explainability of AI systems. A multidisciplinary team of researchers aims to develop measurement methods and best practices to support the \nimplementation of core tenets of explainable AI.94 The Defense Advanced Research Projects Agency has a \nprogram on Explainable Artificial Intelligence that aims to create a suite of machine learning techniques that \nproduce more explainable models, while maintaining a high level of learning performance (prediction \naccuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging \ngeneration of artificially intelligent partners.95 The National Science Foundation\'s program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45\n', "" \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nAutomated systems now determine opportunities, from employment to credit, and directly shape the American \npublic's experiences, from the courtroom to online classrooms, in ways that profoundly impact people's lives. But this \nexpansive impact is not always visible. An applicant might not know whether a person rejected their resume or a \nhiring algorithm moved them to the bottom of the list. A defendant in the courtroom might not know if a judge denying their bail is informed by an automated system that labeled them 'high risk.' From correcting errors to contesting \ndecisions, people are often denied the knowledge they need to address the impact of automated systems on their lives. 
\nNotice and explanations also serve an important safety and efficacy purpose, allowing experts to verify the reasonableness of a recommendation before enacting it. \nIn order to guard against potential harms, the American public needs to know if an automated system is being used. \nClear, brief, and understandable notice is a prerequisite for achieving the other protections in this framework. Likewise, the public is often unable to ascertain how or why an automated system has made a decision or contributed to a \nparticular outcome. The decision-making processes of automated systems tend to be opaque, complex, and, therefore, \nunaccountable, whether by design or by omission. These factors can make explanations both more challenging and \nmore important, and should not be used as a pretext to avoid explaining important decisions to the people impacted \nby those choices. In the context of automated systems, clear and valid explanations should be recognized as a baseline \nrequirement. \nProviding notice has long been a standard practice, and in many cases is a legal requirement, when, for example, \nmaking a video recording of someone (outside of a law enforcement or national security context). In some cases, such \nas credit, lenders are required to provide notice and explanation to consumers. Techniques used to automate the \nprocess of explaining such systems are under active research and improvement and such explanations can take many \nforms. Innovative companies and researchers are rising to the challenge and creating and deploying explanatory \nsystems that can help the public better understand decisions that impact them. \nWhile notice and explanation requirements are already in place in some sectors or situations, the American public \ndeserve to know consistently and across sectors if an automated system is being used in a way that impacts their rights, \nopportunities, or access. This knowledge should provide confidence in how the public is being treated, and trust in the \nvalidity and reasonable use of automated systems. \n•\nA lawyer representing an older client with disabilities who had been cut off from Medicaid-funded home\nhealth-care assistance couldn't determine why, especially since the decision went against historical access\npractices. In a court hearing, the lawyer learned from a witness that the state in which the older client\nlived had recently adopted a new algorithm to determine eligibility.83 The lack of a timely explanation made it\nharder to understand and contest the decision.\n•\nA formal child welfare investigation is opened against a parent based on an algorithm and without the parent\never being notified that data was being collected and used as part of an algorithmic child maltreatment\nrisk assessment.84 The lack of notice or an explanation makes it harder for those performing child\nmaltreatment assessments to validate the risk assessment and denies parents knowledge that could help them\ncontest a decision.\n41\n""]","Lenders are required by federal law to notify consumers about certain decisions made about them, specifically through ""adverse action"" notices. This includes providing notice of the reasons a creditor took adverse action on a credit application or existing credit account. 
This requirement ties into the need for transparency in algorithms affecting rights, as it ensures that consumers are informed about the automated decisions impacting their credit, allowing them to understand and contest those decisions.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 44, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 40, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What steps are taken to inform the public about automated decision-making and their rights?,"[' \nNOTICE & \nEXPLANATION \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nPeople in Illinois are given written notice by the private sector if their biometric information is used. The Biometric Information Privacy Act enacted by the state contains a number of provisions \nconcerning the use of individual biometric data and identifiers. Included among them is a provision that no private \nentity may ""collect, capture, purchase, receive through trade, or otherwise obtain"" such information about an \nindividual, unless written notice is provided to that individual or their legally appointed representative. 87\nMajor technology companies are piloting new ways to communicate with the public about \ntheir automated technologies. For example, a collection of non-profit organizations and companies have \nworked together to develop a framework that defines operational approaches to transparency for machine \nlearning systems.88 This framework, and others like it,89 inform the public about the use of these tools, going \nbeyond simple notice to include reporting elements such as safety evaluations, disparity assessments, and \nexplanations of how the systems work. \nLenders are required by federal law to notify consumers about certain decisions made about \nthem. Both the Fair Credit Reporting Act and the Equal Credit Opportunity Act require in certain circumstances \nthat consumers who are denied credit receive ""adverse action"" notices. 
Anyone who relies on the information in a \ncredit report to deny a consumer credit must, under the Fair Credit Reporting Act, provide an ""adverse action"" \nnotice to the consumer, which includes ""notice of the reasons a creditor took adverse action on the application \nor on an existing credit account.""90 In addition, under the risk-based pricing rule,91 lenders must either inform \nborrowers of their credit score, or else tell consumers when ""they are getting worse terms because of \ninformation in their credit report."" The CFPB has also asserted that ""[t]he law gives every applicant the right to \na specific explanation if their application for credit was denied, and that right is not diminished simply because \na company uses a complex algorithm that it doesn\'t understand.""92 Such explanations illustrate a shared value \nthat certain decisions need to be explained. \nA California law requires that warehouse employees are provided with notice and explanation about quotas, potentially facilitated by automated systems, that apply to them. Warehousing employers in California that use quota systems (often facilitated by algorithmic monitoring systems) are \nrequired to provide employees with a written description of each quota that applies to the employee, including \n""quantified number of tasks to be performed or materials to be produced or handled, within the defined \ntime period, and any potential adverse employment action that could result from failure to meet the quota.""93\nAcross the federal government, agencies are conducting and supporting research on explainable AI systems. The NIST is conducting fundamental research on the explainability of AI systems. A multidisciplinary team of researchers aims to develop measurement methods and best practices to support the \nimplementation of core tenets of explainable AI.94 The Defense Advanced Research Projects Agency has a \nprogram on Explainable Artificial Intelligence that aims to create a suite of machine learning techniques that \nproduce more explainable models, while maintaining a high level of learning performance (prediction \naccuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging \ngeneration of artificially intelligent partners.95 The National Science Foundation\'s program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45\n', "" \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nAutomated systems now determine opportunities, from employment to credit, and directly shape the American \npublic's experiences, from the courtroom to online classrooms, in ways that profoundly impact people's lives. But this \nexpansive impact is not always visible. An applicant might not know whether a person rejected their resume or a \nhiring algorithm moved them to the bottom of the list. A defendant in the courtroom might not know if a judge denying their bail is informed by an automated system that labeled them 'high risk.' From correcting errors to contesting \ndecisions, people are often denied the knowledge they need to address the impact of automated systems on their lives. 
\nNotice and explanations also serve an important safety and efficacy purpose, allowing experts to verify the reasonableness of a recommendation before enacting it. \nIn order to guard against potential harms, the American public needs to know if an automated system is being used. \nClear, brief, and understandable notice is a prerequisite for achieving the other protections in this framework. Likewise, the public is often unable to ascertain how or why an automated system has made a decision or contributed to a \nparticular outcome. The decision-making processes of automated systems tend to be opaque, complex, and, therefore, \nunaccountable, whether by design or by omission. These factors can make explanations both more challenging and \nmore important, and should not be used as a pretext to avoid explaining important decisions to the people impacted \nby those choices. In the context of automated systems, clear and valid explanations should be recognized as a baseline \nrequirement. \nProviding notice has long been a standard practice, and in many cases is a legal requirement, when, for example, \nmaking a video recording of someone (outside of a law enforcement or national security context). In some cases, such \nas credit, lenders are required to provide notice and explanation to consumers. Techniques used to automate the \nprocess of explaining such systems are under active research and improvement and such explanations can take many \nforms. Innovative companies and researchers are rising to the challenge and creating and deploying explanatory \nsystems that can help the public better understand decisions that impact them. \nWhile notice and explanation requirements are already in place in some sectors or situations, the American public \ndeserve to know consistently and across sectors if an automated system is being used in a way that impacts their rights, \nopportunities, or access. This knowledge should provide confidence in how the public is being treated, and trust in the \nvalidity and reasonable use of automated systems. \n•\nA lawyer representing an older client with disabilities who had been cut off from Medicaid-funded home\nhealth-care assistance couldn't determine why, especially since the decision went against historical access\npractices. In a court hearing, the lawyer learned from a witness that the state in which the older client\nlived had recently adopted a new algorithm to determine eligibility.83 The lack of a timely explanation made it\nharder to understand and contest the decision.\n•\nA formal child welfare investigation is opened against a parent based on an algorithm and without the parent\never being notified that data was being collected and used as part of an algorithmic child maltreatment\nrisk assessment.84 The lack of notice or an explanation makes it harder for those performing child\nmaltreatment assessments to validate the risk assessment and denies parents knowledge that could help them\ncontest a decision.\n41\n""]","Steps taken to inform the public about automated decision-making and their rights include written notice provided by private entities in Illinois regarding the use of biometric information, federal laws requiring lenders to notify consumers about adverse actions related to credit decisions, and California laws mandating that warehouse employees receive written descriptions of quotas. 
Additionally, major technology companies are developing frameworks for transparency in machine learning systems, and federal agencies are conducting research on explainable AI systems to ensure that the public understands how automated systems impact their rights and opportunities.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 44, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 40, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+How does community assessment help reduce algorithmic bias in the AI Bill of Rights?,"['Applying The Blueprint for an AI Bill of Rights \nDEFINITIONS\nALGORITHMIC DISCRIMINATION: ""Algorithmic discrimination"" occurs when automated systems \ncontribute to unjustified different treatment or impacts disfavoring people based on their race, color, ethnicity, \nsex (including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classification protected by law. Depending on the specific circumstances, such algorithmic discrimination may violate \nlegal protections. Throughout this framework the term ""algorithmic discrimination"" takes this meaning (and \nnot a technical understanding of discrimination as distinguishing between items). \nAUTOMATED SYSTEM: An ""automated system"" is any system, software, or process that uses computation as \nwhole or part of a system to determine outcomes, make or aid decisions, inform policy implementation, collect \ndata or observations, or otherwise interact with individuals and/or communities. Automated systems \ninclude, but are not limited to, systems derived from machine learning, statistics, or other data processing \nor artificial intelligence techniques, and exclude passive computing infrastructure. ""Passive computing \ninfrastructure"" is any intermediary technology that does not influence or determine the outcome of decision, \nmake or aid in decisions, inform policy implementation, or collect data or observations, including web \nhosting, domain registration, networking, caching, data storage, or cybersecurity. Throughout this \nframework, automated systems that are considered in scope are only those that have the potential to \nmeaningfully impact individuals\' or communities\' rights, opportunities, or access. 
\nCOMMUNITIES: ""Communities"" include: neighborhoods; social network connections (both online and \noffline); families (construed broadly); people connected by affinity, identity, or shared traits; and formal organizational ties. This includes Tribes, Clans, Bands, Rancherias, Villages, and other Indigenous communities. AI \nand other data-driven automated systems most directly collect data on, make inferences about, and may cause \nharm to individuals. But the overall magnitude of their impacts may be most readily visible at the level of communities. Accordingly, the concept of community is integral to the scope of the Blueprint for an AI Bill of Rights. \nUnited States law and policy have long employed approaches for protecting the rights of individuals, but existing frameworks have sometimes struggled to provide protections when effects manifest most clearly at a community level. For these reasons, the Blueprint for an AI Bill of Rights asserts that the harms of automated \nsystems should be evaluated, protected against, and redressed at both the individual and community levels. \nEQUITY: ""Equity"" means the consistent and systematic fair, just, and impartial treatment of all individuals. \nSystemic, fair, and just treatment must take into account the status of individuals who belong to underserved \ncommunities that have been denied such treatment, such as Black, Latino, and Indigenous and Native American \npersons, Asian Americans and Pacific Islanders and other persons of color; members of religious minorities; \nwomen, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and intersex (LGBTQI+) \npersons; older adults; persons with disabilities; persons who live in rural areas; and persons otherwise adversely \naffected by persistent poverty or inequality. \nRIGHTS, OPPORTUNITIES, OR ACCESS: ""Rights, opportunities, or access"" is used to indicate the scoping \nof this framework. It describes the set of: civil rights, civil liberties, and privacy, including freedom of speech, \nvoting, and protections from discrimination, excessive punishment, unlawful surveillance, and violations of \nprivacy and other freedoms in both public and private sector contexts; equal opportunities, including equitable \naccess to education, housing, credit, employment, and other programs; or, access to critical resources or \nservices, such as healthcare, financial services, safety, social services, non-deceptive information about goods \nand services, and government benefits. \n10\n', ' \nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable way. Algorithmic discrimination occurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex (including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic information, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. 
Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23\n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 9, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 22, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What role does NIST play in AI safety and risk management?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulļ¬ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staļ¬€ and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. 
\nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identiļ¬ed in this document in \norder to adequately describe an experimental procedure or concept. Such identiļ¬cation is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-proļ¬t, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']","NIST plays a significant role in AI safety and risk management by developing measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence. They are also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI and have established the U.S. AI Safety Institute and the AI Safety Institute Consortium to build the necessary science for the safe and trustworthy development and use of AI.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What governance aspects are key for public safety in automated systems?,"[' \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nOngoing monitoring. Automated systems should have ongoing monitoring procedures, including recalibra\xad\ntion procedures, in place to ensure that their performance does not fall below an acceptable level over time, \nbased on changing real-world conditions or deployment contexts, post-deployment modification, or unexpect\xad\ned conditions. This ongoing monitoring should include continuous evaluation of performance metrics and \nharm assessments, updates of any systems, and retraining of any machine learning models as necessary, as well \nas ensuring that fallback mechanisms are in place to allow reversion to a previously working system. Monitor\xad\ning should take into account the performance of both technical system components (the algorithm as well as \nany hardware components, data inputs, etc.) 
and human operators. It should include mechanisms for testing \nthe actual accuracy of any predictions or recommendations generated by a system, not just a human operatorā€™s \ndetermination of their accuracy. Ongoing monitoring procedures should include manual, human-led monitor\xad\ning as a check in the event there are shortcomings in automated monitoring systems. These monitoring proce\xad\ndures should be in place for the lifespan of the deployed automated system. \nClear organizational oversight. Entities responsible for the development or use of automated systems \nshould lay out clear governance structures and procedures. This includes clearly-stated governance proce\xad\ndures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing \nassessment and mitigation. Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeopleā€™s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse \nRelevant and high-quality data. Data used as part of any automated systemā€™s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19\n', ' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. 
\nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\xad\ntial risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on peopleā€™s rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. 
Assessment and, where possible, mea\xad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. \n18\n']","Key governance aspects for public safety in automated systems include laying out clear governance structures and procedures, establishing responsibility for oversight, involving organizational stakeholders in governance procedures, and ensuring that those in charge are aware of potential impacts on people's rights and opportunities. Additionally, it may be appropriate to conduct an independent ethics review before deployment.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 18, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do content provenance standards impact the performance and risks of third-party GAI systems regarding info integrity and IP?,"[' \n21 \nGV-6.1-005 \nImplement a use-cased based supplier risk assessment framework to evaluate and \nmonitor third-party entitiesā€™ performance and adherence to content provenance \nstandards and technologies to detect anomalies and unauthorized changes; \nservices acquisition and value chain risk management; and legal compliance. \nData Privacy; Information \nIntegrity; Information Security; \nIntellectual Property; Value Chain \nand Component Integration \nGV-6.1-006 Include clauses in contracts which allow an organization to evaluate third-party \nGAI processes and standards. \nInformation Integrity \nGV-6.1-007 Inventory all third-party entities with access to organizational content and \nestablish approved GAI technology and service provider lists. \nValue Chain and Component \nIntegration \nGV-6.1-008 Maintain records of changes to content made by third parties to promote content \nprovenance, including sources, timestamps, metadata. \nInformation Integrity; Value Chain \nand Component Integration; \nIntellectual Property \nGV-6.1-009 \nUpdate and integrate due diligence processes for GAI acquisition and \nprocurement vendor assessments to include intellectual property, data privacy, \nsecurity, and other risks. 
For example, update processes to: Address solutions that \nmay rely on embedded GAI technologies; Address ongoing monitoring, \nassessments, and alerting, dynamic risk assessments, and real-time reporting \ntools for monitoring third-party GAI risks; Consider policy adjustments across GAI \nmodeling libraries, tools and APIs, ļ¬ne-tuned models, and embedded tools; \nAssess GAI vendors, open-source or proprietary GAI tools, or GAI service \nproviders against incident or vulnerability databases. \nData Privacy; Human-AI \nConļ¬guration; Information \nSecurity; Intellectual Property; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nGV-6.1-010 \nUpdate GAI acceptable use policies to address proprietary and open-source GAI \ntechnologies and data, and contractors, consultants, and other third-party \npersonnel. \nIntellectual Property; Value Chain \nand Component Integration \nAI Actor Tasks: Operation and Monitoring, Procurement, Third-party entities \n \nGOVERN 6.2: Contingency processes are in place to handle failures or incidents in third-party data or AI systems deemed to be \nhigh-risk. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.2-001 \nDocument GAI risks associated with system value chain to identify over-reliance \non third-party data and to identify fallbacks. \nValue Chain and Component \nIntegration \nGV-6.2-002 \nDocument incidents involving third-party GAI data and systems, including open-\ndata and open-source software. \nIntellectual Property; Value Chain \nand Component Integration \n', ' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. \nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more signiļ¬cant risks. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-partyā€™s intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize diļ¬€erent types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management eļ¬€orts with third parties (e.g., incidents detected and \nresponse times). 
\nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-deļ¬ned contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property \n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 24, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What goals does the U.S. AI Safety Institute have for NIST's AI risk standards?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulļ¬ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staļ¬€ and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. 
\n \nDisclaimer: Certain commercial entities, equipment, or materials may be identiļ¬ed in this document in \norder to adequately describe an experimental procedure or concept. Such identiļ¬cation is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-proļ¬t, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What org strategies help with AI testing, incident reporting, and risk communication?","[' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identiļ¬cation process. \nHuman-AI Conļ¬guration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identiļ¬cation of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the eļ¬€ectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. \nInformation Security \n', ' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. 
\nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more signiļ¬cant risks. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-partyā€™s intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize diļ¬€erent types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management eļ¬€orts with third parties (e.g., incidents detected and \nresponse times). \nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-deļ¬ned contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. 
\nInformation Integrity; Information \nSecurity; Intellectual Property \n']","Organizational strategies that help with AI testing, incident reporting, and risk communication include establishing policies for measuring the effectiveness of content provenance methodologies, identifying the minimum set of criteria necessary for GAI system incident reporting, and verifying information sharing and feedback mechanisms regarding any negative impact from GAI systems.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What insights did OSTP seek from the biometric tech RFI, and who provided feedback?","['APPENDIX\nSummaries of Additional Engagements: \nā€¢ OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of\nartificial intelligence and other data-driven technologies in their lives.\nā€¢ OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The\npurpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or\nplanned use; the domains in which these technologies are being used; the entities making use of them; current\nprinciples, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their\nuse or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below\nlisted organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L. 
Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium \nat Computing Research Association \nConnected Health Initiative \nConsumer Technology Association \nCourtney Radsch \nCoworker \nCyber Farm Labs \nData & Society Research Institute \nData for Black Lives \nData to Actionable Knowledge Lab \nat Harvard University \nDeloitte \nDev Technology Group \nDigital Therapeutics Alliance \nDigital Welfare State & Human \nRights Project and Center for \nHuman Rights and Global Justice at \nNew York University School of \nLaw, and Temple University \nInstitute for Law, Innovation & \nTechnology \nDignari \nDouglas Goddard \nEdgar Dworsky \nElectronic Frontier Foundation \nElectronic Privacy Information \nCenter, Center for Digital \nDemocracy, and Consumer \nFederation of America \nFaceTec \nFight for the Future \nGanesh Mani \nGeorgia Tech Research Institute \nGoogle \nHealth Information Technology \nResearch and Development \nInteragency Working Group \nHireVue \nHR Policy Association \nID.me \nIdentity and Data Sciences \nLaboratory at Science Applications \nInternational Corporation \nInformation Technology and \nInnovation Foundation \nInformation Technology Industry \nCouncil \nInnocence Project \nInstitute for Human-Centered \nArtificial Intelligence at Stanford \nUniversity \nIntegrated Justice Information \nSystems Institute \nInternational Association of Chiefs \nof Police \nInternational Biometrics + Identity \nAssociation \nInternational Business Machines \nCorporation \nInternational Committee of the Red \nCross \nInventionphysics \niProov \nJacob Boudreau \nJennifer K. Wagner, Dan Berger, \nMargaret Hu, and Sara Katsanis \nJonathan Barry-Blocker \nJoseph Turow \nJoy Buolamwini \nJoy Mack \nKaren Bureau \nLamont Gholston \nLawyersā€™ Committee for Civil \nRights Under Law \n60\n', 'APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew Americaā€™s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. 
Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation \nStephanie Dinkins and the Future \nHistories Studio at Stony Brook \nUniversity \nTechNet \nThe Alliance for Media Arts and \nCulture, MIT Open Documentary \nLab and Co-Creation Studio, and \nImmerse \nThe International Brotherhood of \nTeamsters \nThe Leadership Conference on \nCivil and Human Rights \nThorn \nU.S. Chamber of Commerceā€™s \nTechnology Engagement Center \nUber Technologies \nUniversity of Pittsburgh \nUndergraduate Student \nCollaborative \nUpturn \nUS Technology Policy Committee \nof the Association of Computing \nMachinery \nVirginia Puccio \nVisar Berisha and Julie Liss \nXR Association \nXR Safety Initiative \nā€¢ As an additional effort to reach out to stakeholders regarding the RFI, OSTP conducted two listening sessions\nfor members of the public. The listening sessions together drew upwards of 300 participants. The Science and\nTechnology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61\n']","OSTP sought insights on the extent and variety of biometric technologies in past, current, or planned use; the domains in which these technologies are being used; the entities making use of them; current principles, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their use or regulation. Feedback was provided by 130 organizations and individuals, including Accenture, ACLU, Google, Microsoft Corporation, and many others.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 59, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 60, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What consent practices and design principles can help balance user privacy and surveillance risks in automated systems?,"[' \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nProtect the public from unchecked surveillance \nHeightened oversight of surveillance. 
Surveillance or monitoring systems should be subject to \nheightened oversight that includes at a minimum assessment of potential harms during design (before deploy\xad\nment) and in an ongoing manner, to ensure that the American publicā€™s rights, opportunities, and access are \nprotected. This assessment should be done before deployment and should give special attention to ensure \nthere is not algorithmic discrimination, especially based on community membership, when deployed in a \nspecific real-world context. Such assessment should then be reaffirmed in an ongoing manner as long as the \nsystem is in use. \nLimited and proportionate surveillance. Surveillance should be avoided unless it is strictly necessary \nto achieve a legitimate purpose and it is proportionate to the need. Designers, developers, and deployers of \nsurveillance systems should use the least invasive means of monitoring available and restrict monitoring to the \nminimum number of subjects possible. To the greatest extent possible consistent with law enforcement and \nnational security needs, individuals subject to monitoring should be provided with clear and specific notice \nbefore it occurs and be informed about how the data gathered through surveillance will be used. \nScope limits on surveillance to protect rights and democratic values. Civil liberties and civil \nrights must not be limited by the threat of surveillance or harassment facilitated or aided by an automated \nsystem. Surveillance systems should not be used to monitor the exercise of democratic rights, such as voting, \nprivacy, peaceful assembly, speech, or association, in a way that limits the exercise of civil rights or civil liber\xad\nties. Information about or algorithmically-determined assumptions related to identity should be carefully \nlimited if used to target or guide surveillance systems in order to avoid algorithmic discrimination; such iden\xad\ntity-related information includes group characteristics or affiliations, geographic designations, location-based \nand association-based inferences, social networks, and biometrics. Continuous surveillance and monitoring \nsystems should not be used in physical or digital workplaces (regardless of employment status), public educa\xad\ntional institutions, and public accommodations. Continuous surveillance and monitoring systems should not \nbe used in a way that has the effect of limiting access to critical resources or services or suppressing the exer\xad\ncise of rights, even where the organization is not under a particular duty to protect those rights. \nProvide the public with mechanisms for appropriate and meaningful consent, access, and \ncontrol over their data \nUse-specific consent. Consent practices should not allow for abusive surveillance practices. Where data \ncollectors or automated systems seek consent, they should seek it for specific, narrow use contexts, for specif\xad\nic time durations, and for use by specific entities. Consent should not extend if any of these conditions change; \nconsent should be re-acquired before using data if the use case changes, a time limit elapses, or data is trans\xad\nferred to another entity (including being shared or sold). Consent requested should be limited in scope and \nshould not request consent beyond what is required. Refusal to provide consent should be allowed, without \nadverse effects, to the greatest extent possible based on the needs of the use case. \nBrief and direct consent requests. 
When seeking consent from users short, plain language consent \nrequests should be used so that users understand for what use contexts, time span, and entities they are \nproviding data and metadata consent. User experience research should be performed to ensure these consent \nrequests meet performance standards for readability and comprehension. This includes ensuring that consent \nrequests are accessible to users with disabilities and are available in the language(s) and reading level appro\xad\npriate for the audience. User experience design choices that intentionally obfuscate or manipulate user \nchoice (i.e., ā€œdark patternsā€) should be not be used. \n34\n', ' \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nTraditional terms of serviceā€”the block of text that the public is accustomed to clicking through when using a web\xad\nsite or digital appā€”are not an adequate mechanism for protecting privacy. The American public should be protect\xad\ned via built-in privacy protections, data minimization, use and collection limitations, and transparency, in addition \nto being entitled to clear mechanisms to control access to and use of their dataā€”including their metadataā€”in a \nproactive, informed, and ongoing way. Any automated system collecting, using, sharing, or storing personal data \nshould meet these expectations. \nProtect privacy by design and by default \nPrivacy by design and by default. Automated systems should be designed and built with privacy protect\xad\ned by default. Privacy risks should be assessed throughout the development life cycle, including privacy risks \nfrom reidentification, and appropriate technical and policy mitigation measures should be implemented. This \nincludes potential harms to those who are not users of the automated system, but who may be harmed by \ninferred data, purposeful privacy violations, or community surveillance or other community harms. Data \ncollection should be minimized and clearly communicated to the people whose data is collected. Data should \nonly be collected or used for the purposes of training or testing machine learning models if such collection and \nuse is legal and consistent with the expectations of the people whose data is collected. User experience \nresearch should be conducted to confirm that people understand what data is being collected about them and \nhow it will be used, and that this collection matches their expectations and desires. \nData collection and use-case scope limits. Data collection should be limited in scope, with specific, \nnarrow identified goals, to avoid ""mission creep."" Anticipated data collection should be determined to be \nstrictly necessary to the identified goals and should be minimized as much as possible. Data collected based on \nthese identified goals and for a specific context should not be used in a different context without assessing for \nnew privacy risks and implementing appropriate mitigation measures, which may include express consent. \nClear timelines for data retention should be established, with data deleted as soon as possible in accordance \nwith legal or policy-based limitations. Determined data retention timelines should be documented and justi\xad\nfied. \nRisk identification and mitigation. 
Entities that collect, use, share, or store sensitive data should \nattempt to proactively identify harms and seek to manage them so as to avoid, mitigate, and respond appropri\xad\nately to identified risks. Appropriate responses include determining not to process data when the privacy risks \noutweigh the benefits or implementing measures to mitigate acceptable risks. Appropriate responses do not \ninclude sharing or transferring the privacy risks to users via notice or consent requests where users could not \nreasonably be expected to understand the risks without further support. \nPrivacy-preserving security. Entities creating, using, or governing automated systems should follow \nprivacy and security best practices designed to ensure data and metadata do not leak beyond the specific \nconsented use case. Best practices could include using privacy-enhancing cryptography or other types of \nprivacy-enhancing technologies or fine-grained permissions and access control mechanisms, along with \nconventional system security protocols. \n33\n']","Consent practices that can help balance user privacy and surveillance risks in automated systems include use-specific consent, where consent is sought for specific, narrow use contexts and time durations, and should be re-acquired if conditions change. Additionally, brief and direct consent requests should be used, employing short, plain language to ensure users understand the context and duration of data use. User experience research should be conducted to ensure these requests are accessible and comprehensible, avoiding manipulative design choices. Furthermore, privacy should be protected by design and by default, with privacy risks assessed throughout the development life cycle and data collection minimized to only what is necessary for identified goals.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 33, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 32, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the effects of GAI evaluations on fair content and community input?,"[' \n29 \nMS-1.1-006 \nImplement continuous monitoring of GAI system impacts to identify whether GAI \noutputs are equitable across various sub-populations. Seek active and direct \nfeedback from aļ¬€ected communities via structured feedback mechanisms or red-\nteaming to monitor and improve outputs. \nHarmful Bias and Homogenization \nMS-1.1-007 \nEvaluate the quality and integrity of data used in training and the provenance of \nAI-generated content, for example by employing techniques like chaos \nengineering and seeking stakeholder feedback. 
\nInformation Integrity \nMS-1.1-008 \nDeļ¬ne use cases, contexts of use, capabilities, and negative impacts where \nstructured human feedback exercises, e.g., GAI red-teaming, would be most \nbeneļ¬cial for GAI risk measurement and management based on the context of \nuse. \nHarmful Bias and \nHomogenization; CBRN \nInformation or Capabilities \nMS-1.1-009 \nTrack and document risks or opportunities related to all GAI risks that cannot be \nmeasured quantitatively, including explanations as to why some risks cannot be \nmeasured (e.g., due to technological limitations, resource constraints, or \ntrustworthy considerations). Include unmeasured risks in marginal risks. \nInformation Integrity \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMEASURE 1.3: Internal experts who did not serve as front-line developers for the system and/or independent assessors are \ninvolved in regular assessments and updates. Domain experts, users, AI Actors external to the team that developed or deployed the \nAI system, and aļ¬€ected communities are consulted in support of assessments as necessary per organizational risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.3-001 \nDeļ¬ne relevant groups of interest (e.g., demographic groups, subject matter \nexperts, experience with GAI technology) within the context of use as part of \nplans for gathering structured public feedback. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-002 \nEngage in internal and external evaluations, GAI red-teaming, impact \nassessments, or other structured human feedback exercises in consultation \nwith representative AI Actors with expertise and familiarity in the context of \nuse, and/or who are representative of the populations associated with the \ncontext of use. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-003 \nVerify those conducting structured human feedback exercises are not directly \ninvolved in system development tasks for the same GAI model. \nHuman-AI Conļ¬guration; Data \nPrivacy \nAI Actor Tasks: AI Deployment, AI Development, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Domain Experts, \nEnd-Users, Operation and Monitoring, TEVV \n \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. 
\nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. \nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 32, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What risks come from easier access to violent content, especially regarding CBRN knowledge and misinformation?","[' \n4 \n1. 
CBRN Information or Capabilities: Eased access to or synthesis of materially nefarious \ninformation or design capabilities related to chemical, biological, radiological, or nuclear (CBRN) \nweapons or other dangerous materials or agents. \n2. Confabulation: The production of confidently stated but erroneous or false content (known \ncolloquially as “hallucinations” or “fabrications”) by which users may be misled or deceived.6 \n3. Dangerous, Violent, or Hateful Content: Eased production of and access to violent, inciting, \nradicalizing, or threatening content as well as recommendations to carry out self-harm or \nconduct illegal activities. Includes difficulty controlling public exposure to hateful and disparaging \nor stereotyping content. \n4. Data Privacy: Impacts due to leakage and unauthorized use, disclosure, or de-anonymization of \nbiometric, health, location, or other personally identifiable information or sensitive data.7 \n5. Environmental Impacts: Impacts due to high compute resource utilization in training or \noperating GAI models, and related outcomes that may adversely impact ecosystems. \n6. Harmful Bias or Homogenization: Amplification and exacerbation of historical, societal, and \nsystemic biases; performance disparities8 between sub-groups or languages, possibly due to \nnon-representative training data, that result in discrimination, amplification of biases, or \nincorrect presumptions about performance; undesired homogeneity that skews system or model \noutputs, which may be erroneous, lead to ill-founded decision-making, or amplify harmful \nbiases. \n7. Human-AI Configuration: Arrangements of or interactions between a human and an AI system \nwhich can result in the human inappropriately anthropomorphizing GAI systems or experiencing \nalgorithmic aversion, automation bias, over-reliance, or emotional entanglement with GAI \nsystems. \n8. Information Integrity: Lowered barrier to entry to generate and support the exchange and \nconsumption of content which may not distinguish fact from opinion or fiction or acknowledge \nuncertainties, or could be leveraged for large-scale dis- and mis-information campaigns. \n9. Information Security: Lowered barriers for offensive cyber capabilities, including via automated \ndiscovery and exploitation of vulnerabilities to ease hacking, malware, phishing, offensive cyber \n \n \n6 Some commenters have noted that the terms “hallucination” and “fabrication” anthropomorphize GAI, which \nitself is a risk related to GAI systems as it can inappropriately attribute human characteristics to non-human \nentities. \n7 What is categorized as sensitive data or sensitive PII can be highly contextual based on the nature of the \ninformation, but examples of sensitive information include information that relates to an information subject’s \nmost intimate sphere, including political opinions, sex life, or criminal convictions. \n8 The notion of harm presumes some baseline scenario that the harmful factor (e.g., a GAI model) makes worse. \nWhen the mechanism for potential harm is a disparity between groups, it can be difficult to establish what the \nmost appropriate baseline is to compare against, which can result in divergent views on when a disparity between \nAI behaviors for different subgroups constitutes a harm. In discussing harms from disparities such as biased \nbehavior, this document highlights examples where someone’s situation is worsened relative to what it would have \nbeen in the absence of any AI system, making the outcome unambiguously a harm of the system. \n', ' \n5 \noperations, or other cyberattacks; increased attack surface for targeted cyberattacks, which may \ncompromise a system’s availability or the confidentiality or integrity of training data, code, or \nmodel weights. \n10. Intellectual Property: Eased production or replication of alleged copyrighted, trademarked, or \nlicensed content without authorization (possibly in situations which do not fall under fair use); \neased exposure of trade secrets; or plagiarism or illegal replication. \n11. Obscene, Degrading, and/or Abusive Content: Eased production of and access to obscene, \ndegrading, and/or abusive imagery which can cause harm, including synthetic child sexual abuse \nmaterial (CSAM), and nonconsensual intimate images (NCII) of adults. \n12. Value Chain and Component Integration: Non-transparent or untraceable integration of \nupstream third-party components, including data that has been improperly obtained or not \nprocessed and cleaned due to increased automation from GAI; improper supplier vetting across \nthe AI lifecycle; or other issues that diminish transparency or accountability for downstream \nusers. \n2.1. CBRN Information or Capabilities \nIn the future, GAI may enable malicious actors to more easily access CBRN weapons and/or relevant \nknowledge, information, materials, tools, or technologies that could be misused to assist in the design, \ndevelopment, production, or use of CBRN weapons or other dangerous materials or agents. While \nrelevant biological and chemical threat knowledge and information is often publicly accessible, LLMs \ncould facilitate its analysis or synthesis, particularly by individuals without formal scientific training or \nexpertise. \nRecent research on this topic found that LLM outputs regarding biological threat creation and attack \nplanning provided minimal assistance beyond traditional search engine queries, suggesting that state-of-\nthe-art LLMs at the time these studies were conducted do not substantially increase the operational \nlikelihood of such an attack. The physical synthesis development, production, and use of chemical or \nbiological agents will continue to require both applicable expertise and supporting materials and \ninfrastructure. The impact of GAI on chemical or biological agent misuse will depend on what the key \nbarriers for malicious actors are (e.g., whether information access is one such barrier), and how well GAI \ncan help actors address those barriers. \nFurthermore, chemical and biological design tools (BDTs) – highly specialized AI systems trained on \nscientific data that aid in chemical and biological design – may augment design capabilities in chemistry \nand biology beyond what text-based LLMs are able to provide. As these models become more \nefficacious, including for beneficial uses, it will be important to assess their potential to be used for \nharm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems’ connection or access to relevant data and tools. 
\nTrustworthy AI Characteristic: Safe, Explainable and Interpretable \n']","Eased access to violent content can lead to the production of and access to violent, inciting, radicalizing, or threatening content, as well as recommendations to carry out self-harm or conduct illegal activities. This includes difficulty controlling public exposure to hateful and disparaging or stereotyping content. Additionally, the lowered barrier to generate and support the exchange of content may not distinguish fact from opinion or acknowledge uncertainties, which could be leveraged for large-scale dis- and mis-information campaigns, potentially impacting the operational likelihood of attacks involving CBRN knowledge.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 7, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 8, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What factors on data privacy and content integrity should be considered for a GAI system, especially regarding user feedback and transparency?","[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Conļ¬guration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, ļ¬ne-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Conļ¬guration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. 
Human-AI Conļ¬guration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that ļ¬ne-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. \nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. 
\nHuman-AI Configuration; \nInformation Security \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \n""]","Factors on data privacy and content integrity for a GAI system include documenting the extent to which human domain knowledge is employed to improve GAI system performance, reviewing and verifying sources and citations in GAI system outputs, tracking instances of anthropomorphization in GAI system interfaces, verifying GAI system training data and TEVV data provenance, and regularly reviewing security and safety guardrails. Additionally, structured feedback about content provenance should be recorded and integrated from operators, users, and impacted communities, and there should be an emphasis on digital content transparency regarding the societal impacts of AI and the role of diverse and inclusive content generation.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+"What goals does PAVE have for racial equity and valuing marginalized communities?","["" \n \n \n \nENDNOTES\n47. Darshali A. Vyas et al., Hidden in Plain Sight – Reconsidering the Use of Race Correction in Clinical\nAlgorithms, 383 N. Engl. J. Med. 874, 876-78 (Aug. 27, 2020), https://www.nejm.org/doi/full/10.1056/\nNEJMms2004740.\n48. The definitions of 'equity' and 'underserved communities' can be found in the Definitions section of\nthis framework as well as in Section 2 of The Executive Order On Advancing Racial Equity and Support\nfor Underserved Communities Through the Federal Government. https://www.whitehouse.gov/\nbriefing-room/presidential-actions/2021/01/20/executive-order-advancing-racial-equity-and-support-for-underserved-communities-through-the-federal-government/\n49. Id.\n50. Various organizations have offered proposals for how such assessments might be designed. See, e.g.,\nEmanuel Moss, Elizabeth Anne Watkins, Ranjit Singh, Madeleine Clare Elish, and Jacob Metcalf.\nAssembling Accountability: Algorithmic Impact Assessment for the Public Interest. Data & Society\nResearch Institute Report. June 29, 2021. https://datasociety.net/library/assembling-accountability-algorithmic-impact-assessment-for-the-public-interest/; Nicol Turner Lee, Paul Resnick, and Genie\nBarton. Algorithmic bias detection and mitigation: Best practices and policies to reduce consumer harms.\nBrookings Report. 
May 22, 2019.\nhttps://www.brookings.edu/research/algorithmic-bias-detection-and-mitigation-best-practices-and-policies-to-reduce-consumer-harms/; Andrew D. Selbst. An Institutional View Of Algorithmic Impact\nAssessments. Harvard Journal of Law & Technology. June 15, 2021. https://ssrn.com/abstract=3867634;\nDillon Reisman, Jason Schultz, Kate Crawford, and Meredith Whittaker. Algorithmic Impact\nAssessments: A Practical Framework for Public Agency Accountability. AI Now Institute Report. April\n2018. https://ainowinstitute.org/aiareport2018.pdf\n51. Department of Justice. Justice Department Announces New Initiative to Combat Redlining. Oct. 22,\n2021. https://www.justice.gov/opa/pr/justice-department-announces-new-initiative-combat-redlining\n52. PAVE Interagency Task Force on Property Appraisal and Valuation Equity. Action Plan to Advance\nProperty Appraisal and Valuation Equity: Closing the Racial Wealth Gap by Addressing Mis-valuations for\nFamilies and Communities of Color. March 2022. https://pave.hud.gov/sites/pave.hud.gov/files/\ndocuments/PAVEActionPlan.pdf\n53. U.S. Equal Employment Opportunity Commission. The Americans with Disabilities Act and the Use of\nSoftware, Algorithms, and Artificial Intelligence to Assess Job Applicants and Employees. EEOC-NVTA-2022-2. May 12, 2022. https://www.eeoc.gov/laws/guidance/americans-disabilities-act-and-use-software-algorithms-and-artificial-intelligence; U.S. Department of Justice. Algorithms, Artificial\nIntelligence, and Disability Discrimination in Hiring. May 12, 2022. https://beta.ada.gov/resources/ai-guidance/\n54. Ziad Obermeyer, Brian Powers, Christine Vogeli, and Sendhil Mullainathan. Dissecting racial bias in\nan algorithm used to manage the health of populations. Science. Vol. 366, No. 6464. Oct. 25, 2019. https://\nwww.science.org/doi/10.1126/science.aax2342\n55. Data & Trust Alliance. Algorithmic Bias Safeguards for Workforce: Overview. Jan. 2022. https://\ndataandtrustalliance.org/Algorithmic_Bias_Safeguards_for_Workforce_Overview.pdf\n56. Section 508.gov. IT Accessibility Laws and Policies. Access Board. https://www.section508.gov/\nmanage/laws-and-policies/\n67\n"", "" \n \n \n \nENDNOTES\n1. The Executive Order On Advancing Racial Equity and Support for Underserved Communities Through the\nFederal Government. https://www.whitehouse.gov/briefing-room/presidential-actions/2021/01/20/executive-order-advancing-racial-equity-and-support-for-underserved-communities-through-the-federal-government/\n2. The White House. Remarks by President Biden on the Supreme Court Decision to Overturn Roe v. Wade. Jun.\n24, 2022. https://www.whitehouse.gov/briefing-room/speeches-remarks/2022/06/24/remarks-by-president-biden-on-the-supreme-court-decision-to-overturn-roe-v-wade/\n3. The White House. Join the Effort to Create A Bill of Rights for an Automated Society. Nov. 10, 2021. https://\nwww.whitehouse.gov/ostp/news-updates/2021/11/10/join-the-effort-to-create-a-bill-of-rights-for-an-automated-society/\n4. U.S. Dept. of Health, Educ. & Welfare, Report of the Sec'y's Advisory Comm. on Automated Pers. Data Sys.,\nRecords, Computers, and the Rights of Citizens (July 1973). https://www.justice.gov/opcl/docs/rec-com-rights.pdf.\n5. See, e.g., Office of Mgmt. & Budget, Exec. Office of the President, Circular A-130, Managing Information as a\nStrategic Resource, app. II § 3 (July 28, 2016); Org. of Econ. 
Co-Operation & Dev., Revision of the\nRecommendation of the Council Concerning Guidelines Governing the Protection of Privacy and Transborder\nFlows of Personal Data, Annex Part Two (June 20, 2013). https://one.oecd.org/document/C(2013)79/en/pdf.\n6. Andrew Wong et al. External validation of a widely implemented proprietary sepsis prediction model in\nhospitalized patients. JAMA Intern Med. 2021; 181(8):1065-1070. doi:10.1001/jamainternmed.2021.2626\n7. Jessica Guynn. Facebook while black: Users call it getting 'Zucked,' say talking about racism is censored as hate\nspeech. USA Today. Apr. 24, 2019. https://www.usatoday.com/story/news/2019/04/24/facebook-while-black-zucked-users-say-they-get-blocked-racism-discussion/2859593002/\n8. See, e.g., Michael Levitt. AirTags are being used to track people and cars. Here's what is being done about it.\nNPR. Feb. 18, 2022. https://www.npr.org/2022/02/18/1080944193/apple-airtags-theft-stalking-privacy-tech;\nSamantha Cole. Police Records Show Women Are Being Stalked With Apple AirTags Across the Country.\nMotherboard. Apr. 6, 2022. https://www.vice.com/en/article/y3vj3y/apple-airtags-police-reports-stalking-harassment\n9. Kristian Lum and William Isaac. To Predict and Serve? Significance. Vol. 13, No. 5, p. 14-19. Oct. 7, 2016.\nhttps://rss.onlinelibrary.wiley.com/doi/full/10.1111/j.1740-9713.2016.00960.x; Aaron Sankin, Dhruv Mehrotra,\nSurya Mattu, and Annie Gilbertson. Crime Prediction Software Promised to Be Free of Biases. New Data Shows\nIt Perpetuates Them. The Markup and Gizmodo. Dec. 2, 2021. https://themarkup.org/prediction-bias/2021/12/02/crime-prediction-software-promised-to-be-free-of-biases-new-data-shows-it-perpetuates-them\n10. Samantha Cole. This Horrifying App Undresses a Photo of Any Woman With a Single Click. Motherboard.\nJune 26, 2019. https://www.vice.com/en/article/kzm59x/deepn""]",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 66, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 62, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+"What steps ensure automated systems reduce bias and promote equity?","[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. 
Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and intersex \n(LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. 
At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. 
\n28\nAlgorithmic \nDiscrimination \nProtections \n']","To ensure automated systems reduce bias and promote equity, several steps should be taken: 1) Conduct proactive equity assessments during the design phase to identify potential discrimination and effects on equity; 2) Use representative and robust data that reflects local communities and is reviewed for bias; 3) Guard against proxies by avoiding the direct use of demographic information in system design and testing for correlations; 4) Allow independent evaluations of potential algorithmic discrimination; 5) Provide reporting of algorithmic impact assessments that detail consultations, equity assessments, and any disparities found, ensuring transparency and public accountability.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+"How does threat modeling help with GAI risk and org policies on transparency?","[' \n18 \nGOVERN 3.2: Policies and procedures are in place to define and differentiate roles and responsibilities for human-AI configurations \nand oversight of AI systems. \nAction ID \nSuggested Action \nGAI Risks \nGV-3.2-001 \nPolicies are in place to bolster oversight of GAI systems with independent \nevaluations or assessments of GAI models or systems where the type and \nrobustness of evaluations are proportional to the identified risks. \nCBRN Information or Capabilities; \nHarmful Bias and Homogenization \nGV-3.2-002 \nConsider adjustment of organizational roles and components across lifecycle \nstages of large or complex GAI systems, including: Test and evaluation, validation, \nand red-teaming of GAI systems; GAI content moderation; GAI system \ndevelopment and engineering; Increased accessibility of GAI tools, interfaces, and \nsystems, Incident response and containment. \nHuman-AI Configuration; \nInformation Security; Harmful Bias \nand Homogenization \nGV-3.2-003 \nDefine acceptable use policies for GAI interfaces, modalities, and human-AI \nconfigurations (i.e., for chatbots and decision-making tasks), including criteria for \nthe kinds of queries GAI applications should refuse to respond to. \nHuman-AI Configuration \nGV-3.2-004 \nEstablish policies for user feedback mechanisms for GAI systems which include \nthorough instructions and any mechanisms for recourse. \nHuman-AI Configuration \nGV-3.2-005 \nEngage in threat modeling to anticipate potential risks from GAI systems. 
\nCBRN Information or Capabilities; \nInformation Security \nAI Actors: AI Design \n \nGOVERN 4.1: Organizational policies and practices are in place to foster a critical thinking and safety-first mindset in the design, \ndevelopment, deployment, and uses of AI systems to minimize potential negative impacts. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.1-001 \nEstablish policies and procedures that address continual improvement processes \nfor GAI risk measurement. Address general risks associated with a lack of \nexplainability and transparency in GAI systems by using ample documentation and \ntechniques such as: application of gradient-based attributions, occlusion/term \nreduction, counterfactual prompts and prompt engineering, and analysis of \nembeddings; Assess and update risk measurement approaches at regular \ncadences. \nConfabulation \nGV-4.1-002 \nEstablish policies, procedures, and processes detailing risk measurement in \ncontext of use with standardized measurement protocols and structured public \nfeedback exercises such as AI red-teaming or independent external evaluations. \nCBRN Information and Capability; \nValue Chain and Component \nIntegration \n', ' \n14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. \nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organization's risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or defining risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, offensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces significant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. \nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval ("go"/"no-go") policies, procedures, and processes, \nwith reviewed processes and approval thresholds reflecting measurement of GAI \ncapabilities and risks. 
\nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or offensive cyber capabilities. \nCBRN Information or Capabilities; \nInformation Security \n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 21, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 17, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+"How does the AI Incident Database help with AI challenges in cybersecurity and mental health?","[' \n54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant. \nhttps://www.mandiant.com/resources/blog/securing-ai-pipeline \nBurgess, M. (2024) Generative AI's Biggest Security Flaw Is Not Easy to Fix. WIRED. \nhttps://www.wired.com/story/generative-ai-prompt-injection-hacking/ \nBurtell, M. et al. (2024) The Surprising Power of Next Word Prediction: Large Language Models \nExplained, Part 1. Georgetown Center for Security and Emerging Technology. \nhttps://cset.georgetown.edu/article/the-surprising-power-of-next-word-prediction-large-language-\nmodels-explained-part-1/ \nCanadian Centre for Cyber Security (2023) Generative artificial intelligence (AI) - ITSAP.00.041. \nhttps://www.cyber.gc.ca/en/guidance/generative-artificial-intelligence-ai-itsap00041 \nCarlini, N., et al. (2021) Extracting Training Data from Large Language Models. Usenix. 
\nhttps://www.usenix.org/conference/usenixsecurity21/presentation/carlini-extracting \nCarlini, N. et al. (2023) Quantifying Memorization Across Neural Language Models. ICLR 2023. \nhttps://arxiv.org/pdf/2202.07646 \nCarlini, N. et al. (2024) Stealing Part of a Production Language Model. arXiv. \nhttps://arxiv.org/abs/2403.06634 \nChandra, B. et al. (2023) Dismantling the Disinformation Business of Chinese Influence Operations. \nRAND. https://www.rand.org/pubs/commentary/2023/10/dismantling-the-disinformation-business-of-\nchinese.html \nCiriello, R. et al. (2024) Ethical Tensions in Human-AI Companionship: A Dialectical Inquiry into Replika. \nResearchGate. https://www.researchgate.net/publication/374505266_Ethical_Tensions_in_Human-\nAI_Companionship_A_Dialectical_Inquiry_into_Replika \nDahl, M. et al. (2024) Large Legal Fictions: Profiling Legal Hallucinations in Large Language Models. arXiv. \nhttps://arxiv.org/abs/2401.01301 \n', "" \n55 \nDe Angelo, D. (2024) Short, Mid and Long-Term Impacts of AI in Cybersecurity. Palo Alto Networks. \nhttps://www.paloaltonetworks.com/blog/2024/02/impacts-of-ai-in-cybersecurity/ \nDe Freitas, J. et al. (2023) Chatbots and Mental Health: Insights into the Safety of Generative AI. Harvard \nBusiness School. https://www.hbs.edu/ris/Publication%20Files/23-011_c1bdd417-f717-47b6-bccb-\n5438c6e65c1a_f6fd9798-3c2d-4932-b222-056231fe69d7.pdf \nDietvorst, B. et al. (2014) Algorithm Aversion: People Erroneously Avoid Algorithms After Seeing Them \nErr. Journal of Experimental Psychology. https://marketing.wharton.upenn.edu/wp-\ncontent/uploads/2016/10/Dietvorst-Simmons-Massey-2014.pdf \nDuhigg, C. (2012) How Companies Learn Your Secrets. New York Times. \nhttps://www.nytimes.com/2012/02/19/magazine/shopping-habits.html \nElsayed, G. et al. (2024) Images altered to trick machine vision can influence humans too. Google \nDeepMind. https://deepmind.google/discover/blog/images-altered-to-trick-machine-vision-can-\ninfluence-humans-too/ \nEpstein, Z. et al. (2023). Art and the science of generative AI. Science. \nhttps://www.science.org/doi/10.1126/science.adh4451 \nFeffer, M. et al. (2024) Red-Teaming for Generative AI: Silver Bullet or Security Theater? arXiv. \nhttps://arxiv.org/pdf/2401.15897 \nGlazunov, S. et al. (2024) Project Naptime: Evaluating Offensive Security Capabilities of Large Language \nModels. Project Zero. https://googleprojectzero.blogspot.com/2024/06/project-naptime.html \nGreshake, K. et al. (2023) Not what you've signed up for: Compromising Real-World LLM-Integrated \nApplications with Indirect Prompt Injection. arXiv. https://arxiv.org/abs/2302.12173 \nHagan, M. (2024) Good AI Legal Help, Bad AI Legal Help: Establishing quality standards for responses to \npeople's legal problem stories. SSRN. https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4696936 \nHaran, R. (2023) Securing LLM Systems Against Prompt Injection. NVIDIA. \nhttps://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection/ \nInformation Technology Industry Council (2024) Authenticating AI-Generated Content. \nhttps://www.itic.org/policy/ITI_AIContentAuthorizationPolicy_122123.pdf \nJain, S. et al. (2023) Algorithmic Pluralism: A Structural Approach To Equal Opportunity. arXiv. \nhttps://arxiv.org/pdf/2305.08157 \nJi, Z. et al (2023) Survey of Hallucination in Natural Language Generation. ACM Comput. Surv. 55, 12, \nArticle 248. https://doi.org/10.1145/3571730 \nJones-Jang, S. et al. (2022) How do people react to AI failure? 
Automation bias, algorithmic aversion, and \nperceived controllability. Oxford. https://academic.oup.com/jcmc/article/28/1/zmac029/6827859] \nJussupow, E. et al. (2020) Why Are We Averse Towards Algorithms? A Comprehensive Literature Review \non Algorithm Aversion. ECIS 2020. https://aisel.aisnet.org/ecis2020_rp/168/ \nKalai, A., et al. (2024) Cal""]",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 57, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 58, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+"What steps ensure automated systems avoid bias and maintain safety?","[' \nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable \nway. \nAlgorithmic \ndiscrimination \noccurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex \n(including \npregnancy, \nchildbirth, \nand \nrelated \nmedical \nconditions, \ngender \nidentity, \nintersex \nstatus, \nand \nsexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic information, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23\n', ' \nSECTION TITLE\nBLUEPRINT FOR AN AI BILL OF RIGHTS\nSAFE AND EFFECTIVE SYSTEMS\nYou should be protected from unsafe or ineffective systems. Automated systems should be \ndeveloped with consultation from diverse communities, stakeholders, and domain experts to identify \nconcerns, risks, and potential impacts of the system. Systems should undergo pre-deployment testing, risk \nidentification and mitigation, and ongoing monitoring that demonstrate they are safe and effective based on \ntheir intended use, mitigation of unsafe outcomes including those beyond the intended use, and adherence to \ndomain-specific standards. Outcomes of these protective measures should include the possibility of not \ndeploying the system or removing a system from use. Automated systems should not be designed with an intent \nor reasonably foreseeable possibility of endangering your safety or the safety of your community. They should \nbe designed to proactively protect you from harms stemming from unintended, yet foreseeable, uses or \nimpacts of automated systems. You should be protected from inappropriate or irrelevant data use in the \ndesign, development, and deployment of automated systems, and from the compounded harm of its reuse. \nIndependent evaluation and reporting that confirms that the system is safe and effective, including reporting of \nsteps taken to mitigate potential harms, should be performed and the results made public whenever possible. \nALGORITHMIC DISCRIMINATION PROTECTIONS\nYou should not face discrimination by algorithms and systems should be used and designed in \nan equitable way. Algorithmic discrimination occurs when automated systems contribute to unjustified \ndifferent treatment or impacts disfavoring people based on their race, color, ethnicity, sex (including \npregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other \nclassification protected by law. Depending on the specific circumstances, such algorithmic discrimination \nmay violate legal protections. Designers, developers, and deployers of automated systems should take \nproactive \nand \ncontinuous \nmeasures \nto \nprotect \nindividuals \nand \ncommunities \nfrom algorithmic \ndiscrimination and to use and design systems in an equitable way. This protection should include proactive \nequity assessments as part of the system design, use of representative data and protection against proxies \nfor demographic features, ensuring accessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent \nevaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5\n']","To ensure automated systems avoid bias and maintain safety, designers, developers, and deployers should take proactive and continuous measures, including conducting proactive equity assessments as part of system design, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing and mitigation, and maintaining clear organizational oversight. 
Additionally, independent evaluation and reporting should confirm that the system is safe and effective, including steps taken to mitigate potential harms.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 22, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 4, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What methods work for evaluating biases in AI content with diverse user feedback?,"[' \n38 \nMEASURE 2.13: Eļ¬€ectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric eļ¬€ectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are diļ¬ƒcult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might aļ¬€ect \ndiļ¬€erent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. 
\nHarmful Bias and Homogenization \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. \nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","The context mentions evaluating potential biases and stereotypes that could emerge from AI-generated content using appropriate methodologies, including computational testing methods as well as evaluating structured feedback input. 
Additionally, it suggests recording and integrating structured feedback about content provenance from operators, users, and potentially impacted communities through methods such as user research studies, focus groups, or community forums.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are the U.S. AI Safety Institute's goals for NIST's AI risk standards?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulļ¬ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staļ¬€ and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identiļ¬ed in this document in \norder to adequately describe an experimental procedure or concept. 
Such identiļ¬cation is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-proļ¬t, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"How might AI tech reinforce inequities in education, housing, and jobs, and add burdens on those using social welfare?","["" \n \nAPPENDIX\nPanel 3: Equal Opportunities and Civil Justice. This event explored current and emerging uses of \ntechnology that impact equity of opportunity in employment, education, and housing. \nWelcome: \nā€¢\nRashida Richardson, Senior Policy Advisor for Data and Democracy, White House Office of Science and\nTechnology Policy\nā€¢\nDominique Harrison, Director for Technology Policy, The Joint Center for Political and Economic\nStudies\nModerator: Jenny Yang, Director, Office of Federal Contract Compliance Programs, Department of Labor \nPanelists: \nā€¢\nChristo Wilson, Associate Professor of Computer Science, Northeastern University\nā€¢\nFrida Polli, CEO, Pymetrics\nā€¢\nKaren Levy, Assistant Professor, Department of Information Science, Cornell University\nā€¢\nNatasha Duarte, Project Director, Upturn\nā€¢\nElana Zeide, Assistant Professor, University of Nebraska College of Law\nā€¢\nFabian Rogers, Constituent Advocate, Office of NY State Senator Jabari Brisport and Community\nAdvocate and Floor Captain, Atlantic Plaza Towers Tenants Association\nThe individual panelists described the ways in which AI systems and other technologies are increasingly being \nused to limit access to equal opportunities in education, housing, and employment. Education-related \nconcerning uses included the increased use of remote proctoring systems, student location and facial \nrecognition tracking, teacher evaluation systems, robot teachers, and more. Housing-related concerning uses \nincluding automated tenant background screening and facial recognition-based controls to enter or exit \nhousing complexes. Employment-related concerning uses included discrimination in automated hiring \nscreening and workplace surveillance. Various panelists raised the limitations of existing privacy law as a key \nconcern, pointing out that students should be able to reinvent themselves and require privacy of their student \nrecords and education-related data in order to do so. 
The overarching concerns of surveillance in these \ndomains included concerns about the chilling effects of surveillance on student expression, inappropriate \ncontrol of tenants via surveillance, and the way that surveillance of workers blurs the boundary between work \nand life and exerts extreme and potentially damaging control over workers' lives. Additionally, some panelists \npointed out ways that data from one situation was misapplied in another in a way that limited people's \nopportunities, for example data from criminal justice settings or previous evictions being used to block further \naccess to housing. Throughout, various panelists emphasized that these technologies are being used to shift the \nburden of oversight and efficiency from employers to workers, schools to students, and landlords to tenants, in \nways that diminish and encroach on equality of opportunity; assessment of these technologies should include \nwhether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57\n"", "" \n \n \n \n \nAPPENDIX\n•\nJulia Simon-Mishel, Supervising Attorney, Philadelphia Legal Assistance\n•\nDr. Zachary Mahafza, Research & Data Analyst, Southern Poverty Law Center\n•\nJ. Khadijah Abdurahman, Tech Impact Network Research Fellow, AI Now Institute, UCLA C2I1, and\nUWA Law School\nPanelists separately described the increasing scope of technology use in providing for social welfare, including \nin fraud detection, digital ID systems, and other methods focused on improving efficiency and reducing cost. \nHowever, various panelists individually cautioned that these systems may reduce burden for government \nagencies by increasing the burden and agency of people using and interacting with these technologies. \nAdditionally, these systems can produce feedback loops and compounded harm, collecting data from \ncommunities and using it to reinforce inequality. Various panelists suggested that these harms could be \nmitigated by ensuring community input at the beginning of the design process, providing ways to opt out of \nthese systems and use associated human-driven mechanisms instead, ensuring timeliness of benefit payments, \nand providing clear notice about the use of these systems and clear explanations of how and what the \ntechnologies are doing. Some panelists suggested that technology should be used to help people receive \nbenefits, e.g., by pushing benefits to those in need and ensuring automated decision-making systems are only \nused to provide a positive outcome; technology shouldn't be used to take supports away from people who need \nthem. \nPanel 6: The Healthcare System. 
This event explored current and emerging uses of technology in the \nhealthcare system and consumer products related to health. \nWelcome:\n•\nAlondra Nelson, Deputy Director for Science and Society, White House Office of Science and Technology\nPolicy\n•\nPatrick Gaspard, President and CEO, Center for American Progress\nModerator: Micky Tripathi, National Coordinator for Health Information Technology, U.S Department of \nHealth and Human Services. \nPanelists: \n•\nMark Schneider, Health Innovation Advisor, ChristianaCare\n•\nZiad Obermeyer, Blue Cross of California Distinguished Associate Professor of Policy and Management,\nUniversity of California, Berkeley School of Public Health\n•\nDorothy Roberts, George A. Weiss University Professor of Law and Sociology and the Raymond Pace and\nSadie Tanner Mossell Alexander Professor of Civil Rights, University of Pennsylvania\n•\nDavid Jones, A. Bernard Ackerman Professor of the Culture of Medicine, Harvard University\n•\nJamila Michener, Associate Professor of Government, Cornell University; Co-Director, Cornell Center for\nHealth Equity\xad\nPanelists discussed the impact of new technologies on health disparities; healthcare access, delivery, and \noutcomes; and areas ripe for research and policymaking. Panelists discussed the increasing importance of tech-\nnology as both a vehicle to deliver healthcare and a tool to enhance the quality of care. On the issue of \ndelivery, various panelists pointed to a number of concerns including access to and expense of broadband \nservice, the privacy concerns associated with telehealth systems, the expense associated with health \nmonitoring devices, and how this can exacerbate equity issues. On the issue of technology enhanced care, \nsome panelists spoke extensively about the way in which racial biases and the use of race in medicine \nperpetuate harms and embed prior discrimination, and the importance of ensuring that the technologies used \nin medical care were accountable to the relevant stakeholders. Various panelists emphasized the importance \nof having the voices of those subjected to these technologies be heard.\n59\n""]","AI technology can reinforce inequities in education, housing, and jobs by being used to limit access to equal opportunities, such as through automated tenant background screening, discrimination in automated hiring screening, and remote proctoring systems. Additionally, these technologies can shift the burden of oversight from employers to workers, schools to students, and landlords to tenants, which diminishes equality of opportunity. 
In the context of social welfare, AI systems may reduce the burden for government agencies but increase the burden on individuals interacting with these technologies, potentially creating feedback loops that reinforce inequality.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 56, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 58, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What role do algorithmic impact assessments play in clarifying accountability for automated systems?,"["" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAn automated system should provide demonstrably clear, timely, understandable, and accessible notice of use, and \nexplanations as to how and why a decision was made or an action was taken by the system. These expectations are \nexplained below. \nProvide clear, timely, understandable, and accessible notice of use and explanations \xad\nGenerally accessible plain language documentation. The entity responsible for using the automated \nsystem should ensure that documentation describing the overall system (including any human components) is \npublic and easy to find. The documentation should describe, in plain language, how the system works and how \nany automated component is used to determine an action or decision. It should also include expectations about \nreporting described throughout this framework, such as the algorithmic impact assessments described as \npart of Algorithmic Discrimination Protections. \nAccountable. Notices should clearly identify the entity responsible for designing each component of the \nsystem and the entity using it. \nTimely and up-to-date. Users should receive notice of the use of automated systems in advance of using or \nwhile being impacted by the technology. An explanation should be available with the decision itself, or soon \nthereafter. Notice should be kept up-to-date and people impacted by the system should be notified of use case \nor key functionality changes. \nBrief and clear. Notices and explanations should be assessed, such as by research on users’ experiences, \nincluding user testing, to ensure that the people using or impacted by the automated system are able to easily \nfind notices and explanations, read them quickly, and understand and act on them. 
This includes ensuring that \nnotices and explanations are accessible to users with disabilities and are available in the language(s) and read-\ning level appropriate for the audience. Notices and explanations may need to be available in multiple forms, \n(e.g., on paper, on a physical sign, or online), in order to meet these expectations and to be accessible to the \nAmerican public. \nProvide explanations as to how and why a decision was made or an action was taken by an \nautomated system \nTailored to the purpose. Explanations should be tailored to the specific purpose for which the user is \nexpected to use the explanation, and should clearly state that purpose. An informational explanation might \ndiffer from an explanation provided to allow for the possibility of recourse, an appeal, or one provided in the \ncontext of a dispute or contestation process. For the purposes of this framework, 'explanation' should be \nconstrued broadly. An explanation need not be a plain-language statement about causality but could consist of \nany mechanism that allows the recipient to build the necessary understanding and intuitions to achieve the \nstated purpose. Tailoring should be assessed (e.g., via user experience research). \nTailored to the target of the explanation. Explanations should be targeted to specific audiences and \nclearly state that audience. An explanation provided to the subject of a decision might differ from one provided \nto an advocate, or to a domain expert or decision maker. Tailoring should be assessed (e.g., via user experience \nresearch). \n43\n"", "" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nTailored to the level of risk. An assessment should be done to determine the level of risk of the auto\xad\nmated system. In settings where the consequences are high as determined by a risk assessment, or extensive \noversight is expected (e.g., in criminal justice or some public sector settings), explanatory mechanisms should \nbe built into the system design so that the system’s full behavior can be explained in advance (i.e., only fully \ntransparent models should be used), rather than as an after-the-decision interpretation. In other settings, the \nextent of explanation provided should be tailored to the risk level. \nValid. The explanation provided by a system should accurately reflect the factors and the influences that led \nto a particular decision, and should be meaningful for the particular customization based on purpose, target, \nand level of risk. While approximation and simplification may be necessary for the system to succeed based on \nthe explanatory purpose and target of the explanation, or to account for the risk of fraud or other concerns \nrelated to revealing decision-making information, such simplifications should be done in a scientifically \nsupportable way. Where appropriate based on the explanatory system, error ranges for the explanation should \nbe calculated and included in the explanation, with the choice of presentation of such information balanced \nwith usability and overall interface complexity concerns. \nDemonstrate protections for notice and explanation \nReporting. 
Summary reporting should document the determinations made based on the above consider\xad\nations, including: the responsible entities for accountability purposes; the goal and use cases for the system, \nidentified users, and impacted populations; the assessment of notice clarity and timeliness; the assessment of \nthe explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment \nof how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of \nrisk. Individualized profile information should be made readily available to the greatest extent possible that \nincludes explanations for any system impacts or inferences. Reporting should be provided in a clear plain \nlanguage and machine-readable manner. \n44\n""]",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 42, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 43, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How does human input affect fairness and fallback in automated systems?,"[' \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAn automated system should provide demonstrably effective mechanisms to opt out in favor of a human alterna\xad\ntive, where appropriate, as well as timely human consideration and remedy by a fallback system, with additional \nhuman oversight and safeguards for systems used in sensitive domains, and with training and assessment for any \nhuman-based portions of the system to ensure effectiveness. \nProvide a mechanism to conveniently opt out from automated systems in favor of a human \nalternative, where appropriate \nBrief, clear, accessible notice and instructions. Those impacted by an automated system should be \ngiven a brief, clear notice that they are entitled to opt-out, along with clear instructions for how to opt-out. \nInstructions should be provided in an accessible form and should be easily findable by those impacted by the \nautomated system. The brevity, clarity, and accessibility of the notice and instructions should be assessed (e.g., \nvia user experience research). \nHuman alternatives provided when appropriate. In many scenarios, there is a reasonable expectation \nof human involvement in attaining rights, opportunities, or access. 
When automated systems make up part of \nthe attainment process, alternative timely human-driven processes should be provided. The use of a human \nalternative should be triggered by an opt-out process. \nTimely and not burdensome human alternative. Opting out should be timely and not unreasonably \nburdensome in both the process of requesting to opt-out and the human-driven alternative provided. \nProvide timely human consideration and remedy by a fallback and escalation system in the \nevent that an automated system fails, produces error, or you would like to appeal or con\xad\ntest its impacts on you \nProportionate. The availability of human consideration and fallback, along with associated training and \nsafeguards against human bias, should be proportionate to the potential of the automated system to meaning\xad\nfully impact rights, opportunities, or access. Automated systems that have greater control over outcomes, \nprovide input to high-stakes decisions, relate to sensitive domains, or otherwise have a greater potential to \nmeaningfully impact rights, opportunities, or access should have greater availability (e.g., staffing) and over\xad\nsight of human consideration and fallback mechanisms. \nAccessible. Mechanisms for human consideration and fallback, whether in-person, on paper, by phone, or \notherwise provided, should be easy to find and use. These mechanisms should be tested to ensure that users \nwho have trouble with the automated system are able to use human consideration and fallback, with the under\xad\nstanding that it may be these users who are most likely to need the human assistance. Similarly, it should be \ntested to ensure that users with disabilities are able to find and use human consideration and fallback and also \nrequest reasonable accommodations or modifications. \nConvenient. Mechanisms for human consideration and fallback should not be unreasonably burdensome as \ncompared to the automated system’s equivalent. \n49\n', "" \n \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEquitable. Consideration should be given to ensuring outcomes of the fallback and escalation system are \nequitable when compared to those of the automated system and such that the fallback and escalation \nsystem provides equitable access to underserved communities.105 \nTimely. Human consideration and fallback are only useful if they are conducted and concluded in a \ntimely manner. The determination of what is timely should be made relative to the specific automated \nsystem, and the review system should be staffed and regularly assessed to ensure it is providing timely \nconsideration and fallback. In time-critical systems, this mechanism should be immediately available or, \nwhere possible, available before the harm occurs. Time-critical systems include, but are not limited to, \nvoting-related systems, automated building access and other access systems, systems that form a critical \ncomponent of healthcare, and systems that have the ability to withhold wages or otherwise cause \nimmediate financial penalties. \nEffective. 
The organizational structure surrounding processes for consideration and fallback should \nbe designed so that if the human decision-maker charged with reassessing a decision determines that it \nshould be overruled, the new decision will be effectively enacted. This includes ensuring that the new \ndecision is entered into the automated system throughout its components, any previous repercussions from \nthe old decision are also overturned, and safeguards are put in place to help ensure that future decisions do \nnot result in the same errors. \nMaintained. The human consideration and fallback process and any associated automated processes \nshould be maintained and supported as long as the relevant automated system continues to be in use. \nInstitute training, assessment, and oversight to combat automation bias and ensure any \nhuman-based components of a system are effective. \nTraining and assessment. Anyone administering, interacting with, or interpreting the outputs of an auto\xad\nmated system should receive training in that system, including how to properly interpret outputs of a system \nin light of its intended purpose and in how to mitigate the effects of automation bias. The training should reoc\xad\ncur regularly to ensure it is up to date with the system and to ensure the system is used appropriately. Assess\xad\nment should be ongoing to ensure that the use of the system with human involvement provides for appropri\xad\nate results, i.e., that the involvement of people does not invalidate the system's assessment as safe and effective \nor lead to algorithmic discrimination. \nOversight. Human-based systems have the potential for bias, including automation bias, as well as other \nconcerns that may limit their effectiveness. The results of assessments of the efficacy and potential bias of \nsuch human-based systems should be overseen by governance structures that have the potential to update the \noperation of the human-based system in order to mitigate these effects. \n50\n""]","The context does not explicitly address how human input affects fairness and fallback in automated systems. However, it mentions that human consideration and fallback mechanisms should be proportionate, accessible, equitable, timely, and effective, which implies that human input is crucial in ensuring these aspects are upheld. 
The effectiveness of human involvement is emphasized through training, assessment, and oversight to combat automation bias and ensure appropriate results.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 48, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 49, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What factors to consider for human options in automated systems per the AI Bill of Rights?,"['TABLE OF CONTENTS\nFROM PRINCIPLES TO PRACTICE: A TECHNICAL COMPANION TO THE BLUEPRINT \nFOR AN AI BILL OF RIGHTS \n \nUSING THIS TECHNICAL COMPANION\n \nSAFE AND EFFECTIVE SYSTEMS\n \nALGORITHMIC DISCRIMINATION PROTECTIONS\n \nDATA PRIVACY\n \nNOTICE AND EXPLANATION\n \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nAPPENDIX\n \nEXAMPLES OF AUTOMATED SYSTEMS\n \nLISTENING TO THE AMERICAN PEOPLE\nENDNOTES \n12\n14\n15\n23\n30\n40\n46\n53\n53\n55\n63\n13\n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 12, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How does a document retention policy support GAI system integrity?,"[' \n16 \nGOVERN 1.5: Ongoing monitoring and periodic review of the risk management process and its outcomes are planned, and \norganizational roles and responsibilities are clearly defined, including determining the frequency of periodic review. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.5-001 Define organizational responsibilities for periodic review of content provenance \nand incident monitoring for GAI systems. \nInformation Integrity \nGV-1.5-002 \nEstablish organizational policies and procedures for after action reviews of GAI \nsystem incident response and incident disclosures, to identify gaps; Update \nincident response and incident disclosure processes as required. \nHuman-AI Configuration; \nInformation Security \nGV-1.5-003 \nMaintain a document retention policy to keep history for test, evaluation, \nvalidation, and verification (TEVV), and digital content transparency methods for \nGAI. 
\nInformation Integrity; Intellectual \nProperty \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring \n \nGOVERN 1.6: Mechanisms are in place to inventory AI systems and are resourced according to organizational risk priorities. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.6-001 Enumerate organizational GAI systems for incorporation into AI system inventory \nand adjust AI system inventory requirements to account for GAI risks. \nInformation Security \nGV-1.6-002 Define any inventory exemptions in organizational policies for GAI systems \nembedded into application software. \nValue Chain and Component \nIntegration \nGV-1.6-003 \nIn addition to general model, governance, and risk information, consider the \nfollowing items in GAI system inventory entries: Data provenance information \n(e.g., source, signatures, versioning, watermarks); Known issues reported from \ninternal bug tracking or external information sharing resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor); Human oversight roles \nand responsibilities; Special rights and considerations for intellectual property, \nlicensed works, or personal, privileged, proprietary or sensitive data; Underlying \nfoundation models, versions of underlying models, and access modes. \nData Privacy; Human-AI \nConfiguration; Information \nIntegrity; Intellectual Property; \nValue Chain and Component \nIntegration \nAI Actor Tasks: Governance and Oversight \n \n', ' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identification process. \nHuman-AI Configuration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identification of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the effectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. 
\nInformation Security \n']",The context does not provide specific information on how a document retention policy supports GAI system integrity.,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 19, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What challenges did panelists see at the tech-health equity intersection?,"["" \n \n \n \n \nAPPENDIX\n•\nJulia Simon-Mishel, Supervising Attorney, Philadelphia Legal Assistance\n•\nDr. Zachary Mahafza, Research & Data Analyst, Southern Poverty Law Center\n•\nJ. Khadijah Abdurahman, Tech Impact Network Research Fellow, AI Now Institute, UCLA C2I1, and\nUWA Law School\nPanelists separately described the increasing scope of technology use in providing for social welfare, including \nin fraud detection, digital ID systems, and other methods focused on improving efficiency and reducing cost. \nHowever, various panelists individually cautioned that these systems may reduce burden for government \nagencies by increasing the burden and agency of people using and interacting with these technologies. \nAdditionally, these systems can produce feedback loops and compounded harm, collecting data from \ncommunities and using it to reinforce inequality. Various panelists suggested that these harms could be \nmitigated by ensuring community input at the beginning of the design process, providing ways to opt out of \nthese systems and use associated human-driven mechanisms instead, ensuring timeliness of benefit payments, \nand providing clear notice about the use of these systems and clear explanations of how and what the \ntechnologies are doing. Some panelists suggested that technology should be used to help people receive \nbenefits, e.g., by pushing benefits to those in need and ensuring automated decision-making systems are only \nused to provide a positive outcome; technology shouldn't be used to take supports away from people who need \nthem. \nPanel 6: The Healthcare System. This event explored current and emerging uses of technology in the \nhealthcare system and consumer products related to health. \nWelcome:\n•\nAlondra Nelson, Deputy Director for Science and Society, White House Office of Science and Technology\nPolicy\n•\nPatrick Gaspard, President and CEO, Center for American Progress\nModerator: Micky Tripathi, National Coordinator for Health Information Technology, U.S Department of \nHealth and Human Services. 
\nPanelists: \n•\nMark Schneider, Health Innovation Advisor, ChristianaCare\n•\nZiad Obermeyer, Blue Cross of California Distinguished Associate Professor of Policy and Management,\nUniversity of California, Berkeley School of Public Health\n•\nDorothy Roberts, George A. Weiss University Professor of Law and Sociology and the Raymond Pace and\nSadie Tanner Mossell Alexander Professor of Civil Rights, University of Pennsylvania\n•\nDavid Jones, A. Bernard Ackerman Professor of the Culture of Medicine, Harvard University\n•\nJamila Michener, Associate Professor of Government, Cornell University; Co-Director, Cornell Center for\nHealth Equity\xad\nPanelists discussed the impact of new technologies on health disparities; healthcare access, delivery, and \noutcomes; and areas ripe for research and policymaking. Panelists discussed the increasing importance of tech-\nnology as both a vehicle to deliver healthcare and a tool to enhance the quality of care. On the issue of \ndelivery, various panelists pointed to a number of concerns including access to and expense of broadband \nservice, the privacy concerns associated with telehealth systems, the expense associated with health \nmonitoring devices, and how this can exacerbate equity issues. On the issue of technology enhanced care, \nsome panelists spoke extensively about the way in which racial biases and the use of race in medicine \nperpetuate harms and embed prior discrimination, and the importance of ensuring that the technologies used \nin medical care were accountable to the relevant stakeholders. Various panelists emphasized the importance \nof having the voices of those subjected to these technologies be heard.\n59\n"", "" \n \n \n \nAPPENDIX\nPanelists discussed the benefits of AI-enabled systems and their potential to build better and more \ninnovative infrastructure. They individually noted that while AI technologies may be new, the process of \ntechnological diffusion is not, and that it was critical to have thoughtful and responsible development and \nintegration of technology within communities. Some panelists suggested that the integration of technology \ncould benefit from examining how technological diffusion has worked in the realm of urban planning: \nlessons learned from successes and failures there include the importance of balancing ownership rights, use \nrights, and community health, safety and welfare, as well ensuring better representation of all voices, \nespecially those traditionally marginalized by technological advances. Some panelists also raised the issue of \npower structures – providing examples of how strong transparency requirements in smart city projects \nhelped to reshape power and give more voice to those lacking the financial or political power to effect change. \nIn discussion of technical and governance interventions that that are needed to protect against the harms \nof these technologies, various panelists emphasized the need for transparency, data collection, and \nflexible and reactive policy development, analogous to how software is continuously updated and deployed. \nSome panelists pointed out that companies need clear guidelines to have a consistent environment for \ninnovation, with principles and guardrails being the key to fostering responsible innovation. \nPanel 2: The Criminal Justice System. This event explored current and emergent uses of technology in \nthe criminal justice system and considered how they advance or undermine public safety, justice, and \ndemocratic values. 
\nWelcome: \n•\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n•\nBen Winters, Counsel, Electronic Privacy Information Center\nModerator: Chiraag Bains, Deputy Assistant to the President on Racial Justice & Equity \nPanelists: \n•\nSean Malinowski, Director of Policing Innovation and Reform, University of Chicago Crime Lab\n•\nKristian Lum, Researcher\n•\nJumana Musa, Director, Fourth Amendment Center, National Association of Criminal Defense Lawyers\n•\nStanley Andrisse, Executive Director, From Prison Cells to PHD; Assistant Professor, Howard University\nCollege of Medicine\n•\nMyaisha Hayes, Campaign Strategies Director, MediaJustice\nPanelists discussed uses of technology within the criminal justice system, including the use of predictive \npolicing, pretrial risk assessments, automated license plate readers, and prison communication tools. The \ndiscussion emphasized that communities deserve safety, and strategies need to be identified that lead to safety; \nsuch strategies might include data-driven approaches, but the focus on safety should be primary, and \ntechnology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, various panelists emphasized that transparency is important but is not enough to achieve \naccountability. Some panelists discussed their individual views on additional system needs for validity, and \nagreed upon the importance of advisory boards and compensated community input early in the design process \n(before the technology is built and instituted). Various panelists also emphasized the importance of regulation \nthat includes limits to the type and cost of such technologies. \n56\n""]","Panelists discussed several challenges at the tech-health equity intersection, including access to and expense of broadband service, privacy concerns associated with telehealth systems, and the expense associated with health monitoring devices, which can exacerbate equity issues. 
They also highlighted the need for accountability in the technologies used in medical care, particularly regarding racial biases and the use of race in medicine, which perpetuate harms and embed prior discrimination.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 58, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 55, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do transparency policies help manage GAI risks and ensure compliance?,"[' \n14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. \nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organization’s risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or defining risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, offensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces significant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. 
\nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval (“go/”no-go”) policies, procedures, and processes, \nwith reviewed processes and approval thresholds reflecting measurement of GAI \ncapabilities and risks. \nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or offensive cyber capabilities. \nCBRN Information or Capabilities; \nInformation Security \n', ' \n15 \nGV-1.3-004 Obtain input from stakeholder communities to identify unacceptable use, in \naccordance with activities in the AI RMF Map function. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nGV-1.3-005 \nMaintain an updated hierarchy of identified and expected GAI risks connected to \ncontexts of GAI model advancement and use, potentially including specialized risk \nlevels for GAI systems that address issues such as model collapse and algorithmic \nmonoculture. \nHarmful Bias and Homogenization \nGV-1.3-006 \nReevaluate organizational risk tolerances to account for unacceptable negative risk \n(such as where significant negative impacts are imminent, severe harms are \nactually occurring, or large-scale risks could occur); and broad GAI negative risks, \nincluding: Immature safety or risk cultures related to AI and GAI design, \ndevelopment and deployment, public information integrity risks, including impacts \non democratic processes, unknown long-term performance characteristics of GAI. \nInformation Integrity; Dangerous, \nViolent, or Hateful Content; CBRN \nInformation or Capabilities \nGV-1.3-007 Devise a plan to halt development or deployment of a GAI system that poses \nunacceptable negative risk. \nCBRN Information and Capability; \nInformation Security; Information \nIntegrity \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.4: The risk management process and its outcomes are established through transparent policies, procedures, and other \ncontrols based on organizational risk priorities. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.4-001 \nEstablish policies and mechanisms to prevent GAI systems from generating \nCSAM, NCII or content that violates the law. \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias \nand Homogenization; \nDangerous, Violent, or Hateful \nContent \nGV-1.4-002 \nEstablish transparent acceptable use policies for GAI that address illegal use or \napplications of GAI. \nCBRN Information or \nCapabilities; Obscene, \nDegrading, and/or Abusive \nContent; Data Privacy; Civil \nRights violations \nAI Actor Tasks: AI Development, AI Deployment, Governance and Oversight \n \n']","Transparency policies help manage GAI risks by establishing processes for documenting the origin and history of training data and generated data for GAI applications. 
This promotes digital content transparency while balancing the proprietary nature of training approaches, thereby ensuring compliance with data privacy, information integrity, and intellectual property standards.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 17, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 18, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How important are clear decision-making explanations in automated systems for risk assessment and user understanding?,"["" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nTailored to the level of risk. An assessment should be done to determine the level of risk of the auto\xad\nmated system. In settings where the consequences are high as determined by a risk assessment, or extensive \noversight is expected (e.g., in criminal justice or some public sector settings), explanatory mechanisms should \nbe built into the system design so that the system’s full behavior can be explained in advance (i.e., only fully \ntransparent models should be used), rather than as an after-the-decision interpretation. In other settings, the \nextent of explanation provided should be tailored to the risk level. \nValid. The explanation provided by a system should accurately reflect the factors and the influences that led \nto a particular decision, and should be meaningful for the particular customization based on purpose, target, \nand level of risk. While approximation and simplification may be necessary for the system to succeed based on \nthe explanatory purpose and target of the explanation, or to account for the risk of fraud or other concerns \nrelated to revealing decision-making information, such simplifications should be done in a scientifically \nsupportable way. Where appropriate based on the explanatory system, error ranges for the explanation should \nbe calculated and included in the explanation, with the choice of presentation of such information balanced \nwith usability and overall interface complexity concerns. \nDemonstrate protections for notice and explanation \nReporting. 
Summary reporting should document the determinations made based on the above consider\xad\nations, including: the responsible entities for accountability purposes; the goal and use cases for the system, \nidentified users, and impacted populations; the assessment of notice clarity and timeliness; the assessment of \nthe explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment \nof how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of \nrisk. Individualized profile information should be made readily available to the greatest extent possible that \nincludes explanations for any system impacts or inferences. Reporting should be provided in a clear plain \nlanguage and machine-readable manner. \n44\n"", "" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAn automated system should provide demonstrably clear, timely, understandable, and accessible notice of use, and \nexplanations as to how and why a decision was made or an action was taken by the system. These expectations are \nexplained below. \nProvide clear, timely, understandable, and accessible notice of use and explanations \xad\nGenerally accessible plain language documentation. The entity responsible for using the automated \nsystem should ensure that documentation describing the overall system (including any human components) is \npublic and easy to find. The documentation should describe, in plain language, how the system works and how \nany automated component is used to determine an action or decision. It should also include expectations about \nreporting described throughout this framework, such as the algorithmic impact assessments described as \npart of Algorithmic Discrimination Protections. \nAccountable. Notices should clearly identify the entity responsible for designing each component of the \nsystem and the entity using it. \nTimely and up-to-date. Users should receive notice of the use of automated systems in advance of using or \nwhile being impacted by the technology. An explanation should be available with the decision itself, or soon \nthereafter. Notice should be kept up-to-date and people impacted by the system should be notified of use case \nor key functionality changes. \nBrief and clear. Notices and explanations should be assessed, such as by research on users’ experiences, \nincluding user testing, to ensure that the people using or impacted by the automated system are able to easily \nfind notices and explanations, read them quickly, and understand and act on them. This includes ensuring that \nnotices and explanations are accessible to users with disabilities and are available in the language(s) and read-\ning level appropriate for the audience. Notices and explanations may need to be available in multiple forms, \n(e.g., on paper, on a physical sign, or online), in order to meet these expectations and to be accessible to the \nAmerican public. \nProvide explanations as to how and why a decision was made or an action was taken by an \nautomated system \nTailored to the purpose. Explanations should be tailored to the specific purpose for which the user is \nexpected to use the explanation, and should clearly state that purpose. 
An informational explanation might \ndiffer from an explanation provided to allow for the possibility of recourse, an appeal, or one provided in the \ncontext of a dispute or contestation process. For the purposes of this framework, 'explanation' should be \nconstrued broadly. An explanation need not be a plain-language statement about causality but could consist of \nany mechanism that allows the recipient to build the necessary understanding and intuitions to achieve the \nstated purpose. Tailoring should be assessed (e.g., via user experience research). \nTailored to the target of the explanation. Explanations should be targeted to specific audiences and \nclearly state that audience. An explanation provided to the subject of a decision might differ from one provided \nto an advocate, or to a domain expert or decision maker. Tailoring should be assessed (e.g., via user experience \nresearch). \n43\n""]","The context emphasizes the importance of clear decision-making explanations in automated systems, particularly for risk assessment and user understanding. It states that explanations should accurately reflect the factors influencing decisions and be tailored to the purpose and audience, ensuring they are understandable and accessible. This is crucial in settings with high consequences, where transparency and clarity are necessary for accountability and user trust.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 43, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 42, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +"What risks do prompt injection attacks pose to GAI, especially regarding misinformation and data poisoning?","[' \n11 \nvalue chain (e.g., data inputs, processing, GAI training, or deployment environments), conventional \ncybersecurity practices may need to adapt or evolve. \nFor instance, prompt injection involves modifying what input is provided to a GAI system so that it \nbehaves in unintended ways. In direct prompt injections, attackers might craft malicious prompts and \ninput them directly to a GAI system, with a variety of downstream negative consequences to \ninterconnected systems. Indirect prompt injection attacks occur when adversaries remotely (i.e., without \na direct interface) exploit LLM-integrated applications by injecting prompts into data likely to be \nretrieved. Security researchers have already demonstrated how indirect prompt injections can exploit \nvulnerabilities by stealing proprietary data or running malicious code remotely on a machine. 
Merely \nquerying a closed production model can elicit previously undisclosed information about that model. \nAnother cybersecurity risk to GAI is data poisoning, in which an adversary compromises a training \ndataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts \nof the model could exacerbate risks associated with GAI system outputs. \nTrustworthy AI Characteristics: Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n2.10. \nIntellectual Property \nIntellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair \nuse under the fair use doctrine. If a GAI systemā€™s training data included copyrighted material, GAI \noutputs displaying instances of training data memorization (see Data Privacy above) could infringe on \ncopyright. \nHow GAI relates to copyright, including the status of generated content that is similar to but does not \nstrictly copy work protected by copyright, is currently being debated in legal fora. Similar discussions are \ntaking place regarding the use or emulation of personal identity, likeness, or voice without permission. \nTrustworthy AI Characteristics: Accountable and Transparent, Fair with Harmful Bias Managed, Privacy \nEnhanced \n2.11. \nObscene, Degrading, and/or Abusive Content \nGAI can ease the production of and access to illegal non-consensual intimate imagery (NCII) of adults, \nand/or child sexual abuse material (CSAM). GAI-generated obscene, abusive or degrading content can \ncreate privacy, psychological and emotional, and even physical harms, and in some cases may be illegal. \nGenerated explicit or obscene AI content may include highly realistic ā€œdeepfakesā€ of real individuals, \nincluding children. The spread of this kind of material can have downstream negative consequences: in \nthe context of CSAM, even if the generated images do not resemble speciļ¬c individuals, the prevalence \nof such images can divert time and resources from eļ¬€orts to ļ¬nd real-world victims. Outside of CSAM, \nthe creation and spread of NCII disproportionately impacts women and sexual minorities, and can have \nsubsequent negative consequences including decline in overall mental health, substance abuse, and \neven suicidal thoughts. \nData used for training GAI models may unintentionally include CSAM and NCII. A recent report noted \nthat several commonly used GAI training datasets were found to contain hundreds of known images of \n', ' \n10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards speciļ¬c demographics. Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic ā€œdeepfakesā€ ā€“ that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities. 
\nDisinformation and misinformation ā€“ both of which may be facilitated by GAI ā€“ may erode public trust in \ntrue or valid evidence and information, with downstream eļ¬€ects. For example, a synthetic image of a \nPentagon blast went viral and brieļ¬‚y caused a drop in the stock market. Generative AI models can also \nassist malicious actors in creating compelling imagery and propaganda to support disinformation \ncampaigns, which may not be photorealistic, but could enable these campaigns to gain more reach and \nengagement on social media platforms. Additionally, generative AI models can assist malicious actors in \ncreating fraudulent content intended to impersonate others. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe, Valid and Reliable, Interpretable and \nExplainable \n2.9. Information Security \nInformation security for computer systems and data is a mature ļ¬eld with widely accepted and \nstandardized practices for oļ¬€ensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of oļ¬€ensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. \nOļ¬€ensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of \nthe attack chain, including informing attackers on how to proactively evade threat detection and escalate \nprivileges after gaining system access. \nInformation security for GAI models and systems also includes maintaining availability of the GAI system \nand the integrity and (when applicable) the conļ¬dentiality of the GAI code, training data, and model \nweights. To identify and secure potential attack points in AI systems or speciļ¬c components of the AI \n \n \n12 See also https://doi.org/10.6028/NIST.AI.100-4, to be published. \n']","Prompt injection attacks pose significant risks to GAI by enabling attackers to modify inputs to the system, leading to unintended behaviors and potential misinformation. Direct prompt injections can result in malicious prompts being inputted, causing negative consequences for interconnected systems. Indirect prompt injection attacks exploit vulnerabilities in LLM-integrated applications, potentially leading to the theft of proprietary data or the execution of malicious code. 
Additionally, data poisoning is a risk where adversaries compromise training datasets, manipulating the outputs or operations of GAI systems, which can exacerbate misinformation and undermine the reliability of generated content.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 14, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 13, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeopleā€™s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse \nRelevant and high-quality data. Data used as part of any automated systemā€™s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19\n', ' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. 
This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\xad\ntial risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on peopleā€™s rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, mea\xad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. 
\n18\n']","Key processes and stakeholder interactions that ensure automated systems' safety and effectiveness include ongoing monitoring procedures, clear organizational oversight, consultation with the public during various phases of development, extensive testing before deployment, and proactive risk identification and mitigation. These processes involve continuous evaluation of performance metrics, involvement of organizational stakeholders, engagement with diverse impacted communities, and adherence to domain-specific best practices for testing.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 18, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What strategies can help prevent algorithmic bias in automated systems for underserved communities?,"["" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. 
For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. \nDisparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections \n"", ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. 
The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. 
\n26\nAlgorithmic \nDiscrimination \nProtections \n']","Strategies to prevent algorithmic bias in automated systems for underserved communities include conducting proactive equity assessments during the design phase, ensuring the use of representative and robust data, and guarding against the use of proxies that may lead to algorithmic discrimination. These strategies involve reviewing potential input data, historical context, and accessibility for people with disabilities, as well as testing for correlation between demographic information and attributes to identify and remove any proxies.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What drives the choice of humans over automation in sensitive areas?,"[' \nSECTION TITLE\nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nYou should be able to opt out, where appropriate, and have access to a person who can quickly \nconsider and remedy problems you encounter. You should be able to opt out from automated systems in \nfavor of a human alternative, where appropriate. Appropriateness should be determined based on reasonable \nexpectations in a given context and with a focus on ensuring broad accessibility and protecting the public from \nespecially harmful impacts. In some cases, a human or other alternative may be required by law. You should have \naccess to timely human consideration and remedy by a fallback and escalation process if an automated system \nfails, it produces an error, or you would like to appeal or contest its impacts on you. Human consideration and \nfallback should be accessible, equitable, effective, maintained, accompanied by appropriate operator training, and \nshould not impose an unreasonable burden on the public. Automated systems with an intended use within sensi\xad\ntive domains, including, but not limited to, criminal justice, employment, education, and health, should additional\xad\nly be tailored to the purpose, provide meaningful access for oversight, include training for any people interacting \nwith the system, and incorporate human consideration for adverse or high-risk decisions. Reporting that includes \na description of these human governance processes and assessment of their timeliness, accessibility, outcomes, \nand effectiveness should be made public whenever possible. \nDefinitions for key terms in The Blueprint for an AI Bill of Rights can be found in Applying the Blueprint for an AI Bill of Rights. 
\nAccompanying analysis and tools for actualizing each principle can be found in the Technical Companion. \n7\n']","The choice of humans over automation in sensitive areas is driven by the need for human consideration and remedy, particularly in contexts where automated systems may fail, produce errors, or where individuals wish to appeal or contest the impacts of these systems. This choice is also influenced by the requirement for appropriateness based on reasonable expectations, ensuring broad accessibility, and protecting the public from especially harmful impacts.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 6, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What ensures good governance in automated systems?,"[' \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nOngoing monitoring. Automated systems should have ongoing monitoring procedures, including recalibra\xad\ntion procedures, in place to ensure that their performance does not fall below an acceptable level over time, \nbased on changing real-world conditions or deployment contexts, post-deployment modification, or unexpect\xad\ned conditions. This ongoing monitoring should include continuous evaluation of performance metrics and \nharm assessments, updates of any systems, and retraining of any machine learning models as necessary, as well \nas ensuring that fallback mechanisms are in place to allow reversion to a previously working system. Monitor\xad\ning should take into account the performance of both technical system components (the algorithm as well as \nany hardware components, data inputs, etc.) and human operators. It should include mechanisms for testing \nthe actual accuracy of any predictions or recommendations generated by a system, not just a human operatorā€™s \ndetermination of their accuracy. Ongoing monitoring procedures should include manual, human-led monitor\xad\ning as a check in the event there are shortcomings in automated monitoring systems. These monitoring proce\xad\ndures should be in place for the lifespan of the deployed automated system. \nClear organizational oversight. Entities responsible for the development or use of automated systems \nshould lay out clear governance structures and procedures. This includes clearly-stated governance proce\xad\ndures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing \nassessment and mitigation. Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. 
Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeopleā€™s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse \nRelevant and high-quality data. Data used as part of any automated systemā€™s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19\n']","Good governance in automated systems is ensured by laying out clear governance structures and procedures, which include clearly-stated governance procedures before deploying the system, as well as the responsibility of specific individuals or entities to oversee ongoing assessment and mitigation. Organizational stakeholders should be involved in establishing these governance procedures, and responsibility should rest high enough in the organization to allow for prompt decision-making regarding resources, mitigation, incident response, and potential rollback. 
Additionally, those in charge should be aware of any use cases with the potential for meaningful impact on people's rights, opportunities, or access, and it may be appropriate for an independent ethics review to be conducted before deployment.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 18, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What links do harmful AI biases have to data privacy or misinformation risks?,"[' \n26 \nMAP 4.1: Approaches for mapping AI technology and legal risks of its components ā€“ including the use of third-party data or \nsoftware ā€“ are in place, followed, and documented, as are risks of infringement of a third-partyā€™s intellectual property or other \nrights. \nAction ID \nSuggested Action \nGAI Risks \nMP-4.1-001 Conduct periodic monitoring of AI-generated content for privacy risks; address any \npossible instances of PII or sensitive data exposure. \nData Privacy \nMP-4.1-002 Implement processes for responding to potential intellectual property infringement \nclaims or other rights. \nIntellectual Property \nMP-4.1-003 \nConnect new GAI policies, procedures, and processes to existing model, data, \nsoftware development, and IT governance and to legal, compliance, and risk \nmanagement activities. \nInformation Security; Data Privacy \nMP-4.1-004 Document training data curation policies, to the extent possible and according to \napplicable laws and policies. \nIntellectual Property; Data Privacy; \nObscene, Degrading, and/or \nAbusive Content \nMP-4.1-005 \nEstablish policies for collection, retention, and minimum quality of data, in \nconsideration of the following risks: Disclosure of inappropriate CBRN information; \nUse of Illegal or dangerous content; Oļ¬€ensive cyber capabilities; Training data \nimbalances that could give rise to harmful biases; Leak of personally identiļ¬able \ninformation, including facial likenesses of individuals. \nCBRN Information or Capabilities; \nIntellectual Property; Information \nSecurity; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-006 Implement policies and practices deļ¬ning how third-party intellectual property and \ntraining data will be used, stored, and protected. \nIntellectual Property; Value Chain \nand Component Integration \nMP-4.1-007 Re-evaluate models that were ļ¬ne-tuned or enhanced on top of third-party \nmodels. \nValue Chain and Component \nIntegration \nMP-4.1-008 \nRe-evaluate risks when adapting GAI models to new domains. Additionally, \nestablish warning systems to determine if a GAI system is being used in a new \ndomain where previous assumptions (relating to context of use or mapped risks \nsuch as security, and safety) may no longer hold. \nCBRN Information or Capabilities; \nIntellectual Property; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-009 Leverage approaches to detect the presence of PII or sensitive data in generated \noutput text, image, video, or audio. 
\nData Privacy \n', "" \n27 \nMP-4.1-010 \nConduct appropriate diligence on training data use to assess intellectual property, \nand privacy, risks, including to examine whether use of proprietary or sensitive \ntraining data is consistent with applicable laws. \nIntellectual Property; Data Privacy \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities \n \nMAP 5.1: Likelihood and magnitude of each identiļ¬ed impact (both potentially beneļ¬cial and harmful) based on expected use, past \nuses of AI systems in similar contexts, public incident reports, feedback from those external to the team that developed or deployed \nthe AI system, or other data are identiļ¬ed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.1-001 Apply TEVV practices for content provenance (e.g., probing a system's synthetic \ndata generation capabilities for potential misuse or vulnerabilities. \nInformation Integrity; Information \nSecurity \nMP-5.1-002 \nIdentify potential content provenance harms of GAI, such as misinformation or \ndisinformation, deepfakes, including NCII, or tampered content. Enumerate and \nrank risks based on their likelihood and potential impact, and determine how well \nprovenance solutions address speciļ¬c risks and/or harms. \nInformation Integrity; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMP-5.1-003 \nConsider disclosing use of GAI to end users in relevant contexts, while considering \nthe objective of disclosure, the context of use, the likelihood and magnitude of the \nrisk posed, the audience of the disclosure, as well as the frequency of the \ndisclosures. \nHuman-AI Conļ¬guration \nMP-5.1-004 Prioritize GAI structured public feedback processes based on risk assessment \nestimates. \nInformation Integrity; CBRN \nInformation or Capabilities; \nDangerous, Violent, or Hateful \nContent; Harmful Bias and \nHomogenization \nMP-5.1-005 Conduct adversarial role-playing exercises, GAI red-teaming, or chaos testing to \nidentify anomalous or unforeseen failure modes. \nInformation Security \nMP-5.1-006 \nProļ¬le threats and negative impacts arising from GAI systems interacting with, \nmanipulating, or generating content, and outlining known and potential \nvulnerabilities and the likelihood of their occurrence. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Design, AI Development, AI Impact Assessment, Aļ¬€ected Individuals and Communities, End-\nUsers, Operation and Monitoring \n \n""]","The context does not explicitly link harmful AI biases to data privacy or misinformation risks. 
However, it mentions risks such as harmful biases, data privacy, and misinformation in separate sections, indicating that these issues are recognized but not directly connected in the provided text.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 29, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 30, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What to review for ethical use of sensitive data?,"[' \n \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \xad\xad\xad\xad\xad\xad\nIn addition to the privacy expectations above for general non-sensitive data, any system collecting, using, shar-\ning, or storing sensitive data should meet the expectations below. Depending on the technological use case and \nbased on an ethical assessment, consent for sensitive data may need to be acquired from a guardian and/or child. \nProvide enhanced protections for data related to sensitive domains \nNecessary functions only. Sensitive data should only be used for functions strictly necessary for that \ndomain or for functions that are required for administrative reasons (e.g., school attendance records), unless \nconsent is acquired, if appropriate, and the additional expectations in this section are met. Consent for non-\nnecessary functions should be optional, i.e., should not be required, incentivized, or coerced in order to \nreceive opportunities or access to services. In cases where data is provided to an entity (e.g., health insurance \ncompany) in order to facilitate payment for such a need, that data should only be used for that purpose. \nEthical review and use prohibitions. Any use of sensitive data or decision process based in part on sensi-\ntive data that might limit rights, opportunities, or access, whether the decision is automated or not, should go \nthrough a thorough ethical review and monitoring, both in advance and by periodic review (e.g., via an indepen-\ndent ethics committee or similarly robust process). In some cases, this ethical review may determine that data \nshould not be used or shared for specific uses even with consent. 
Some novel uses of automated systems in this \ncontext, where the algorithm is dynamically developing and where the science behind the use case is not well \nestablished, may also count as human subject experimentation, and require special review under organizational \ncompliance bodies applying medical, scientific, and academic human subject experimentation ethics rules and \ngovernance procedures. \nData quality. In sensitive domains, entities should be especially careful to maintain the quality of data to \navoid adverse consequences arising from decision-making based on flawed or inaccurate data. Such care is \nnecessary in a fragmented, complex data ecosystem and for datasets that have limited access such as for fraud \nprevention and law enforcement. It should be not left solely to individuals to carry the burden of reviewing and \ncorrecting data. Entities should conduct regular, independent audits and take prompt corrective measures to \nmaintain accurate, timely, and complete data. \nLimit access to sensitive data and derived data. Sensitive data and derived data should not be sold, \nshared, or made public as part of data brokerage or other agreements. Sensitive data includes data that can be \nused to infer sensitive information; even systems that are not directly marketed as sensitive domain technologies \nare expected to keep sensitive data private. Access to such data should be limited based on necessity and based \non a principle of local control, such that those individuals closest to the data subject have more access while \nthose who are less proximate do not (e.g., a teacher has access to their studentsā€™ daily progress data while a \nsuperintendent does not). \nReporting. In addition to the reporting on data privacy (as listed above for non-sensitive data), entities devel-\noping technologies related to a sensitive domain and those collecting, using, storing, or sharing sensitive data \nshould, whenever appropriate, regularly provide public reports describing: any data security lapses or breaches \nthat resulted in sensitive data leaks; the number, type, and outcomes of ethical pre-reviews undertaken; a \ndescription of any data sold, shared, or made public, and how that data was assessed to determine it did not pres-\nent a sensitive data risk; and ongoing risk identification and management procedures, and any mitigation added \nbased on these procedures. Reporting should be provided in a clear and machine-readable manner. \n38\n']","Any use of sensitive data or decision processes based in part on sensitive data that might limit rights, opportunities, or access should go through a thorough ethical review and monitoring, both in advance and by periodic review. This may involve an independent ethics committee or a similarly robust process. 
The ethical review may determine that data should not be used or shared for specific uses even with consent.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 37, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the concerns with surveillance tech in education and healthcare?,"["" \n65. See, e.g., Scott Ikeda. Major Data Broker Exposes 235 Million Social Media Profiles in Data Lead: Info\nAppears to Have Been Scraped Without Permission. CPO Magazine. Aug. 28, 2020. https://\nwww.cpomagazine.com/cyber-security/major-data-broker-exposes-235-million-social-media-profiles\xad\nin-data-leak/; Lily Hay Newman. 1.2 Billion Records Found Exposed Online in a Single Server. WIRED,\nNov. 22, 2019. https://www.wired.com/story/billion-records-exposed-online/\n66. Lola Fadulu. Facial Recognition Technology in Public Housing Prompts Backlash. New York Times.\nSept. 24, 2019.\nhttps://www.nytimes.com/2019/09/24/us/politics/facial-recognition-technology-housing.html\n67. Jo Constantz. ā€˜They Were Spying On Usā€™: Amazon, Walmart, Use Surveillance Technology to Bust\nUnions. Newsweek. Dec. 13, 2021.\nhttps://www.newsweek.com/they-were-spying-us-amazon-walmart-use-surveillance-technology-bust\xad\nunions-1658603\n68. See, e.g., enforcement actions by the FTC against the photo storage app Everalbaum\n(https://www.ftc.gov/legal-library/browse/cases-proceedings/192-3172-everalbum-inc-matter), and\nagainst Weight Watchers and their subsidiary Kurbo\n(https://www.ftc.gov/legal-library/browse/cases-proceedings/1923228-weight-watchersww)\n69. See, e.g., HIPAA, Pub. L 104-191 (1996); Fair Debt Collection Practices Act (FDCPA), Pub. L. 95-109\n(1977); Family Educational Rights and Privacy Act (FERPA) (20 U.S.C. Ā§ 1232g), Children's Online\nPrivacy Protection Act of 1998, 15 U.S.C. 6501ā€“6505, and Confidential Information Protection and\nStatistical Efficiency Act (CIPSEA) (116 Stat. 2899)\n70. Marshall Allen. You Snooze, You Lose: Insurers Make The Old Adage Literally True. ProPublica. Nov.\n21, 2018.\nhttps://www.propublica.org/article/you-snooze-you-lose-insurers-make-the-old-adage-literally-true\n71. Charles Duhigg. How Companies Learn Your Secrets. The New York Times. Feb. 16, 2012.\nhttps://www.nytimes.com/2012/02/19/magazine/shopping-habits.html\n72. Jack Gillum and Jeff Kao. Aggression Detectors: The Unproven, Invasive Surveillance Technology\nSchools are Using to Monitor Students. ProPublica. Jun. 25, 2019.\nhttps://features.propublica.org/aggression-detector/the-unproven-invasive-surveillance-technology\xad\nschools-are-using-to-monitor-students/\n73. Drew Harwell. Cheating-detection companies made millions during the pandemic. Now students are\nfighting back. Washington Post. Nov. 12, 2020.\nhttps://www.washingtonpost.com/technology/2020/11/12/test-monitoring-student-revolt/\n74. See, e.g., Heather Morrison. Virtual Testing Puts Disabled Students at a Disadvantage. Government\nTechnology. May 24, 2022.\nhttps://www.govtech.com/education/k-12/virtual-testing-puts-disabled-students-at-a-disadvantage;\nLydia X. Z. 
Brown, Ridhi Shetty, Matt Scherer, and Andrew Crawford. Ableism And Disability\nDiscrimination In New Surveillance Technologies: How new surveillance technologies in education,\npolicing, health care, and the workplace disproportionately harm disabled people. Center for Democracy\nand Technology Report. May 24, 2022.\nhttps://cdt.org/insights/ableism-and-disability-discrimination-in-new-surveillance-technologies-how\xad\nnew-surveillance-technologies-in-education-policing-health-care-and-the-workplace\xad\ndisproportionately-harm-disabled-people/\n69\n"", ' \n \n \nENDNOTES\n75. See., e.g., Sam Sabin. Digital surveillance in a post-Roe world. Politico. May 5, 2022. https://\nwww.politico.com/newsletters/digital-future-daily/2022/05/05/digital-surveillance-in-a-post-roe\xad\nworld-00030459; Federal Trade Commission. FTC Sues Kochava for Selling Data that Tracks People at\nReproductive Health Clinics, Places of Worship, and Other Sensitive Locations. Aug. 29, 2022. https://\nwww.ftc.gov/news-events/news/press-releases/2022/08/ftc-sues-kochava-selling-data-tracks-people\xad\nreproductive-health-clinics-places-worship-other\n76. Todd Feathers. This Private Equity Firm Is Amassing Companies That Collect Data on Americaā€™s\nChildren. The Markup. Jan. 11, 2022.\nhttps://themarkup.org/machine-learning/2022/01/11/this-private-equity-firm-is-amassing-companies\xad\nthat-collect-data-on-americas-children\n77. Reed Albergotti. Every employee who leaves Apple becomes an ā€˜associateā€™: In job databases used by\nemployers to verify resume information, every former Apple employeeā€™s title gets erased and replaced with\na generic title. The Washington Post. Feb. 10, 2022.\nhttps://www.washingtonpost.com/technology/2022/02/10/apple-associate/\n78. National Institute of Standards and Technology. Privacy Framework Perspectives and Success\nStories. Accessed May 2, 2022.\nhttps://www.nist.gov/privacy-framework/getting-started-0/perspectives-and-success-stories\n79. ACLU of New York. What You Need to Know About New Yorkā€™s Temporary Ban on Facial\nRecognition in Schools. Accessed May 2, 2022.\nhttps://www.nyclu.org/en/publications/what-you-need-know-about-new-yorks-temporary-ban-facial\xad\nrecognition-schools\n80. New York State Assembly. Amendment to Education Law. Enacted Dec. 22, 2020.\nhttps://nyassembly.gov/leg/?default_fld=&leg_video=&bn=S05140&term=2019&Summary=Y&Text=Y\n81. U.S Department of Labor. Labor-Management Reporting and Disclosure Act of 1959, As Amended.\nhttps://www.dol.gov/agencies/olms/laws/labor-management-reporting-and-disclosure-act (Section\n203). See also: U.S Department of Labor. Form LM-10. OLMS Fact Sheet, Accessed May 2, 2022. https://\nwww.dol.gov/sites/dolgov/files/OLMS/regs/compliance/LM-10_factsheet.pdf\n82. See, e.g., Apple. Protecting the Userā€™s Privacy. Accessed May 2, 2022.\nhttps://developer.apple.com/documentation/uikit/protecting_the_user_s_privacy; Google Developers.\nDesign for Safety: Android is secure by default and private by design. Accessed May 3, 2022.\nhttps://developer.android.com/design-for-safety\n83. Karen Hao. The coming war on the hidden algorithms that trap people in poverty. MIT Tech Review.\nDec. 4, 2020.\nhttps://www.technologyreview.com/2020/12/04/1013068/algorithms-create-a-poverty-trap-lawyers\xad\nfight-back/\n84. Anjana Samant, Aaron Horowitz, Kath Xu, and Sophie Beiers. Family Surveillance by Algorithm.\nACLU. 
Accessed May 2, 2022.\nhttps://www.aclu.org/fact-sheet/family-surveillance-algorithm\n70\n']","The concerns with surveillance technology in education and healthcare include its invasive nature, potential for discrimination, and the disproportionate harm it may cause to disabled individuals. Specifically, new surveillance technologies can monitor students in ways that may violate their privacy and exacerbate existing inequalities, particularly for those with disabilities.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 68, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 69, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the options for high-priority AI risks and their link to org tolerance?,"[' \n40 \nMANAGE 1.3: Responses to the AI risks deemed high priority, as identiļ¬ed by the MAP function, are developed, planned, and \ndocumented. Risk response options can include mitigating, transferring, avoiding, or accepting. \nAction ID \nSuggested Action \nGAI Risks \nMG-1.3-001 \nDocument trade-oļ¬€s, decision processes, and relevant measurement and \nfeedback results for risks that do not surpass organizational risk tolerance, for \nexample, in the context of model release: Consider diļ¬€erent approaches for \nmodel release, for example, leveraging a staged release approach. Consider \nrelease approaches in the context of the model and its projected use cases. \nMitigate, transfer, or avoid risks that surpass organizational risk tolerances. \nInformation Security \nMG-1.3-002 \nMonitor the robustness and eļ¬€ectiveness of risk controls and mitigation plans \n(e.g., via red-teaming, ļ¬eld testing, participatory engagements, performance \nassessments, user feedback mechanisms). \nHuman-AI Conļ¬guration \nAI Actor Tasks: AI Development, AI Deployment, AI Impact Assessment, Operation and Monitoring \n \nMANAGE 2.2: Mechanisms are in place and applied to sustain the value of deployed AI systems. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.2-001 \nCompare GAI system outputs against pre-deļ¬ned organization risk tolerance, \nguidelines, and principles, and review and test AI-generated content against \nthese guidelines. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content \nMG-2.2-002 \nDocument training data sources to trace the origin and provenance of AI-\ngenerated content. \nInformation Integrity \nMG-2.2-003 \nEvaluate feedback loops between GAI system content provenance and human \nreviewers, and update where needed. 
Implement real-time monitoring systems \nto affirm that content provenance protocols remain effective. \nInformation Integrity \nMG-2.2-004 \nEvaluate GAI content and data for representational biases and employ \ntechniques such as re-sampling, re-ranking, or adversarial training to mitigate \nbiases in the generated content. \nInformation Security; Harmful Bias \nand Homogenization \nMG-2.2-005 \nEngage in due diligence to analyze GAI output for harmful content, potential \nmisinformation, and CBRN-related or NCII content. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content \n']","The options for high-priority AI risks include mitigating, transferring, avoiding, or accepting these risks. Specifically, for risks that do not surpass organizational risk tolerance, it is suggested to document trade-offs, decision processes, and relevant measurement and feedback results. For risks that surpass organizational risk tolerances, the recommended actions are to mitigate, transfer, or avoid those risks.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 43, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
How does Navigator training relate to health coverage access?,"["" \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nHealthcare “navigators” help people find their way through online signup forms to choose \nand obtain healthcare. A Navigator is “an individual or organization that's trained and able to help \nconsumers, small businesses, and their employees as they look for health coverage options through the \nMarketplace (a government web site), including completing eligibility and enrollment forms.”106 For \nthe 2022 plan year, the Biden-Harris Administration increased funding so that grantee organizations could \n“train and certify more than 1,500 Navigators to help uninsured consumers find affordable and comprehensive \nhealth coverage.”107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests.
Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. Some federal \ncourts have found that such cure procedures are constitutionally required.110 \nBallot \ncuring processes vary among states, and include direct phone calls, emails, or mail contact by election \nofficials.111 Voters are asked to provide alternative information or a new signature to verify the validity of their \nballot. \n52\n""]","Navigator training is related to health coverage access as it equips individuals or organizations to help consumers, small businesses, and their employees navigate the process of finding and obtaining health coverage options through the Marketplace. This training enables Navigators to assist with completing eligibility and enrollment forms, thereby facilitating access to affordable and comprehensive health coverage for uninsured consumers.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 51, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What connects NIST's AI Safety Institute to AI bias mgmt?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home.
\n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n', ' \n57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence \nNorthcutt, C. et al. (2021) Pervasive Label Errors in Test Sets Destabilize Machine Learning Benchmarks. \narXiv. https://arxiv.org/pdf/2103.14749 \nOECD (2023) ""Advancing accountability in AI: Governing and managing risks throughout the lifecycle for \ntrustworthy AI"", OECD Digital Economy Papers, No. 349, OECD Publishing, Paris. \nhttps://doi.org/10.1787/2448f04b-en \nOECD (2024) ""Defining AI incidents and related terms"" OECD Artificial Intelligence Papers, No. 16, OECD \nPublishing, Paris. https://doi.org/10.1787/d1a8d965-en \nOpenAI (2023) GPT-4 System Card. https://cdn.openai.com/papers/gpt-4-system-card.pdf \nOpenAI (2024) GPT-4 Technical Report. https://arxiv.org/pdf/2303.08774 \nPadmakumar, V. et al. (2024) Does writing with language models reduce content diversity? ICLR. \nhttps://arxiv.org/pdf/2309.05196 \nPark, P. et. al. (2024) AI deception: A survey of examples, risks, and potential solutions. Patterns, 5(5). \narXiv. https://arxiv.org/pdf/2308.14752 \nPartnership on AI (2023) Building a Glossary for Synthetic Media Transparency Methods, Part 1: Indirect \nDisclosure. https://partnershiponai.org/glossary-for-synthetic-media-transparency-methods-part-1-\nindirect-disclosure/ \nQu, Y. et al. (2023) Unsafe Diffusion: On the Generation of Unsafe Images and Hateful Memes From Text-\nTo-Image Models. arXiv. https://arxiv.org/pdf/2305.13873 \nRafat, K. et al. (2023) Mitigating carbon footprint for knowledge distillation based deep learning model \ncompression. PLOS One. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0285668 \nSaid, I. et al. (2022) Nonconsensual Distribution of Intimate Images: Exploring the Role of Legal Attitudes \nin Victimization and Perpetration. Sage. \nhttps://journals.sagepub.com/doi/full/10.1177/08862605221122834#bibr47-08862605221122834 \nSandbrink, J.
(2023) Artificial intelligence and biological misuse: Differentiating risks of language models \nand biological design tools. arXiv. https://arxiv.org/pdf/2306.13952 \n']",The answer to given question is not present in context,reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 60, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What ensures automated systems are safe and fair?,"[' \nSECTION TITLE\nBLUEPRINT FOR AN AI BILL OF RIGHTS\nSAFE AND EFFECTIVE SYSTEMS\nYou should be protected from unsafe or ineffective systems. Automated systems should be \ndeveloped with consultation from diverse communities, stakeholders, and domain experts to identify \nconcerns, risks, and potential impacts of the system. Systems should undergo pre-deployment testing, risk \nidentification and mitigation, and ongoing monitoring that demonstrate they are safe and effective based on \ntheir intended use, mitigation of unsafe outcomes including those beyond the intended use, and adherence to \ndomain-specific standards. Outcomes of these protective measures should include the possibility of not \ndeploying the system or removing a system from use. Automated systems should not be designed with an intent \nor reasonably foreseeable possibility of endangering your safety or the safety of your community. They should \nbe designed to proactively protect you from harms stemming from unintended, yet foreseeable, uses or \nimpacts of automated systems. You should be protected from inappropriate or irrelevant data use in the \ndesign, development, and deployment of automated systems, and from the compounded harm of its reuse. \nIndependent evaluation and reporting that confirms that the system is safe and effective, including reporting of \nsteps taken to mitigate potential harms, should be performed and the results made public whenever possible. \nALGORITHMIC DISCRIMINATION PROTECTIONS\nYou should not face discrimination by algorithms and systems should be used and designed in \nan equitable way. Algorithmic discrimination occurs when automated systems contribute to unjustified \ndifferent treatment or impacts disfavoring people based on their race, color, ethnicity, sex (including \npregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other \nclassification protected by law.
Depending on the specific circumstances, such algorithmic discrimination \nmay violate legal protections. Designers, developers, and deployers of automated systems should take \nproactive and continuous measures to protect individuals and communities from algorithmic \ndiscrimination and to use and design systems in an equitable way. This protection should include proactive \nequity assessments as part of the system design, use of representative data and protection against proxies \nfor demographic features, ensuring accessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent \nevaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5\n']","Automated systems should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring to ensure they are safe and effective. They should be developed with consultation from diverse communities, stakeholders, and domain experts, and should include protective measures to prevent endangering safety. Additionally, independent evaluation and reporting that confirms the system's safety and effectiveness should be performed, with results made public whenever possible.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 4, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What issues come from biased automated systems in hiring and justice?,"["" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination.
Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts—basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women applicants \nfor spurious and discriminatory reasons; resumes with the word “women’s,” such as “women’s\nchess club captain,” were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n""]","Biased automated systems in hiring can lead to discriminatory decisions, such as hiring tools that reject women applicants for spurious reasons, penalizing resumes with the word 'women’s'.
In the justice system, predictive models can disproportionately label Black students as high risk of dropping out, and risk assessment tools can overpredict recidivism for some groups of color, leading to unfair treatment and outcomes.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What ensures independent eval & reporting for system safety?,"[' \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be carefully \nvalidated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indicating \nadverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law. Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examination, \nhas benefits for those impacted by the system that outweigh identified risks and, as appropriate, reasonable \nmeasures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g., \nvia application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations.
Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. \nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the \norganization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency, \nresults, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20\n']","Independent evaluation for system safety is ensured by designing automated systems to allow for independent evaluation through mechanisms such as application programming interfaces. Independent evaluators, including researchers, journalists, ethics review boards, inspectors general, and third-party auditors, should have access to the system and samples of associated data, consistent with privacy, security, law, or regulation. Additionally, entities responsible for automated systems should provide regularly-updated reports that include an overview of the system, data used, risk assessments, performance testing results, and independent evaluation outcomes, all presented in plain language and a machine-readable format.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How does public input influence the AI Bill of Rights?,"[' \n \n \nABOUT THIS FRAMEWORK\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the \ndesign, use, and deployment of automated systems to protect the rights of the American public in the age of \nartificial intelligence.
Developed through extensive consultation with the American public, these principles are \na blueprint for building and deploying automated systems that are aligned with democratic values and protect \ncivil rights, civil liberties, and privacy. The Blueprint for an AI Bill of Rights includes this Foreword, the five \nprinciples, notes on Applying the Blueprint for an AI Bill of Rights, and a Technical Companion that gives \nconcrete steps that can be taken by many kinds of organizations—from governments at all levels to companies of \nall sizes—to uphold these values. Experts from across the private sector, governments, and international \nconsortia have published principles and frameworks to guide the responsible use of automated systems; this \nframework provides a national values statement and toolkit that is sector-agnostic to inform building these \nprotections into policy, practice, or the technological design process. Where existing law or policy—such as \nsector-specific privacy laws and oversight requirements—do not already provide guidance, the Blueprint for an \nAI Bill of Rights should be used to inform policy decisions.\nLISTENING TO THE AMERICAN PUBLIC\nThe White House Office of Science and Technology Policy has led a year-long process to seek and distill input \nfrom people across the country—from impacted communities and industry stakeholders to technology developers \nand other experts across fields and sectors, as well as policymakers throughout the Federal government—on \nthe issue of algorithmic and data-driven harms and potential remedies. Through panel discussions, public listening \nsessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engagements. \n4\n']","Public input influences the AI Bill of Rights by providing insights and feedback from impacted communities, industry stakeholders, technology developers, and experts.
The White House Office of Science and Technology Policy conducted a year-long process to gather this input through various means, including panel discussions and public listening sessions, which helped shape the principles and practices outlined in the Blueprint for an AI Bill of Rights.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 3, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What issues arise from hidden criteria changes in benefit allocation?,"[' \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nA predictive policing system claimed to identify individuals at greatest risk to commit or become the victim of\ngun violence (based on automated analysis of social ties to gang members, criminal histories, previous experiences \nof gun violence, and other factors) and led to individuals being placed on a watch list with no\nexplanation or public transparency regarding how the system came to its conclusions.85 Both police and\nthe public deserve to understand why and how such a system is making these determinations.\n•\nA system awarding benefits changed its criteria invisibly. Individuals were denied benefits due to data entry\nerrors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42\n']","Issues arising from hidden criteria changes in benefit allocation include individuals being denied benefits due to data entry errors and other system flaws, which were only revealed when an explanation of the system was demanded. The lack of transparency made it harder for errors to be corrected in a timely manner.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 41, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What IP risks come from GAI using copyrighted works and data poisoning?,"[' \n11 \nvalue chain (e.g., data inputs, processing, GAI training, or deployment environments), conventional \ncybersecurity practices may need to adapt or evolve. \nFor instance, prompt injection involves modifying what input is provided to a GAI system so that it \nbehaves in unintended ways. In direct prompt injections, attackers might craft malicious prompts and \ninput them directly to a GAI system, with a variety of downstream negative consequences to \ninterconnected systems.
Indirect prompt injection attacks occur when adversaries remotely (i.e., without \na direct interface) exploit LLM-integrated applications by injecting prompts into data likely to be \nretrieved. Security researchers have already demonstrated how indirect prompt injections can exploit \nvulnerabilities by stealing proprietary data or running malicious code remotely on a machine. Merely \nquerying a closed production model can elicit previously undisclosed information about that model. \nAnother cybersecurity risk to GAI is data poisoning, in which an adversary compromises a training \ndataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts \nof the model could exacerbate risks associated with GAI system outputs. \nTrustworthy AI Characteristics: Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n2.10. \nIntellectual Property \nIntellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair \nuse under the fair use doctrine. If a GAI system’s training data included copyrighted material, GAI \noutputs displaying instances of training data memorization (see Data Privacy above) could infringe on \ncopyright. \nHow GAI relates to copyright, including the status of generated content that is similar to but does not \nstrictly copy work protected by copyright, is currently being debated in legal fora. Similar discussions are \ntaking place regarding the use or emulation of personal identity, likeness, or voice without permission. \nTrustworthy AI Characteristics: Accountable and Transparent, Fair with Harmful Bias Managed, Privacy \nEnhanced \n2.11. \nObscene, Degrading, and/or Abusive Content \nGAI can ease the production of and access to illegal non-consensual intimate imagery (NCII) of adults, \nand/or child sexual abuse material (CSAM). GAI-generated obscene, abusive or degrading content can \ncreate privacy, psychological and emotional, and even physical harms, and in some cases may be illegal. \nGenerated explicit or obscene AI content may include highly realistic “deepfakes” of real individuals, \nincluding children. The spread of this kind of material can have downstream negative consequences: in \nthe context of CSAM, even if the generated images do not resemble specific individuals, the prevalence \nof such images can divert time and resources from efforts to find real-world victims. Outside of CSAM, \nthe creation and spread of NCII disproportionately impacts women and sexual minorities, and can have \nsubsequent negative consequences including decline in overall mental health, substance abuse, and \neven suicidal thoughts. \nData used for training GAI models may unintentionally include CSAM and NCII. A recent report noted \nthat several commonly used GAI training datasets were found to contain hundreds of known images of \n']","Intellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair use under the fair use doctrine. If a GAI system’s training data included copyrighted material, GAI outputs displaying instances of training data memorization could infringe on copyright.
Additionally, data poisoning poses a risk where an adversary compromises a training dataset used by a model to manipulate its outputs or operation, potentially leading to malicious tampering with data or parts of the model.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 14, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What ensures human oversight in automated voting signatures?,"[' \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere are many reasons people may prefer not to use an automated system: the system can be flawed and can lead to \nunintended outcomes; it may reinforce bias or be inaccessible; it may simply be inconvenient or unavailable; or it may \nreplace a paper or manual process to which people had grown accustomed. Yet members of the public are often \npresented with no alternative, or are forced to endure a cumbersome process to reach a human decision-maker once \nthey decide they no longer want to deal exclusively with the automated system or be impacted by its results. As a result \nof this lack of human reconsideration, many receive delayed access, or lose access, to rights, opportunities, benefits, \nand critical services. The American public deserves the assurance that, when rights, opportunities, or access are \nmeaningfully at stake and there is a reasonable expectation of an alternative to an automated system, they can conveniently \nopt out of an automated system and will not be disadvantaged for that choice. In some cases, such a human or \nother alternative may be required by law, for example it could be required as “reasonable accommodations” for people \nwith disabilities. \nIn addition to being able to opt out and use a human alternative, the American public deserves a human fallback \nsystem in the event that an automated system fails or causes harm. No matter how rigorously an automated system is \ntested, there will always be situations for which the system fails. The American public deserves protection via human \nreview against these outlying or unexpected scenarios. In the case of time-critical systems, the public should not have \nto wait—immediate human consideration and fallback should be available. In many time-critical systems, such a \nremedy is already immediately available, such as a building manager who can open a door in the case an automated \ncard access system fails. \nIn the criminal justice system, employment, education, healthcare, and other sensitive domains, automated systems \nare used for many purposes, from pre-trial risk assessments and parole decisions to technologies that help doctors \ndiagnose disease. Absent appropriate safeguards, these technologies can lead to unfair, inaccurate, or dangerous \noutcomes. These sensitive domains require extra protections.
It is critically important that there is extensive human \noversight in such settings. \nThese critical protections have been adopted in some scenarios. Where automated systems have been introduced to \nprovide the public access to government benefits, existing human paper and phone-based processes are generally still \nin place, providing an important alternative to ensure access. Companies that have introduced automated call centers \noften retain the option of dialing zero to reach an operator. When automated identity controls are in place to board an \nairplane or enter the country, there is a person supervising the systems who can be turned to for help or to appeal a \nmisidentification. \nThe American people deserve the reassurance that such procedures are in place to protect their rights, opportunities, \nand access. People make mistakes, and a human alternative or fallback mechanism will not always have the right \nanswer, but they serve as an important check on the power and validity of automated systems. \n• An automated signature matching system is used as part of the voting process in many parts of the country to\ndetermine whether the signature on a mail-in ballot matches the signature on file. These signature matching\nsystems are less likely to work correctly for some voters, including voters with mental or physical\ndisabilities, voters with shorter or hyphenated names, and voters who have changed their name.97 A human\ncuring process,98 which helps voters to confirm their signatures and correct other voting mistakes, is\nimportant to ensure all votes are counted,99 and it is already standard practice in much of the country for\nboth an election official and the voter to have the opportunity to review and correct any such issues.100 \n47\n']","A human curing process helps voters confirm their signatures and correct other voting mistakes, ensuring that all votes are counted. This process is already standard practice in much of the country, allowing both an election official and the voter to review and correct any issues.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 46, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How do algorithmic impact assessments relate to automated system transparency?,"["" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAn automated system should provide demonstrably clear, timely, understandable, and accessible notice of use, and \nexplanations as to how and why a decision was made or an action was taken by the system. These expectations are \nexplained below. \nProvide clear, timely, understandable, and accessible notice of use and explanations \nGenerally accessible plain language documentation.
The entity responsible for using the automated \nsystem should ensure that documentation describing the overall system (including any human components) is \npublic and easy to find. The documentation should describe, in plain language, how the system works and how \nany automated component is used to determine an action or decision. It should also include expectations about \nreporting described throughout this framework, such as the algorithmic impact assessments described as \npart of Algorithmic Discrimination Protections. \nAccountable. Notices should clearly identify the entity responsible for designing each component of the \nsystem and the entity using it. \nTimely and up-to-date. Users should receive notice of the use of automated systems in advance of using or \nwhile being impacted by the technology. An explanation should be available with the decision itself, or soon \nthereafter. Notice should be kept up-to-date and people impacted by the system should be notified of use case \nor key functionality changes. \nBrief and clear. Notices and explanations should be assessed, such as by research on users’ experiences, \nincluding user testing, to ensure that the people using or impacted by the automated system are able to easily \nfind notices and explanations, read them quickly, and understand and act on them. This includes ensuring that \nnotices and explanations are accessible to users with disabilities and are available in the language(s) and reading \nlevel appropriate for the audience. Notices and explanations may need to be available in multiple forms, \n(e.g., on paper, on a physical sign, or online), in order to meet these expectations and to be accessible to the \nAmerican public. \nProvide explanations as to how and why a decision was made or an action was taken by an \nautomated system \nTailored to the purpose. Explanations should be tailored to the specific purpose for which the user is \nexpected to use the explanation, and should clearly state that purpose. An informational explanation might \ndiffer from an explanation provided to allow for the possibility of recourse, an appeal, or one provided in the \ncontext of a dispute or contestation process. For the purposes of this framework, 'explanation' should be \nconstrued broadly. An explanation need not be a plain-language statement about causality but could consist of \nany mechanism that allows the recipient to build the necessary understanding and intuitions to achieve the \nstated purpose. Tailoring should be assessed (e.g., via user experience research). \nTailored to the target of the explanation. Explanations should be targeted to specific audiences and \nclearly state that audience. An explanation provided to the subject of a decision might differ from one provided \nto an advocate, or to a domain expert or decision maker. Tailoring should be assessed (e.g., via user experience \nresearch).
\n43\n""]",The answer to given question is not present in context,reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 42, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What knowledge and security factors should be noted for GAI deployment?,"[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Configuration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, fine-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Configuration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Configuration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that fine-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy.
\nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n']","The context mentions several knowledge and security factors for GAI deployment, including the need to document the extent of human domain knowledge employed to improve GAI system performance, verify sources and citations in GAI system outputs, track instances of anthropomorphization in GAI system interfaces, verify GAI system training data and TEVV data provenance, and regularly review security and safety guardrails, especially in novel circumstances.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
How do security measures relate to info integrity?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain effective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability – as identified in the MAP function – are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Configuration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modified, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing.
\nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",The answer to given question is not present in context,reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What links are there between tech protections and the AI Bill of Rights?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n- \nUSING THIS TECHNICAL COMPANION\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the design, \nuse, and deployment of automated systems to protect the rights of the American public in the age of artificial \nintelligence. This technical companion considers each principle in the Blueprint for an AI Bill of Rights and \nprovides examples and concrete steps for communities, industry, governments, and others to take in order to \nbuild these protections into policy, practice, or the technological design process. \nTaken together, the technical protections and practices laid out in the Blueprint for an AI Bill of Rights can help \nguard the American public against many of the potential and actual harms identified by researchers, technologists, \nadvocates, journalists, policymakers, and communities in the United States and around the world. This \ntechnical companion is intended to be used as a reference by people across many circumstances – anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. \nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS: \n• The expectations for automated systems are meant to serve as a blueprint for the development of additional technical\nstandards and practices that should be tailored for particular sectors and contexts.\n• This section outlines practical steps that can be implemented to realize the vision of the Blueprint for an AI Bill of Rights. The \nexpectations laid out often mirror existing practices for technology development, including pre-deployment testing, ongoing \nmonitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release.
Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguarding \nindividuals’ rights. These reporting expectations are important for transparency, so the American people can have\nconfidence that their rights, opportunities, and access as well as their expectations about technologies are respected. \n3\nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE: \nThis section provides real-life examples of how these guiding principles can become reality, through laws, policies, and practices. \nIt describes practical technical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe examples provided are not critiques or endorsements, but rather are offered as illustrative cases to help \nprovide a concrete vision for actualizing the Blueprint for an AI Bill of Rights. Effectively implementing these \nprocesses require the cooperation of and collaboration among industry, civil society, researchers, policymakers, \ntechnologists, and the public. \n14\n']",The context does not provide specific links between tech protections and the AI Bill of Rights.,reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 13, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
Which NSF programs ensure automated system safety and compliance?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nSome U.S government agencies have developed specific frameworks for ethical use of AI \nsystems. The Department of Energy (DOE) has activated the AI Advancement Council that oversees coordination \nand advises on implementation of the DOE AI Strategy and addresses issues and/or escalations on the \nethical use and development of AI systems.20 The Department of Defense has adopted Artificial Intelligence \nEthical Principles, and tenets for Responsible Artificial Intelligence specifically tailored to its national \nsecurity and defense activities.21 Similarly, the U.S. Intelligence Community (IC) has developed the Principles \nof Artificial Intelligence Ethics for the Intelligence Community to guide personnel on whether and how to \ndevelop and use AI in furtherance of the IC\'s mission, as well as an AI Ethics Framework to help implement \nthese principles.22\nThe National Science Foundation (NSF) funds extensive research to help foster the \ndevelopment of automated systems that adhere to and advance their safety, security and \neffectiveness.
Multiple NSF programs support research that directly addresses many of these principles: \nthe National AI Research Institutes23 support research on all aspects of safe, trustworthy, fair, and explainable \nAI algorithms and systems; the Cyber Physical Systems24 program supports research on developing safe \nautonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace25 \nprogram supports research on cybersecurity and privacy enhancing technologies in automated systems; the \nFormal Methods in the Field26 program supports research on rigorous formal verification and analysis of \nautomated systems and machine learning, and the Designing Accountable Software Systems27 program supports \nresearch on rigorous and reproducible methodologies for developing software systems with legal and regulatory \ncompliance in mind. \nSome state legislatures have placed strong transparency and validity requirements on \nthe use of pretrial risk assessments. The use of algorithmic pretrial risk assessments has been a \ncause of concern for civil rights groups.28 Idaho Code Section 19-1910, enacted in 2019,29 requires that any \npretrial risk assessment, before use in the state, first be ""shown to be free of bias against any class of \nindividuals protected from discrimination by state or federal law"", that any locality using a pretrial risk \nassessment must first formally validate the claim of its being free of bias, that ""all documents, records, and \ninformation used to build or validate the risk assessment shall be open to public inspection,"" and that assertions \nof trade secrets cannot be used ""to quash discovery in a criminal matter by a party to a criminal case."" \n22\n']","The NSF programs that ensure automated system safety and compliance include the National AI Research Institutes, which support research on safe, trustworthy, fair, and explainable AI algorithms and systems; the Cyber Physical Systems program, which supports research on developing safe autonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace program, which supports research on cybersecurity and privacy enhancing technologies in automated systems; the Formal Methods in the Field program, which supports research on rigorous formal verification and analysis of automated systems and machine learning; and the Designing Accountable Software Systems program, which supports research on rigorous and reproducible methodologies for developing software systems with legal and regulatory compliance in mind.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 21, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What drives the need for human input in sensitive automated systems?,"['You should be able to opt out, where appropriate, and \nhave access to a person who can quickly consider and \nremedy problems you encounter. You should be able to opt \nout from automated systems in favor of a human alternative, where \nappropriate. 
Appropriateness should be determined based on rea\xad\nsonable expectations in a given context and with a focus on ensuring \nbroad accessibility and protecting the public from especially harm\xad\nful impacts. In some cases, a human or other alternative may be re\xad\nquired by law. You should have access to timely human consider\xad\nation and remedy by a fallback and escalation process if an automat\xad\ned system fails, it produces an error, or you would like to appeal or \ncontest its impacts on you. Human consideration and fallback \nshould be accessible, equitable, effective, maintained, accompanied \nby appropriate operator training, and should not impose an unrea\xad\nsonable burden on the public. Automated systems with an intended \nuse within sensitive domains, including, but not limited to, criminal \njustice, employment, education, and health, should additionally be \ntailored to the purpose, provide meaningful access for oversight, \ninclude training for any people interacting with the system, and in\xad\ncorporate human consideration for adverse or high-risk decisions. \nReporting that includes a description of these human governance \nprocesses and assessment of their timeliness, accessibility, out\xad\ncomes, and effectiveness should be made public whenever possible. \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK \n46\n']","The need for human input in sensitive automated systems is driven by the requirement for timely human consideration and remedy when automated systems fail, produce errors, or when individuals wish to appeal or contest the impacts of these systems. Additionally, human input is necessary to ensure that automated systems are tailored to their intended purpose, provide meaningful access for oversight, and incorporate human consideration for adverse or high-risk decisions.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 45, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What links field testing, user feedback, and GAI eval?,"[' \n50 \nParticipatory Engagement Methods \nOn an ad hoc or more structured basis, organizations can design and use a variety of channels to engage \nexternal stakeholders in product development or review. Focus groups with select experts can provide \nfeedback on a range of issues. Small user studies can provide feedback from representative groups or \npopulations. Anonymous surveys can be used to poll or gauge reactions to specific features. Participatory \nengagement methods are often less structured than field testing or red teaming, and are more \ncommonly used in early stages of AI or product development. \nField Testing \nField testing involves structured settings to evaluate risks and impacts and to simulate the conditions \nunder which the GAI system will be deployed. Field style tests can be adapted from a focus on user \npreferences and experiences towards AI risks and impacts – both negative and positive. When carried \nout with large groups of users, these tests can provide estimations of the likelihood of risks and impacts \nin real world interactions. 
\nOrganizations may also collect feedback on outcomes, harms, and user experience directly from users in \nthe production environment after a model has been released, in accordance with human subject \nstandards such as informed consent and compensation. Organizations should follow applicable human \nsubjects research requirements, and best practices such as informed consent and subject compensation, \nwhen implementing feedback activities. \nAI Red-teaming \nAI red-teaming is an evolving practice that references exercises often conducted in a controlled \nenvironment and in collaboration with AI developers building AI models to identify potential adverse \nbehavior or outcomes of a GAI model or system, how they could occur, and stress test safeguards”. AI \nred-teaming can be performed before or after AI models or systems are made available to the broader \npublic; this section focuses on red-teaming in pre-deployment contexts. \nThe quality of AI red-teaming outputs is related to the background and expertise of the AI red team \nitself. Demographically and interdisciplinarily diverse AI red teams can be used to identify flaws in the \nvarying contexts where GAI will be used. For best results, AI red teams should demonstrate domain \nexpertise, and awareness of socio-cultural aspects within the deployment context. AI red-teaming results \nshould be given additional analysis before they are incorporated into organizational governance and \ndecision making, policy and procedural updates, and AI risk management efforts. \nVarious types of AI red-teaming may be appropriate, depending on the use case: \n• \nGeneral Public: Performed by general users (not necessarily AI or technical experts) who are \nexpected to use the model or interact with its outputs, and who bring their own lived \nexperiences and perspectives to the task of AI red-teaming. These individuals may have been \nprovided instructions and material to complete tasks which may elicit harmful model behaviors. \nThis type of exercise can be more effective with large groups of AI red-teamers. \n• \nExpert: Performed by specialists with expertise in the domain or specific AI red-teaming context \nof use (e.g., medicine, biotech, cybersecurity). \n• \nCombination: In scenarios when it is difficult to identify and recruit specialists with sufficient \ndomain and contextual expertise, AI red-teaming exercises may leverage both expert and \n', ' \n49 \nearly lifecycle TEVV approaches are developed and matured for GAI, organizations may use \nrecommended “pre-deployment testing” practices to measure performance, capabilities, limits, risks, \nand impacts. This section describes risk measurement and estimation as part of pre-deployment TEVV, \nand examines the state of play for pre-deployment testing methodologies. \nLimitations of Current Pre-deployment Test Approaches \nCurrently available pre-deployment TEVV processes used for GAI applications may be inadequate, non-\nsystematically applied, or fail to reflect or mismatched to deployment contexts. For example, the \nanecdotal testing of GAI system capabilities through video games or standardized tests designed for \nhumans (e.g., intelligence tests, professional licensing exams) does not guarantee GAI system validity or \nreliability in those domains. Similarly, jailbreaking or prompt engineering tests may not systematically \nassess validity or reliability risks. \nMeasurement gaps can arise from mismatches between laboratory and real-world settings. 
Current \ntesting approaches often remain focused on laboratory conditions or restricted to benchmark test \ndatasets and in silico techniques that may not extrapolate well to—or directly assess GAI impacts in real-\nworld conditions. For example, current measurement gaps for GAI make it difficult to precisely estimate \nits potential ecosystem-level or longitudinal risks and related political, social, and economic impacts. \nGaps between benchmarks and real-world use of GAI systems may likely be exacerbated due to prompt \nsensitivity and broad heterogeneity of contexts of use. \nA.1.5. Structured Public Feedback \nStructured public feedback can be used to evaluate whether GAI systems are performing as intended \nand to calibrate and verify traditional measurement methods. Examples of structured feedback include, \nbut are not limited to: \n• \nParticipatory Engagement Methods: Methods used to solicit feedback from civil society groups, \naffected communities, and users, including focus groups, small user studies, and surveys. \n• \nField Testing: Methods used to determine how people interact with, consume, use, and make \nsense of AI-generated information, and subsequent actions and effects, including UX, usability, \nand other structured, randomized experiments. \n• \nAI Red-teaming: A structured testing exercise used to probe an AI system to find flaws and \nvulnerabilities such as inaccurate, harmful, or discriminatory outputs, often in a controlled \nenvironment and in collaboration with system developers. \nInformation gathered from structured public feedback can inform design, implementation, deployment \napproval, maintenance, or decommissioning decisions. Results and insights gleaned from these exercises \ncan serve multiple purposes, including improving data quality and preprocessing, bolstering governance \ndecision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation. \n']","Field testing, user feedback, and GAI evaluation are linked through structured public feedback mechanisms that assess how GAI systems perform in real-world conditions. Field testing evaluates risks and impacts in controlled settings, while user feedback, gathered through participatory engagement methods, helps organizations understand user interactions and experiences with AI-generated information. 
Together, these approaches inform the design, implementation, and governance of GAI systems.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 53, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 52, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What risk controls for third-party GAI in compliance?,"[' \n48 \n• Data protection \n• Data retention \n• Consistency in use of defining key terms \n• Decommissioning \n• Discouraging anonymous use \n• Education \n• Impact assessments \n• Incident response \n• Monitoring \n• Opt-outs \n• Risk-based controls \n• Risk mapping and measurement \n• Science-backed TEVV practices \n• Secure software development practices \n• Stakeholder engagement \n• Synthetic content detection and \nlabeling tools and techniques \n• Whistleblower protections \n• Workforce diversity and \ninterdisciplinary teams\nEstablishing acceptable use policies and guidance for the use of GAI in formal human-AI teaming settings \nas well as different levels of human-AI configurations can help to decrease risks arising from misuse, \nabuse, inappropriate repurpose, and misalignment between systems and users. These practices are just \none example of adapting existing governance protocols for GAI contexts. \nA.1.3. Third-Party Considerations \nOrganizations may seek to acquire, embed, incorporate, or use open-source or proprietary third-party \nGAI models, systems, or generated data for various applications across an enterprise. Use of these GAI \ntools and inputs has implications for all functions of the organization – including but not limited to \nacquisition, human resources, legal, compliance, and IT services – regardless of whether they are carried \nout by employees or third parties. Many of the actions cited above are relevant and options for \naddressing third-party considerations. \nThird party GAI integrations may give rise to increased intellectual property, data privacy, or information \nsecurity risks, pointing to the need for clear guidelines for transparency and risk management regarding \nthe collection and use of third-party data for model inputs. Organizations may consider varying risk \ncontrols for foundation models, fine-tuned models, and embedded tools, enhanced processes for \ninteracting with external GAI technologies or service providers. 
Organizations can apply standard or \nexisting risk controls and processes to proprietary or open-source GAI technologies, data, and third-party \nservice providers, including acquisition and procurement due diligence, requests for software bills of \nmaterials (SBOMs), application of service level agreements (SLAs), and statement on standards for \nattestation engagement (SSAE) reports to help with third-party transparency and risk management for \nGAI systems. \nA.1.4. Pre-Deployment Testing \nOverview \nThe diverse ways and contexts in which GAI systems may be developed, used, and repurposed \ncomplicates risk mapping and pre-deployment measurement efforts. Robust test, evaluation, validation, \nand verification (TEVV) processes can be iteratively applied – and documented – in early stages of the AI \nlifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous \n']","Organizations can apply standard or existing risk controls and processes to proprietary or open-source GAI technologies, data, and third-party service providers, including acquisition and procurement due diligence, requests for software bills of materials (SBOMs), application of service level agreements (SLAs), and statement on standards for attestation engagement (SSAE) reports to help with third-party transparency and risk management for GAI systems.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 51, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What ensures effective incident response for third-party GAI?,"[' \n22 \nGV-6.2-003 \nEstablish incident response plans for third-party GAI technologies: Align incident \nresponse plans with impacts enumerated in MAP 5.1; Communicate third-party \nGAI incident response plans to all relevant AI Actors; Define ownership of GAI \nincident response functions; Rehearse third-party GAI incident response plans at \na regular cadence; Improve incident response plans based on retrospective \nlearning; Review incident response plans for alignment with relevant breach \nreporting, data protection, data privacy, or other laws. \nData Privacy; Human-AI \nConfiguration; Information \nSecurity; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization \nGV-6.2-004 \nEstablish policies and procedures for continuous monitoring of third-party GAI \nsystems in deployment. \nValue Chain and Component \nIntegration \nGV-6.2-005 \nEstablish policies and procedures that address GAI data redundancy, including \nmodel weights and other system artifacts. \nHarmful Bias and Homogenization \nGV-6.2-006 \nEstablish policies and procedures to test and manage risks related to rollover and \nfallback technologies for GAI systems, acknowledging that rollover and fallback \nmay include manual processing. 
\nInformation Integrity \nGV-6.2-007 \nReview vendor contracts and avoid arbitrary or capricious termination of critical \nGAI technologies or vendor services and non-standard terms that may amplify or \ndefer liability in unexpected ways and/or contribute to unauthorized data \ncollection by vendors or third-parties (e.g., secondary data use). Consider: Clear \nassignment of liability and responsibility for incidents, GAI system changes over \ntime (e.g., fine-tuning, drift, decay); Request: Notification and disclosure for \nserious incidents arising from third-party data and systems; Service Level \nAgreements (SLAs) in vendor contracts that address incident response, response \ntimes, and availability of critical support. \nHuman-AI Configuration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV, Third-party entities \n \nMAP 1.1: Intended purposes, potentially beneficial uses, context specific laws, norms and expectations, and prospective settings in \nwhich the AI system will be deployed are understood and documented. Considerations include: the specific set or types of users \nalong with their expectations; potential positive and negative impacts of system uses to individuals, communities, organizations, \nsociety, and the planet; assumptions and related limitations about AI system purposes, uses, and risks across the development or \nproduct AI lifecycle; and related TEVV and system metrics. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.1-001 \nWhen identifying intended purposes, consider factors such as internal vs. \nexternal use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty \n']","Effective incident response for third-party GAI is ensured by establishing incident response plans that align with impacts, communicating these plans to relevant AI actors, defining ownership of incident response functions, rehearsing the plans regularly, improving them based on retrospective learning, and reviewing for alignment with relevant laws.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 25, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What data leaks cause privacy issues?,"[' \n4 \n1. CBRN Information or Capabilities: Eased access to or synthesis of materially nefarious \ninformation or design capabilities related to chemical, biological, radiological, or nuclear (CBRN) \nweapons or other dangerous materials or agents. \n2. Confabulation: The production of confidently stated but erroneous or false content (known \ncolloquially as “hallucinations” or “fabrications”) by which users may be misled or deceived.6 \n3. Dangerous, Violent, or Hateful Content: Eased production of and access to violent, inciting, \nradicalizing, or threatening content as well as recommendations to carry out self-harm or \nconduct illegal activities. 
Includes difficulty controlling public exposure to hateful and disparaging \nor stereotyping content. \n4. Data Privacy: Impacts due to leakage and unauthorized use, disclosure, or de-anonymization of \nbiometric, health, location, or other personally identifiable information or sensitive data.7 \n5. Environmental Impacts: Impacts due to high compute resource utilization in training or \noperating GAI models, and related outcomes that may adversely impact ecosystems. \n6. Harmful Bias or Homogenization: Amplification and exacerbation of historical, societal, and \nsystemic biases; performance disparities8 between sub-groups or languages, possibly due to \nnon-representative training data, that result in discrimination, amplification of biases, or \nincorrect presumptions about performance; undesired homogeneity that skews system or model \noutputs, which may be erroneous, lead to ill-founded decision-making, or amplify harmful \nbiases. \n7. Human-AI Configuration: Arrangements of or interactions between a human and an AI system \nwhich can result in the human inappropriately anthropomorphizing GAI systems or experiencing \nalgorithmic aversion, automation bias, over-reliance, or emotional entanglement with GAI \nsystems. \n8. Information Integrity: Lowered barrier to entry to generate and support the exchange and \nconsumption of content which may not distinguish fact from opinion or fiction or acknowledge \nuncertainties, or could be leveraged for large-scale dis- and mis-information campaigns. \n9. Information Security: Lowered barriers for offensive cyber capabilities, including via automated \ndiscovery and exploitation of vulnerabilities to ease hacking, malware, phishing, offensive cyber \n \n \n6 Some commenters have noted that the terms “hallucination” and “fabrication” anthropomorphize GAI, which \nitself is a risk related to GAI systems as it can inappropriately attribute human characteristics to non-human \nentities. \n7 What is categorized as sensitive data or sensitive PII can be highly contextual based on the nature of the \ninformation, but examples of sensitive information include information that relates to an information subject’s \nmost intimate sphere, including political opinions, sex life, or criminal convictions. \n8 The notion of harm presumes some baseline scenario that the harmful factor (e.g., a GAI model) makes worse. \nWhen the mechanism for potential harm is a disparity between groups, it can be difficult to establish what the \nmost appropriate baseline is to compare against, which can result in divergent views on when a disparity between \nAI behaviors for different subgroups constitutes a harm. In discussing harms from disparities such as biased \nbehavior, this document highlights examples where someone’s situation is worsened relative to what it would have \nbeen in the absence of any AI system, making the outcome unambiguously a harm of the system. 
\n']","The context mentions impacts due to leakage and unauthorized use, disclosure, or de-anonymization of biometric, health, location, or other personally identifiable information or sensitive data as causes of privacy issues.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 7, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are the risks of collecting sensitive student data?,"[' \n \n \n \n \n \n \nDATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\nā€¢\nContinuous positive airway pressure machines gather data for medical purposes, such as diagnosing sleep\napnea, and send usage data to a patientā€™s insurance company, which may subsequently deny coverage for the\ndevice based on usage data. Patients were not aware that the data would be used in this way or monitored\nby anyone other than their doctor.70 \nā€¢\nA department store company used predictive analytics applied to collected consumer data to determine that a\nteenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her\nhouse, revealing to her father that she was pregnant.71\nā€¢\nSchool audio surveillance systems monitor student conversations to detect potential ""stress indicators"" as\na warning of potential violence.72 Online proctoring systems claim to detect if a student is cheating on an\nexam using biometric markers.73 These systems have the potential to limit student freedom to express a range\nof emotions at school and may inappropriately flag students with disabilities who need accommodations or\nuse screen readers or dictation software as cheating.74\nā€¢\nLocation data, acquired from a data broker, can be used to identify people who visit abortion clinics.75\nā€¢\nCompanies collect student data such as demographic information, free or reduced lunch status, whether\nthey\'ve used drugs, or whether they\'ve expressed interest in LGBTQI+ groups, and then use that data to \nforecast student success.76 Parents and education experts have expressed concern about collection of such\nsensitive data without express parental consent, the lack of transparency in how such data is being used, and\nthe potential for resulting discriminatory impacts.\nā€¢ Many employers transfer employee data to third party job verification services. This information is then used\nby potential future employers, banks, or landlords. In one case, a former employee alleged that a\ncompany supplied false data about her job title which resulted in a job offer being revoked.77\n37\n']","The risks of collecting sensitive student data include concerns about the lack of express parental consent, the lack of transparency in how the data is being used, and the potential for resulting discriminatory impacts. 
Additionally, the data collected can include sensitive information such as demographic details, drug use, and interest in LGBTQI+ groups, which may lead to inappropriate forecasting of student success and flagging of students with disabilities as cheating.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 36, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How do AI red-teaming and stakeholder engagement connect in privacy risk assessment?,"[' \n35 \nMEASURE 2.9: The AI model is explained, validated, and documented, and AI system output is interpreted within its context – as \nidentified in the MAP function – to inform responsible use and governance. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.9-001 \nApply and document ML explanation results such as: Analysis of embeddings, \nCounterfactual prompts, Gradient-based attributions, Model \ncompression/surrogate models, Occlusion/term reduction. \nConfabulation \nMS-2.9-002 \nDocument GAI model details including: Proposed use and organizational value; \nAssumptions and limitations, Data collection methodologies; Data provenance; \nData quality; Model architecture (e.g., convolutional neural network, \ntransformers, etc.); Optimization objectives; Training algorithms; RLHF \napproaches; Fine-tuning or retrieval-augmented generation approaches; \nEvaluation data; Ethical considerations; Legal and regulatory requirements. \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 2.10: Privacy risk of the AI system – as identified in the MAP function – is examined and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.10-001 \nConduct AI red-teaming to assess issues such as: Outputting of training data \nsamples, and subsequent reverse engineering, model extraction, and \nmembership inference risks; Revealing biometric, confidential, copyrighted, \nlicensed, patented, personal, proprietary, sensitive, or trade-marked information; \nTracking or revealing location information of users or members of training \ndatasets. \nHuman-AI Configuration; \nInformation Integrity; Intellectual \nProperty \nMS-2.10-002 \nEngage directly with end-users and other stakeholders to understand their \nexpectations and concerns regarding content provenance. Use this feedback to \nguide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n']","AI red-teaming and stakeholder engagement connect in privacy risk assessment by engaging directly with end-users and other stakeholders to understand their expectations and concerns regarding content provenance. 
This feedback is then used to guide the design of provenance data-tracking techniques, which is essential for addressing privacy risks identified during AI red-teaming assessments.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 38, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What connects attack surfaces to system and data risks?,"[' \n5 \noperations, or other cyberattacks; increased attack surface for targeted cyberattacks, which may \ncompromise a system’s availability or the confidentiality or integrity of training data, code, or \nmodel weights. \n10. Intellectual Property: Eased production or replication of alleged copyrighted, trademarked, or \nlicensed content without authorization (possibly in situations which do not fall under fair use); \neased exposure of trade secrets; or plagiarism or illegal replication. \n11. Obscene, Degrading, and/or Abusive Content: Eased production of and access to obscene, \ndegrading, and/or abusive imagery which can cause harm, including synthetic child sexual abuse \nmaterial (CSAM), and nonconsensual intimate images (NCII) of adults. \n12. Value Chain and Component Integration: Non-transparent or untraceable integration of \nupstream third-party components, including data that has been improperly obtained or not \nprocessed and cleaned due to increased automation from GAI; improper supplier vetting across \nthe AI lifecycle; or other issues that diminish transparency or accountability for downstream \nusers. \n2.1. CBRN Information or Capabilities \nIn the future, GAI may enable malicious actors to more easily access CBRN weapons and/or relevant \nknowledge, information, materials, tools, or technologies that could be misused to assist in the design, \ndevelopment, production, or use of CBRN weapons or other dangerous materials or agents. While \nrelevant biological and chemical threat knowledge and information is often publicly accessible, LLMs \ncould facilitate its analysis or synthesis, particularly by individuals without formal scientific training or \nexpertise. 
\nFurthermore, chemical and biological design tools (BDTs) – highly specialized AI systems trained on \nscientific data that aid in chemical and biological design – may augment design capabilities in chemistry \nand biology beyond what text-based LLMs are able to provide. As these models become more \nefficacious, including for beneficial uses, it will be important to assess their potential to be used for \nharm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems’ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable \n']","The context discusses increased attack surfaces for targeted cyberattacks, which may compromise a system's availability or the confidentiality or integrity of training data, code, or model weights. This connection indicates that as attack surfaces increase, the risks to systems and data also escalate.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 8, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What laws show data privacy principles in action?,"[' \nDATA PRIVACY \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe Privacy Act of 1974 requires privacy protections for personal information in federal \nrecords systems, including limits on data retention, and also provides individuals a general \nright to access and correct their data. Among other things, the Privacy Act limits the storage of individual \ninformation in federal systems of records, illustrating the principle of limiting the scope of data retention. Under \nthe Privacy Act, federal agencies may only retain data about an individual that is “relevant and necessary” to \naccomplish an agency’s statutory purpose or to comply with an Executive Order of the President. The law allows \nfor individuals to be able to access any of their individual information stored in a federal system of records, if not \nincluded under one of the systems of records exempted pursuant to the Privacy Act. In these cases, federal agen\xad\ncies must provide a method for an individual to determine if their personal information is stored in a particular \nsystem of records, and must provide procedures for an individual to contest the contents of a record about them. \nFurther, the Privacy Act allows for a cause of action for an individual to seek legal relief if a federal agency does not \ncomply with the Privacy Act’s requirements. 
Among other things, a court may order a federal agency to amend or \ncorrect an individual’s information in its records or award monetary damages if an inaccurate, irrelevant, untimely, \nor incomplete record results in an adverse determination about an individual’s “qualifications, character, rights, … \nopportunities…, or benefits.” \nNIST’s Privacy Framework provides a comprehensive, detailed and actionable approach for \norganizations to manage privacy risks. The NIST Framework gives organizations ways to identify and \ncommunicate their privacy risks and goals to support ethical decision-making in system, product, and service \ndesign or deployment, as well as the measures they are taking to demonstrate compliance with applicable laws \nor regulations. It has been voluntarily adopted by organizations across many different sectors around the world.78\nA school board’s attempt to surveil public school students—undertaken without \nadequate community input—sparked a state-wide biometrics moratorium.79 Reacting to a plan in \nthe city of Lockport, New York, the state’s legislature banned the use of facial recognition systems and other \n“biometric identifying technology” in schools until July 1, 2022.80 The law additionally requires that a report on \nthe privacy, civil rights, and civil liberties implications of the use of such technologies be issued before \nbiometric identification technologies can be used in New York schools. \nFederal law requires employers, and any consultants they may retain, to report the costs \nof surveilling employees in the context of a labor dispute, providing a transparency \nmechanism to help protect worker organizing. Employers engaging in workplace surveillance ""where \nan object there-of, directly or indirectly, is […] to obtain information concerning the activities of employees or a \nlabor organization in connection with a labor dispute"" must report expenditures relating to this surveillance to \nthe Department of Labor Office of Labor-Management Standards, and consultants who employers retain for \nthese purposes must also file reports regarding their activities.81\nPrivacy choices on smartphones show that when technologies are well designed, privacy \nand data agency can be meaningful and not overwhelming. These choices—such as contextual, timely \nalerts about location tracking—are brief, direct, and use-specific. Many of the expectations listed here for \nprivacy by design and use-specific consent mirror those distributed to developers as best practices when \ndeveloping for smart phone devices,82 such as being transparent about how user data will be used, asking for app \npermissions during their use so that the use-context will be clear to users, and ensuring that the app will still \nwork if users deny (or later revoke) some permissions. \n39\n']","The Privacy Act of 1974 exemplifies data privacy principles in action by requiring privacy protections for personal information in federal records systems, including limits on data retention and providing individuals a general right to access and correct their data. 
Additionally, federal law mandates employers to report the costs of surveilling employees during labor disputes, which serves as a transparency mechanism to protect worker organizing.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 38, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What ensures AI transparency per NIST?,"[' \nENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public\nAdministration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. The US Department of Transportation has publicly described the health and other benefits of these\n“traffic calming” measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow\xad\nVehicle-Speeds\n17. Karen Hao. Worried about your firm’s AI ethics? These startups are here to help.\nA growing ecosystem of “responsible AI” ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan 15., 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://\nwww.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for\xad\nin-2021/ https://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top\nProgressive Companies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021.\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985\xad\nImplementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. 
Department of Energy Artificial Intelligence and Technology Office. April 18, 2022. https://\nwww.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S Department of Defense Responsible Artificial Intelligence Strategy and\nImplementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation\xad\nPathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for\xad\nthe-intelligence-community\n64\n']",The answer to given question is not present in context,reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 63, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What safeguards do ethics reviews provide for automated systems?,"[' \nSAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nWhile technologies are being deployed to solve problems across a wide array of issues, our reliance on technology can \nalso lead to its use in situations where it has not yet been proven to work—either at all or within an acceptable range \nof error. In other cases, technologies do not work as intended or as promised, causing substantial and unjustified harm. \nAutomated systems sometimes rely on data from other systems, including historical data, allowing irrelevant informa\xad\ntion from past decisions to infect decision-making in unrelated situations. In some cases, technologies are purposeful\xad\nly designed to violate the safety of others, such as technologies designed to facilitate stalking; in other cases, intended \nor unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad\nvators deserve clear rules of the road that allow new ideas to flourish, and the American public deserves protections \nfrom unsafe outcomes. 
All can benefit from assurances that automated systems will be designed, tested, and consis\xad\ntently confirmed to work as intended, and that they will be proactively protected from foreseeable unintended harm\xad\nful outcomes. \n•\nA proprietary model was developed to predict the likelihood of sepsis in hospitalized patients and was imple\xad\nmented at hundreds of hospitals around the country. An independent study showed that the model predictions\nunderperformed relative to the designer’s claims while also causing ‘alert fatigue’ by falsely alerting\nlikelihood of sepsis.6\n•\nOn social media, Black people who quote and criticize racist messages have had their own speech silenced when\na platform’s automated moderation system failed to distinguish this “counter speech” (or other critique\nand journalism) from the original hateful messages to which such speech responded.7\n•\nA device originally developed to help people track and find lost items has been used as a tool by stalkers to track\nvictims’ locations in violation of their privacy and safety. The device manufacturer took steps after release to\nprotect people from unwanted tracking by alerting people on their phones when a device is found to be moving\nwith them over time and also by having the device make an occasional noise, but not all phones are able\nto receive the notification and the devices remain a safety concern due to their misuse.8 \n•\nAn algorithm used to deploy police was found to repeatedly send police to neighborhoods they regularly visit,\neven if those neighborhoods were not the ones with the highest crime rates. These incorrect crime predictions\nwere the result of a feedback loop generated from the reuse of data from previous arrests and algorithm\npredictions.9\n16\n']",Ethics reviews provide safeguards for automated systems by vetting key development decisions to prevent harm from occurring. They help identify and mitigate potential harms through pre-deployment testing and ongoing monitoring processes.,reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 15, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What ensures fair design in automated systems?,"['SECTION TITLE\nBLUEPRINT FOR AN AI BILL OF RIGHTS\nSAFE AND EFFECTIVE SYSTEMS\nYou should be protected from unsafe or ineffective systems. Automated systems should be developed with consultation from diverse communities, stakeholders, and domain experts to identify concerns, risks, and potential impacts of the system. Systems should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring that demonstrate they are safe and effective based on their intended use, mitigation of unsafe outcomes including those beyond the intended use, and adherence to domain-specific standards. Outcomes of these protective measures should include the possibility of not deploying the system or removing a system from use. Automated systems should not be designed with an intent or reasonably foreseeable possibility of endangering your safety or the safety of your community. They should be designed to proactively protect you from harms stemming from unintended, yet foreseeable, uses or impacts of automated systems. You should be protected from inappropriate or irrelevant data use in the \ndesign, development, and deployment of automated systems, and from the compounded harm of its reuse. \nIndependent evaluation and reporting that confirms that the system is safe and effective, including reporting of \nsteps taken to mitigate potential harms, should be performed and the results made public whenever possible. \nALGORITHMIC DISCRIMINATION PROTECTIONS\nYou should not face discrimination by algorithms and systems should be used and designed in \nan equitable way. Algorithmic discrimination occurs when automated systems contribute to unjustified \ndifferent treatment or impacts disfavoring people based on their race, color, ethnicity, sex (including \npregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other \nclassification protected by law. Depending on the specific circumstances, such algorithmic discrimination \nmay violate legal protections. Designers, developers, and deployers of automated systems should take \nproactive and continuous measures to protect individuals and communities from algorithmic \ndiscrimination and to use and design systems in an equitable way. This protection should include proactive \nequity assessments as part of the system design, use of representative data and protection against proxies \nfor demographic features, ensuring accessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent \nevaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5\n']","Fair design in automated systems is ensured through proactive and continuous measures to protect individuals and communities from algorithmic discrimination. This includes conducting equity assessments as part of the system design, using representative data, ensuring accessibility for people with disabilities, performing pre-deployment and ongoing disparity testing and mitigation, and maintaining clear organizational oversight. 
Additionally, independent evaluation and reporting, including algorithmic impact assessments and disparity testing results, should be made public whenever possible to confirm these protections.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 4, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What GAI activities contribute most to carbon emissions?,"[' \n8 \nTrustworthy AI Characteristics: Accountable and Transparent, Privacy Enhanced, Safe, Secure and \nResilient \n2.5. Environmental Impacts \nTraining, maintaining, and operating (running inference on) GAI systems are resource-intensive activities, \nwith potentially large energy and environmental footprints. Energy and carbon emissions vary based on \nwhat is being done with the GAI model (i.e., pre-training, fine-tuning, inference), the modality of the \ncontent, hardware used, and type of task or application. \nCurrent estimates suggest that training a single transformer LLM can emit as much carbon as 300 round-\ntrip flights between San Francisco and New York. In a study comparing energy consumption and carbon \nemissions for LLM inference, generative tasks (e.g., text summarization) were found to be more energy- \nand carbon-intensive than discriminative or non-generative tasks (e.g., text classification). \nMethods for creating smaller versions of trained models, such as model distillation or compression, \ncould reduce environmental impacts at inference time, but training and tuning such models may still \ncontribute to their environmental impacts. Currently there is no agreed upon method to estimate \nenvironmental impacts from GAI. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe \n2.6. Harmful Bias and Homogenization \nBias exists in many forms and can become ingrained in automated systems. AI systems, including GAI \nsystems, can increase the speed and scale at which harmful biases manifest and are acted upon, \npotentially perpetuating and amplifying harms to individuals, groups, communities, organizations, and \nsociety. For example, when prompted to generate images of CEOs, doctors, lawyers, and judges, current \ntext-to-image models underrepresent women and/or racial minorities, and people with disabilities. \nImage generator models have also produced biased or stereotyped output for various demographic \ngroups and have difficulty producing non-stereotyped content even when the prompt specifically \nrequests image features that are inconsistent with the stereotypes. Harmful bias in GAI models, which \nmay stem from their training data, can also cause representational harms or perpetuate or exacerbate \nbias based on race, gender, disability, or other protected classes. \nHarmful bias in GAI systems can also lead to harms via disparities between how a model performs for \ndifferent subgroups or languages (e.g., an LLM may perform less well for non-English languages or \ncertain dialects). Such disparities can contribute to discriminatory decision-making or amplification of \nexisting societal biases. 
In addition, GAI systems may be inappropriately trusted to perform similarly \nacross all subgroups, which could leave the groups facing underperformance with worse outcomes than \nif no GAI system were used. Disparate or reduced performance for lower-resource languages also \npresents challenges to model adoption, inclusion, and accessibility, and may make preservation of \nendangered languages more diļ¬ƒcult if GAI systems become embedded in everyday processes that would \notherwise have been opportunities to use these languages. \nBias is mutually reinforcing with the problem of undesired homogenization, in which GAI systems \nproduce skewed distributions of outputs that are overly uniform (for example, repetitive aesthetic styles \n']","The GAI activities that contribute most to carbon emissions include training, maintaining, and operating GAI systems, particularly during the pre-training, fine-tuning, and inference stages. Current estimates suggest that training a single transformer LLM can emit as much carbon as 300 round-trip flights between San Francisco and New York.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 11, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What AI systems improve design in chem & bio?,"[' \n5 \noperations, or other cyberattacks; increased attack surface for targeted cyberattacks, which may \ncompromise a systemā€™s availability or the conļ¬dentiality or integrity of training data, code, or \nmodel weights. \n10. Intellectual Property: Eased production or replication of alleged copyrighted, trademarked, or \nlicensed content without authorization (possibly in situations which do not fall under fair use); \neased exposure of trade secrets; or plagiarism or illegal replication. \n11. Obscene, Degrading, and/or Abusive Content: Eased production of and access to obscene, \ndegrading, and/or abusive imagery which can cause harm, including synthetic child sexual abuse \nmaterial (CSAM), and nonconsensual intimate images (NCII) of adults. \n12. Value Chain and Component Integration: Non-transparent or untraceable integration of \nupstream third-party components, including data that has been improperly obtained or not \nprocessed and cleaned due to increased automation from GAI; improper supplier vetting across \nthe AI lifecycle; or other issues that diminish transparency or accountability for downstream \nusers. \n2.1. CBRN Information or Capabilities \nIn the future, GAI may enable malicious actors to more easily access CBRN weapons and/or relevant \nknowledge, information, materials, tools, or technologies that could be misused to assist in the design, \ndevelopment, production, or use of CBRN weapons or other dangerous materials or agents. While \nrelevant biological and chemical threat knowledge and information is often publicly accessible, LLMs \ncould facilitate its analysis or synthesis, particularly by individuals without formal scientiļ¬c training or \nexpertise. 
\nRecent research on this topic found that LLM outputs regarding biological threat creation and attack \nplanning provided minimal assistance beyond traditional search engine queries, suggesting that state-of-\nthe-art LLMs at the time these studies were conducted do not substantially increase the operational \nlikelihood of such an attack. The physical synthesis development, production, and use of chemical or \nbiological agents will continue to require both applicable expertise and supporting materials and \ninfrastructure. The impact of GAI on chemical or biological agent misuse will depend on what the key \nbarriers for malicious actors are (e.g., whether information access is one such barrier), and how well GAI \ncan help actors address those barriers. \nFurthermore, chemical and biological design tools (BDTs) ā€“ highly specialized AI systems trained on \nscientiļ¬c data that aid in chemical and biological design ā€“ may augment design capabilities in chemistry \nand biology beyond what text-based LLMs are able to provide. As these models become more \neļ¬ƒcacious, including for beneļ¬cial uses, it will be important to assess their potential to be used for \nharm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systemsā€™ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable \n']","Chemical and biological design tools (BDTs) are highly specialized AI systems trained on scientific data that aid in chemical and biological design, potentially improving design capabilities beyond what text-based LLMs can provide.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 8, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How to align synthetic data with real stats while ensuring privacy?,"[' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Conļ¬guration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentiļ¬able information or contributing to homogenization. 
\nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identiļ¬ed. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notiļ¬cation format. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a speciļ¬c GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Conļ¬guration \n']","Consider opportunities to responsibly use synthetic data and other privacy enhancing techniques in GAI development, where appropriate and applicable, to match the statistical properties of real-world data without disclosing personally identifiable information or contributing to homogenization.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What ensures AI transparency per NIST?,"[' \nENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. 
See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public\nAdministration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. The US Department of Transportation has publicly described the health and other benefits of these\n“traffic calming” measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow-Vehicle-Speeds\n17. Karen Hao. Worried about your firm’s AI ethics? These startups are here to help.\nA growing ecosystem of “responsible AI” ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan. 15, 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://www.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for-in-2021/\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985-Implementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. Department of Energy Artificial Intelligence and Technology Office. April 18, 2022. https://www.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S. Department of Defense Responsible Artificial Intelligence Strategy and\nImplementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation-Pathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for-the-intelligence-community\n64\n']",The answer to given question is not present in context,reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 63, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What policies ensure GAI risk assessment with transparency and safety?,"[' \n14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. 
\nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. \nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organizationā€™s risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or deļ¬ning risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, oļ¬€ensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces signiļ¬cant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. \nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval (ā€œgo/ā€no-goā€) policies, procedures, and processes, \nwith reviewed processes and approval thresholds reļ¬‚ecting measurement of GAI \ncapabilities and risks. \nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or oļ¬€ensive cyber capabilities. 
\nCBRN Information or Capabilities; \nInformation Security \n']","The policies that ensure GAI risk assessment with transparency and safety include establishing transparency policies and processes for documenting the origin and history of training data and generated data for GAI applications, as well as establishing policies to evaluate risk-relevant capabilities of GAI and the robustness of safety measures prior to deployment and on an ongoing basis.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 17, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What dual aspects should automated systems cover for effective oversight?,"[' \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nOngoing monitoring. Automated systems should have ongoing monitoring procedures, including recalibration procedures, in place to ensure that their performance does not fall below an acceptable level over time, \nbased on changing real-world conditions or deployment contexts, post-deployment modification, or unexpected conditions. This ongoing monitoring should include continuous evaluation of performance metrics and \nharm assessments, updates of any systems, and retraining of any machine learning models as necessary, as well \nas ensuring that fallback mechanisms are in place to allow reversion to a previously working system. Monitoring should take into account the performance of both technical system components (the algorithm as well as \nany hardware components, data inputs, etc.) and human operators. It should include mechanisms for testing \nthe actual accuracy of any predictions or recommendations generated by a system, not just a human operator's \ndetermination of their accuracy. Ongoing monitoring procedures should include manual, human-led monitoring as a check in the event there are shortcomings in automated monitoring systems. These monitoring procedures should be in place for the lifespan of the deployed automated system. \nClear organizational oversight. Entities responsible for the development or use of automated systems \nshould lay out clear governance structures and procedures. This includes clearly-stated governance procedures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing \nassessment and mitigation. Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. 
Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeopleā€™s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse \nRelevant and high-quality data. Data used as part of any automated systemā€™s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19\n']",Automated systems should cover ongoing monitoring procedures and clear organizational oversight for effective oversight.,reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 18, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What term refers to GAI's misleading false content?,"[' \n6 \n2.2. Confabulation \nā€œConfabulationā€ refers to a phenomenon in which GAI systems generate and conļ¬dently present \nerroneous or false content in response to prompts. Confabulations also include generated outputs that \ndiverge from the prompts or other input or that contradict previously generated statements in the same \ncontext. 
These phenomena are colloquially also referred to as ā€œhallucinationsā€ or ā€œfabrications.ā€ \nConfabulations can occur across GAI outputs and contexts.9,10 Confabulations are a natural result of the \nway generative models are designed: they generate outputs that approximate the statistical distribution \nof their training data; for example, LLMs predict the next token or word in a sentence or phrase. While \nsuch statistical prediction can produce factually accurate and consistent outputs, it can also produce \noutputs that are factually inaccurate or internally inconsistent. This dynamic is particularly relevant when \nit comes to open-ended prompts for long-form responses and in domains which require highly \ncontextual and/or domain expertise. \nRisks from confabulations may arise when users believe false content ā€“ often due to the conļ¬dent nature \nof the response ā€“ leading users to act upon or promote the false information. This poses a challenge for \nmany real-world applications, such as in healthcare, where a confabulated summary of patient \ninformation reports could cause doctors to make incorrect diagnoses and/or recommend the wrong \ntreatments. Risks of confabulated content may be especially important to monitor when integrating GAI \ninto applications involving consequential decision making. \nGAI outputs may also include confabulated logic or citations that purport to justify or explain the \nsystemā€™s answer, which may further mislead humans into inappropriately trusting the systemā€™s output. \nFor instance, LLMs sometimes provide logical steps for how they arrived at an answer even when the \nanswer itself is incorrect. Similarly, an LLM could falsely assert that it is human or has human traits, \npotentially deceiving humans into believing they are speaking with another human. \nThe extent to which humans can be deceived by LLMs, the mechanisms by which this may occur, and the \npotential risks from adversarial prompting of such behavior are emerging areas of study. Given the wide \nrange of downstream impacts of GAI, it is diļ¬ƒcult to estimate the downstream scale and impact of \nconfabulations. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Safe, Valid and Reliable, Explainable \nand Interpretable \n2.3. Dangerous, Violent, or Hateful Content \nGAI systems can produce content that is inciting, radicalizing, or threatening, or that gloriļ¬es violence, \nwith greater ease and scale than other technologies. LLMs have been reported to generate dangerous or \nviolent recommendations, and some models have generated actionable instructions for dangerous or \n \n \n9 Confabulations of falsehoods are most commonly a problem for text-based outputs; for audio, image, or video \ncontent, creative generation of non-factual content can be a desired behavior. \n10 For example, legal confabulations have been shown to be pervasive in current state-of-the-art LLMs. 
See also, \ne.g., \n']","The term that refers to GAI's misleading false content is ""confabulation.""",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 9, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What's the role of interdisciplinary teams & human-AI config in GAI risk mgmt?,"[' \n23 \nMP-1.1-002 \nDetermine and document the expected and acceptable GAI system context of \nuse in collaboration with socio-cultural and other domain experts, by assessing: \nAssumptions and limitations; Direct value to the organization; Intended \noperational environment and observed usage patterns; Potential positive and \nnegative impacts to individuals, public safety, groups, communities, \norganizations, democratic institutions, and the physical environment; Social \nnorms and expectations. \nHarmful Bias and Homogenization \nMP-1.1-003 \nDocument risk measurement plans to address identiļ¬ed risks. Plans may \ninclude, as applicable: Individual and group cognitive biases (e.g., conļ¬rmation \nbias, funding bias, groupthink) for AI Actors involved in the design, \nimplementation, and use of GAI systems; Known past GAI system incidents and \nfailure modes; In-context use and foreseeable misuse, abuse, and oļ¬€-label use; \nOver reliance on quantitative metrics and methodologies without suļ¬ƒcient \nawareness of their limitations in the context(s) of use; Standard measurement \nand structured human feedback approaches; Anticipated human-AI \nconļ¬gurations. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent \nMP-1.1-004 \nIdentify and document foreseeable illegal uses or applications of the GAI system \nthat surpass organizational risk tolerances. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Obscene, Degrading, \nand/or Abusive Content \nAI Actor Tasks: AI Deployment \n \nMAP 1.2: Interdisciplinary AI Actors, competencies, skills, and capacities for establishing context reļ¬‚ect demographic diversity and \nbroad domain and user experience expertise, and their participation is documented. Opportunities for interdisciplinary \ncollaboration are prioritized. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.2-001 \nEstablish and empower interdisciplinary teams that reļ¬‚ect a wide range of \ncapabilities, competencies, demographic groups, domain expertise, educational \nbackgrounds, lived experiences, professions, and skills across the enterprise to \ninform and conduct risk measurement and management functions. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nMP-1.2-002 \nVerify that data or benchmarks used in risk measurement, and users, \nparticipants, or subjects involved in structured GAI public feedback exercises \nare representative of diverse in-context user populations. 
\nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nAI Actor Tasks: AI Deployment \n \n']","Interdisciplinary teams play a crucial role in GAI risk management by reflecting a wide range of capabilities, competencies, demographic groups, domain expertise, educational backgrounds, lived experiences, professions, and skills. Their participation is documented, and opportunities for interdisciplinary collaboration are prioritized. Additionally, human-AI configuration is important as it addresses harmful bias and homogenization, ensuring that data or benchmarks used in risk measurement are representative of diverse in-context user populations.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 26, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How do digital content transparency tools ensure AI traceability and integrity?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain eļ¬€ective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability ā€“ as identiļ¬ed in the MAP function ā€“ are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Conļ¬guration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modiļ¬ed, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. \nHuman-AI Conļ¬guration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","Digital content transparency solutions ensure AI traceability and integrity by enabling the documentation of each instance where content is generated, modified, or shared, providing a tamper-proof history of the content. 
Additionally, robust version control systems can be applied to track changes across the AI lifecycle over time.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What can be done to prevent algorithmic bias in automated systems?,"[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. 
Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']","To prevent algorithmic bias in automated systems, proactive equity assessments should be conducted during the design phase to identify potential discrimination and effects on equity. Data used in system development should be representative and reviewed for bias, and the use of demographic information should be avoided to prevent algorithmic discrimination. Proactive testing should be performed to identify and remove proxies that may lead to discrimination, and organizations should monitor systems closely for any resulting algorithmic discrimination.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do you ensure ethical data collection and privacy?,"[' \n \n \n \n \nSECTION TITLE\nDATA PRIVACY\nYou should be protected from abusive data practices via built-in protections and you \nshould have agency over how data about you is used. You should be protected from violations of \nprivacy through design choices that ensure such protections are included by default, including ensuring that \ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \nusers with defaults that are privacy invasive. 
Consent should only be used to justify collection of data in cases \nwhere it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable \nin plain language, and give you agency over data collection and the specific context of use; current hard-to\xad\nunderstand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \nrestrictions for data and inferences related to sensitive domains, including health, work, education, criminal \njustice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \nrelated inferences should only be used for necessary functions, and you should be protected by ethical review \nand use prohibitions. You and your communities should be free from unchecked surveillance; surveillance \ntechnologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \npotential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the use of such surveillance \ntechnologies is likely to limit rights, opportunities, or access. Whenever possible, you should have access to \nreporting that confirms your data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or access. \nNOTICE AND EXPLANATION\nYou should know that an automated system is being used and understand how and why it \ncontributes to outcomes that impact you. Designers, developers, and deployers of automated systems \nshould provide generally accessible plain language documentation including clear descriptions of the overall \nsystem functioning and the role automation plays, notice that such systems are in use, the individual or organiza\xad\ntion responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice \nshould be kept up-to-date and people impacted by the system should be notified of significant use case or key \nfunctionality changes. You should know how and why an outcome impacting you was determined by an \nautomated system, including when the automated system is not the sole input determining the outcome. \nAutomated systems should provide explanations that are technically valid, meaningful and useful to you and to \nany operators or others who need to understand the system, and calibrated to the level of risk based on the \ncontext. Reporting that includes summary information about these automated systems in plain language and \nassessments of the clarity and quality of the notice and explanations should be made public whenever possible. \n6\n']","To ensure ethical data collection and privacy, designers, developers, and deployers of automated systems should seek user permission and respect their decisions regarding data collection, use, access, transfer, and deletion. They should implement built-in protections, ensure data collection conforms to reasonable expectations, and only collect data that is strictly necessary. Consent should be meaningful and understandable, and enhanced protections should be in place for sensitive domains. 
Additionally, there should be oversight of surveillance technologies to protect privacy and civil liberties.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 5, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What are the perks of logging GAI incidents for AI risk mgmt?,"[' \n53 \nDocumenting, reporting, and sharing information about GAI incidents can help mitigate and prevent \nharmful outcomes by assisting relevant AI Actors in tracing impacts to their source. Greater awareness \nand standardization of GAI incident reporting could promote this transparency and improve GAI risk \nmanagement across the AI ecosystem. \nDocumentation and Involvement of AI Actors \nAI Actors should be aware of their roles in reporting AI incidents. To better understand previous incidents \nand implement measures to prevent similar ones in the future, organizations could consider developing \nguidelines for publicly available incident reporting which include information about AI actor \nresponsibilities. These guidelines would help AI system operators identify GAI incidents across the AI \nlifecycle and with AI Actors regardless of role. Documentation and review of third-party inputs and \nplugins for GAI systems is especially important for AI Actors in the context of incident disclosure; LLM \ninputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents. \n \n']","Logging GAI incidents can facilitate smoother sharing of information with relevant AI Actors, empower them in responding to and managing AI incidents, and improve GAI risk management across the AI ecosystem. It also aids in documenting and reviewing third-party inputs and plugins, which is crucial for incident disclosure.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 56, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What makes it hard for people to challenge algorithmic decisions?,"["" \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. 
\nAutomated systems now determine opportunities, from employment to credit, and directly shape the American \npublic's experiences, from the courtroom to online classrooms, in ways that profoundly impact people's lives. But this \nexpansive impact is not always visible. An applicant might not know whether a person rejected their resume or a \nhiring algorithm moved them to the bottom of the list. A defendant in the courtroom might not know if a judge denying their bail is informed by an automated system that labeled them “high risk.” From correcting errors to contesting \ndecisions, people are often denied the knowledge they need to address the impact of automated systems on their lives. \nNotice and explanations also serve an important safety and efficacy purpose, allowing experts to verify the reasonableness of a recommendation before enacting it. \nIn order to guard against potential harms, the American public needs to know if an automated system is being used. \nClear, brief, and understandable notice is a prerequisite for achieving the other protections in this framework. Likewise, the public is often unable to ascertain how or why an automated system has made a decision or contributed to a \nparticular outcome. The decision-making processes of automated systems tend to be opaque, complex, and, therefore, \nunaccountable, whether by design or by omission. These factors can make explanations both more challenging and \nmore important, and should not be used as a pretext to avoid explaining important decisions to the people impacted \nby those choices. In the context of automated systems, clear and valid explanations should be recognized as a baseline \nrequirement. \nProviding notice has long been a standard practice, and in many cases is a legal requirement, when, for example, \nmaking a video recording of someone (outside of a law enforcement or national security context). In some cases, such \nas credit, lenders are required to provide notice and explanation to consumers. Techniques used to automate the \nprocess of explaining such systems are under active research and improvement and such explanations can take many \nforms. Innovative companies and researchers are rising to the challenge and creating and deploying explanatory \nsystems that can help the public better understand decisions that impact them. \nWhile notice and explanation requirements are already in place in some sectors or situations, the American public \ndeserve to know consistently and across sectors if an automated system is being used in a way that impacts their rights, \nopportunities, or access. This knowledge should provide confidence in how the public is being treated, and trust in the \nvalidity and reasonable use of automated systems. \n•\nA lawyer representing an older client with disabilities who had been cut off from Medicaid-funded home\nhealth-care assistance couldn't determine why, especially since the decision went against historical access\npractices. 
In a court hearing, the lawyer learned from a witness that the state in which the older client\nlived had recently adopted a new algorithm to determine eligibility.83 The lack of a timely explanation made it\nharder to understand and contest the decision.\nā€¢\nA formal child welfare investigation is opened against a parent based on an algorithm and without the parent\never being notified that data was being collected and used as part of an algorithmic child maltreatment\nrisk assessment.84 The lack of notice or an explanation makes it harder for those performing child\nmaltreatment assessments to validate the risk assessment and denies parents knowledge that could help them\ncontest a decision.\n41\n""]","People find it hard to challenge algorithmic decisions because they are often denied the knowledge needed to address the impact of automated systems on their lives. The decision-making processes of these systems tend to be opaque and complex, making it difficult for individuals to ascertain how or why a decision was made. Additionally, the lack of clear and timely explanations can hinder their ability to contest decisions effectively.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 40, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How is AI performance evaluated with human safety and privacy in mind?,"[' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output ļ¬lters; Removing any personally \nidentiļ¬able information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConļ¬guration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConļ¬guration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, diļ¬€erential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConļ¬guration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for ļ¬ne tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Conļ¬guration \n']","AI performance is evaluated with human safety and privacy in mind by implementing measures such as assessing and managing statistical biases related to GAI content provenance, documenting how content provenance data is tracked, providing human subjects with options to withdraw participation or revoke consent, and using techniques like anonymization and differential privacy to minimize risks associated with linking AI-generated content back to individual human subjects.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What issues come from easy access to obscene content?,"[' \n5 \noperations, or other cyberattacks; increased attack surface for targeted cyberattacks, which may \ncompromise a systemā€™s availability or the conļ¬dentiality or integrity of training data, code, or \nmodel weights. \n10. Intellectual Property: Eased production or replication of alleged copyrighted, trademarked, or \nlicensed content without authorization (possibly in situations which do not fall under fair use); \neased exposure of trade secrets; or plagiarism or illegal replication. \n11. Obscene, Degrading, and/or Abusive Content: Eased production of and access to obscene, \ndegrading, and/or abusive imagery which can cause harm, including synthetic child sexual abuse \nmaterial (CSAM), and nonconsensual intimate images (NCII) of adults. \n12. Value Chain and Component Integration: Non-transparent or untraceable integration of \nupstream third-party components, including data that has been improperly obtained or not \nprocessed and cleaned due to increased automation from GAI; improper supplier vetting across \nthe AI lifecycle; or other issues that diminish transparency or accountability for downstream \nusers. \n2.1. CBRN Information or Capabilities \nIn the future, GAI may enable malicious actors to more easily access CBRN weapons and/or relevant \nknowledge, information, materials, tools, or technologies that could be misused to assist in the design, \ndevelopment, production, or use of CBRN weapons or other dangerous materials or agents. While \nrelevant biological and chemical threat knowledge and information is often publicly accessible, LLMs \ncould facilitate its analysis or synthesis, particularly by individuals without formal scientiļ¬c training or \nexpertise. 
\nRecent research on this topic found that LLM outputs regarding biological threat creation and attack \nplanning provided minimal assistance beyond traditional search engine queries, suggesting that state-of-\nthe-art LLMs at the time these studies were conducted do not substantially increase the operational \nlikelihood of such an attack. The physical synthesis development, production, and use of chemical or \nbiological agents will continue to require both applicable expertise and supporting materials and \ninfrastructure. The impact of GAI on chemical or biological agent misuse will depend on what the key \nbarriers for malicious actors are (e.g., whether information access is one such barrier), and how well GAI \ncan help actors address those barriers. \nFurthermore, chemical and biological design tools (BDTs) ā€“ highly specialized AI systems trained on \nscientiļ¬c data that aid in chemical and biological design ā€“ may augment design capabilities in chemistry \nand biology beyond what text-based LLMs are able to provide. As these models become more \neļ¬ƒcacious, including for beneļ¬cial uses, it will be important to assess their potential to be used for \nharm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systemsā€™ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable \n']","Easy access to obscene content can lead to the production of and access to obscene, degrading, and/or abusive imagery, which can cause harm, including synthetic child sexual abuse material (CSAM) and nonconsensual intimate images (NCII) of adults.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 8, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How do user feedback and community input assess AI risks?,"[' \n38 \nMEASURE 2.13: Eļ¬€ectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric eļ¬€ectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are diļ¬ƒcult to assess using currently available \nmeasurement techniques or where metrics are not yet available. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might aļ¬€ect \ndiļ¬€erent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization \n']","User feedback and community input assess AI risks through established feedback processes that allow end users and impacted communities to report problems and appeal system outcomes. These processes are integrated into AI system evaluation metrics, which include conducting impact assessments on how AI-generated content might affect different social, economic, and cultural groups, as well as understanding user perceptions and interactions with GAI content.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What should automated systems consider for consent and ethics in sensitive data?,"[' \n \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \xad\xad\xad\xad\xad\xad\nIn addition to the privacy expectations above for general non-sensitive data, any system collecting, using, shar-\ning, or storing sensitive data should meet the expectations below. Depending on the technological use case and \nbased on an ethical assessment, consent for sensitive data may need to be acquired from a guardian and/or child. \nProvide enhanced protections for data related to sensitive domains \nNecessary functions only. Sensitive data should only be used for functions strictly necessary for that \ndomain or for functions that are required for administrative reasons (e.g., school attendance records), unless \nconsent is acquired, if appropriate, and the additional expectations in this section are met. 
Consent for non-\nnecessary functions should be optional, i.e., should not be required, incentivized, or coerced in order to \nreceive opportunities or access to services. In cases where data is provided to an entity (e.g., health insurance \ncompany) in order to facilitate payment for such a need, that data should only be used for that purpose. \nEthical review and use prohibitions. Any use of sensitive data or decision process based in part on sensi-\ntive data that might limit rights, opportunities, or access, whether the decision is automated or not, should go \nthrough a thorough ethical review and monitoring, both in advance and by periodic review (e.g., via an indepen-\ndent ethics committee or similarly robust process). In some cases, this ethical review may determine that data \nshould not be used or shared for specific uses even with consent. Some novel uses of automated systems in this \ncontext, where the algorithm is dynamically developing and where the science behind the use case is not well \nestablished, may also count as human subject experimentation, and require special review under organizational \ncompliance bodies applying medical, scientific, and academic human subject experimentation ethics rules and \ngovernance procedures. \nData quality. In sensitive domains, entities should be especially careful to maintain the quality of data to \navoid adverse consequences arising from decision-making based on flawed or inaccurate data. Such care is \nnecessary in a fragmented, complex data ecosystem and for datasets that have limited access such as for fraud \nprevention and law enforcement. It should be not left solely to individuals to carry the burden of reviewing and \ncorrecting data. Entities should conduct regular, independent audits and take prompt corrective measures to \nmaintain accurate, timely, and complete data. \nLimit access to sensitive data and derived data. Sensitive data and derived data should not be sold, \nshared, or made public as part of data brokerage or other agreements. Sensitive data includes data that can be \nused to infer sensitive information; even systems that are not directly marketed as sensitive domain technologies \nare expected to keep sensitive data private. Access to such data should be limited based on necessity and based \non a principle of local control, such that those individuals closest to the data subject have more access while \nthose who are less proximate do not (e.g., a teacher has access to their studentsā€™ daily progress data while a \nsuperintendent does not). \nReporting. In addition to the reporting on data privacy (as listed above for non-sensitive data), entities devel-\noping technologies related to a sensitive domain and those collecting, using, storing, or sharing sensitive data \nshould, whenever appropriate, regularly provide public reports describing: any data security lapses or breaches \nthat resulted in sensitive data leaks; the number, type, and outcomes of ethical pre-reviews undertaken; a \ndescription of any data sold, shared, or made public, and how that data was assessed to determine it did not pres-\nent a sensitive data risk; and ongoing risk identification and management procedures, and any mitigation added \nbased on these procedures. Reporting should be provided in a clear and machine-readable manner. \n38\n']","Automated systems should consider that consent for sensitive data may need to be acquired from a guardian and/or child, and that consent for non-necessary functions should be optional. 
Additionally, any use of sensitive data or decision processes based on sensitive data that might limit rights, opportunities, or access should undergo a thorough ethical review and monitoring. This includes ensuring that data quality is maintained to avoid adverse consequences from flawed data, limiting access to sensitive data based on necessity, and providing regular public reports on data security lapses and ethical pre-reviews.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 37, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +"What links are there between digital IDs, welfare efficiency, and community impacts?","["" \n \n \n \n \nAPPENDIX\nā€¢\nJulia Simon-Mishel, Supervising Attorney, Philadelphia Legal Assistance\nā€¢\nDr. Zachary Mahafza, Research & Data Analyst, Southern Poverty Law Center\nā€¢\nJ. Khadijah Abdurahman, Tech Impact Network Research Fellow, AI Now Institute, UCLA C2I1, and\nUWA Law School\nPanelists separately described the increasing scope of technology use in providing for social welfare, including \nin fraud detection, digital ID systems, and other methods focused on improving efficiency and reducing cost. \nHowever, various panelists individually cautioned that these systems may reduce burden for government \nagencies by increasing the burden and agency of people using and interacting with these technologies. \nAdditionally, these systems can produce feedback loops and compounded harm, collecting data from \ncommunities and using it to reinforce inequality. Various panelists suggested that these harms could be \nmitigated by ensuring community input at the beginning of the design process, providing ways to opt out of \nthese systems and use associated human-driven mechanisms instead, ensuring timeliness of benefit payments, \nand providing clear notice about the use of these systems and clear explanations of how and what the \ntechnologies are doing. Some panelists suggested that technology should be used to help people receive \nbenefits, e.g., by pushing benefits to those in need and ensuring automated decision-making systems are only \nused to provide a positive outcome; technology shouldn't be used to take supports away from people who need \nthem. \nPanel 6: The Healthcare System. This event explored current and emerging uses of technology in the \nhealthcare system and consumer products related to health. \nWelcome:\nā€¢\nAlondra Nelson, Deputy Director for Science and Society, White House Office of Science and Technology\nPolicy\nā€¢\nPatrick Gaspard, President and CEO, Center for American Progress\nModerator: Micky Tripathi, National Coordinator for Health Information Technology, U.S Department of \nHealth and Human Services. \nPanelists: \nā€¢\nMark Schneider, Health Innovation Advisor, ChristianaCare\nā€¢\nZiad Obermeyer, Blue Cross of California Distinguished Associate Professor of Policy and Management,\nUniversity of California, Berkeley School of Public Health\nā€¢\nDorothy Roberts, George A. 
Weiss University Professor of Law and Sociology and the Raymond Pace and\nSadie Tanner Mossell Alexander Professor of Civil Rights, University of Pennsylvania\nā€¢\nDavid Jones, A. Bernard Ackerman Professor of the Culture of Medicine, Harvard University\nā€¢\nJamila Michener, Associate Professor of Government, Cornell University; Co-Director, Cornell Center for\nHealth Equity\xad\nPanelists discussed the impact of new technologies on health disparities; healthcare access, delivery, and \noutcomes; and areas ripe for research and policymaking. Panelists discussed the increasing importance of tech-\nnology as both a vehicle to deliver healthcare and a tool to enhance the quality of care. On the issue of \ndelivery, various panelists pointed to a number of concerns including access to and expense of broadband \nservice, the privacy concerns associated with telehealth systems, the expense associated with health \nmonitoring devices, and how this can exacerbate equity issues. On the issue of technology enhanced care, \nsome panelists spoke extensively about the way in which racial biases and the use of race in medicine \nperpetuate harms and embed prior discrimination, and the importance of ensuring that the technologies used \nin medical care were accountable to the relevant stakeholders. Various panelists emphasized the importance \nof having the voices of those subjected to these technologies be heard.\n59\n""]","Panelists described the increasing scope of technology use in providing for social welfare, including digital ID systems, which are focused on improving efficiency and reducing cost. However, they cautioned that these systems may reduce the burden for government agencies by increasing the burden and agency of people using and interacting with these technologies. Additionally, these systems can produce feedback loops and compounded harm, collecting data from communities and using it to reinforce inequality. To mitigate these harms, it was suggested that community input should be ensured at the beginning of the design process, and there should be ways to opt out of these systems and use associated human-driven mechanisms instead.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 58, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What drives extra data protections in health and finance?,"[' \n \n \n \nDATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\nSome domains, including health, employment, education, criminal justice, and personal finance, have long been \nsingled out as sensitive domains deserving of enhanced data protections. This is due to the intimate nature of these \ndomains as well as the inability of individuals to opt out of these domains in any meaningful way, and the \nhistorical discrimination that has often accompanied data knowledge.69 Domains understood by the public to be \nsensitive also change over time, including because of technological developments. 
Tracking and monitoring \ntechnologies, personal tracking devices, and our extensive data footprints are used and misused more than ever \nbefore; as such, the protections afforded by current legal guidelines may be inadequate. The American public \ndeserves assurances that data related to such sensitive domains is protected and used appropriately and only in \nnarrowly defined contexts with clear benefits to the individual and/or society. \nTo this end, automated systems that collect, use, share, or store data related to these sensitive domains should meet \nadditional expectations. Data and metadata are sensitive if they pertain to an individual in a sensitive domain (defined \nbelow); are generated by technologies used in a sensitive domain; can be used to infer data from a sensitive domain or \nsensitive data about an individual (such as disability-related data, genomic data, biometric data, behavioral data, \ngeolocation data, data related to interaction with the criminal justice system, relationship history and legal status such \nas custody and divorce information, and home, work, or school environmental data); or have the reasonable potential \nto be used in ways that are likely to expose individuals to meaningful harm, such as a loss of privacy or financial harm \ndue to identity theft. Data and metadata generated by or about those who are not yet legal adults is also sensitive, even \nif not related to a sensitive domain. Such data includes, but is not limited to, numerical, text, image, audio, or video \ndata. ā€œSensitive domainsā€ are those in which activities being conducted can cause material harms, including signifi\xad\ncant adverse effects on human rights such as autonomy and dignity, as well as civil liberties and civil rights. Domains \nthat have historically been singled out as deserving of enhanced data protections or where such enhanced protections \nare reasonably expected by the public include, but are not limited to, health, family planning and care, employment, \neducation, criminal justice, and personal finance. In the context of this framework, such domains are considered \nsensitive whether or not the specifics of a system context would necessitate coverage under existing law, and domains \nand data that are considered sensitive are understood to change over time based on societal norms and context. \n36\n']","Extra data protections in health and finance are driven by the intimate nature of these domains, the inability of individuals to opt out in a meaningful way, and the historical discrimination that has often accompanied data knowledge. 
Additionally, the potential for material harms, including significant adverse effects on human rights such as autonomy and dignity, civil liberties, and civil rights, necessitates enhanced protections.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 35, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What insights did OSTP seek from experts in AI Bill of Rights panels?,"[' \n \n \n \n \nSECTION TITLE\nAPPENDIX\nListening to the American People \nThe White House Office of Science and Technology Policy (OSTP) led a yearlong process to seek and distill \ninput from people across the country ā€“ from impacted communities to industry stakeholders to \ntechnology developers to other experts across fields and sectors, as well as policymakers across the Federal \ngovernment ā€“ on the issue of algorithmic and data-driven harms and potential remedies. Through panel \ndiscussions, public listening sessions, private meetings, a formal request for information, and input to a \npublicly accessible and widely-publicized email address, people across the United States spoke up about \nboth the promises and potential harms of these technologies, and played a central role in shaping the \nBlueprint for an AI Bill of Rights. \nPanel Discussions to Inform the Blueprint for An AI Bill of Rights \nOSTP co-hosted a series of six panel discussions in collaboration with the Center for American Progress, \nthe Joint Center for Political and Economic Studies, New America, the German Marshall Fund, the Electronic \nPrivacy Information Center, and the Mozilla Foundation. The purpose of these convenings ā€“ recordings of \nwhich are publicly available online112 ā€“ was to bring together a variety of experts, practitioners, advocates \nand federal government officials to offer insights and analysis on the risks, harms, benefits, and \npolicy opportunities of automated systems. Each panel discussion was organized around a wide-ranging \ntheme, exploring current challenges and concerns and considering what an automated society that \nrespects democratic values should look like. These discussions focused on the topics of consumer \nrights and protections, the criminal justice system, equal opportunities and civil justice, artificial \nintelligence and democratic values, social welfare and development, and the healthcare system. \nSummaries of Panel Discussions: \nPanel 1: Consumer Rights and Protections. This event explored the opportunities and challenges for \nindividual consumers and communities in the context of a growing ecosystem of AI-enabled consumer \nproducts, advanced platforms and services, ā€œInternet of Thingsā€ (IoT) devices, and smart city products and \nservices. \nWelcome:\nā€¢\nRashida Richardson, Senior Policy Advisor for Data and Democracy, White House Office of Science and\nTechnology Policy\nā€¢\nKaren Kornbluh, Senior Fellow and Director of the Digital Innovation and Democracy Initiative, German\nMarshall Fund\nModerator: \nDevin E. 
Willis, Attorney, Division of Privacy and Identity Protection, Bureau of Consumer Protection, Federal \nTrade Commission \nPanelists: \nā€¢\nTamika L. Butler, Principal, Tamika L. Butler Consulting\nā€¢\nJennifer Clark, Professor and Head of City and Regional Planning, Knowlton School of Engineering, Ohio\nState University\nā€¢\nCarl Holshouser, Senior Vice President for Operations and Strategic Initiatives, TechNet\nā€¢\nSurya Mattu, Senior Data Engineer and Investigative Data Journalist, The Markup\nā€¢\nMariah Montgomery, National Campaign Director, Partnership for Working Families\n55\n']","OSTP sought insights and analysis on the risks, harms, benefits, and policy opportunities of automated systems from a variety of experts, practitioners, advocates, and federal government officials during the AI Bill of Rights panels. The discussions focused on consumer rights and protections, the criminal justice system, equal opportunities and civil justice, artificial intelligence and democratic values, social welfare and development, and the healthcare system.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 54, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What key elements ensure clarity in docs about an automated system's impact?,"[' \nYou should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. 
\nNOTICE AND EXPLANATION\n40\n']","Key elements that ensure clarity in documentation about an automated system's impact include providing generally accessible plain language documentation, clear descriptions of the overall system functioning and the role of automation, timely updates about significant use case or key functionality changes, and explanations of outcomes that are clear, timely, and accessible.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 39, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What biases to note for pre-deployment measurement error models?,"[' \n38 \nMEASURE 2.13: Eļ¬€ectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric eļ¬€ectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are diļ¬ƒcult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might aļ¬€ect \ndiļ¬€erent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization \n']","The context mentions documenting biases or statistical variance in applied metrics or structured human feedback processes, particularly when modeling complex societal constructs such as hateful content. 
However, it does not specify particular biases to note for pre-deployment measurement error models.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"Which automated systems affect equal opportunities in edu, housing, & jobs?","[' \n \n \n \n \n \n \n \n \nAPPENDIX\nExamples of Automated Systems \nThe below examples are meant to illustrate the breadth of automated systems that, insofar as they have the \npotential to meaningfully impact rights, opportunities, or access to critical resources or services, should \nbe covered by the Blueprint for an AI Bill of Rights. These examples should not be construed to limit that \nscope, which includes automated systems that may not yet exist, but which fall under these criteria. \nExamples of automated systems for which the Blueprint for an AI Bill of Rights should be considered include \nthose that have the potential to meaningfully impact: \nā€¢ Civil rights, civil liberties, or privacy, including but not limited to:\nSpeech-related systems such as automated content moderation tools; \nSurveillance and criminal justice system algorithms such as risk assessments, predictive \n policing, automated license plate readers, real-time facial recognition systems (especially \n those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. \nā€¢ Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems, \nprojections of student progress or outcomes, algorithms that determine access to resources or \n rograms, and surveillance of classes (whether online or in-person); \nHousing-related systems such as tenant screening algorithms, automated valuation systems that \n estimate the value of homes used in mortgage underwriting or home insurance, and automated \n valuations from online aggregator websites; and \nEmployment-related systems such as workplace algorithms that inform all aspects of the terms \n and conditions of employment including, but not limited to, pay or promotion, hiring or termina- \n tion algorithms, virtual or augmented reality workplace training programs, and electronic work \nplace surveillance and management systems. 
\nā€¢ Access to critical resources and services, including but not limited to:\nHealth and health insurance technologies such as medical AI systems and devices, AI-assisted \n diagnostic tools, algorithms or predictive models used to support clinical decision making, medical \n or insurance health risk assessments, drug addiction risk assessments and associated access alg \n-orithms, wearable technologies, wellness apps, insurance care allocation algorithms, and health\ninsurance cost and underwriting algorithms;\nFinancial system algorithms such as loan allocation algorithms, financial system access determi-\nnation algorithms, credit scoring systems, insurance algorithms including risk assessments, auto\n-mated interest rate determinations, and financial algorithms that apply penalties (e.g., that can\ngarnish wages or withhold tax returns);\n53\n']","Automated systems that affect equal opportunities in education include algorithms that detect student cheating or plagiarism, admissions algorithms, online or virtual reality student monitoring systems, projections of student progress or outcomes, algorithms that determine access to resources or programs, and surveillance of classes. In housing, tenant screening algorithms, automated valuation systems for mortgage underwriting or home insurance, and automated valuations from online aggregator websites are relevant. For employment, workplace algorithms that inform terms and conditions of employment, hiring or termination algorithms, virtual or augmented reality workplace training programs, and electronic workplace surveillance and management systems are included.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 52, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True diff --git a/Tasks/Task 3/task3-del1.png b/Tasks/Task 3/task3-del1.png new file mode 100644 index 0000000000000000000000000000000000000000..b6e8f61c54ac813e2b432cf977b0fec3be4cd730 Binary files /dev/null and b/Tasks/Task 3/task3-del1.png differ diff --git a/Tasks/Task 3/task3-del11.png b/Tasks/Task 3/task3-del11.png new file mode 100644 index 0000000000000000000000000000000000000000..0bc087b7060563b5fe1ce109bee0a249342688f3 Binary files /dev/null and b/Tasks/Task 3/task3-del11.png differ diff --git a/Tasks/Task 3/task3-generate-dataset-ragas-eval.ipynb b/Tasks/Task 3/task3-generate-dataset-ragas-eval.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..69b00fa13d76b593a0d02c559d76cc528b5a5d92 --- /dev/null +++ b/Tasks/Task 3/task3-generate-dataset-ragas-eval.ipynb @@ -0,0 +1,590 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Synthetic data generation using Ragas framework" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> Python packages are installed from `requirements.txt` file into virtual environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -qU langsmith langchain-core langchain-community langchain-openai langchain-qdrant langchain_experimental pymupdf ragas" + ] 
+ }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "aiofiles==23.2.1\n", + "aiohappyeyeballs==2.4.0\n", + "aiohttp==3.10.5\n", + "aiosignal==1.3.1\n", + "annotated-types==0.7.0\n", + "anyio==3.7.1\n", + "appdirs==1.4.4\n", + "appnope==0.1.4\n", + "asttokens==2.4.1\n", + "asyncer==0.0.2\n", + "attrs==24.2.0\n", + "bidict==0.23.1\n", + "certifi==2024.8.30\n", + "chainlit==0.7.700\n", + "charset-normalizer==3.3.2\n", + "click==8.1.7\n", + "comm==0.2.2\n", + "dataclasses-json==0.5.14\n", + "datasets==3.0.0\n", + "debugpy==1.8.5\n", + "decorator==5.1.1\n", + "Deprecated==1.2.14\n", + "dill==0.3.8\n", + "distro==1.9.0\n", + "executing==2.1.0\n", + "fastapi==0.100.1\n", + "fastapi-socketio==0.0.10\n", + "filelock==3.16.1\n", + "filetype==1.2.0\n", + "frozenlist==1.4.1\n", + "fsspec==2024.6.1\n", + "googleapis-common-protos==1.65.0\n", + "grpcio==1.66.1\n", + "grpcio-tools==1.62.3\n", + "h11==0.14.0\n", + "h2==4.1.0\n", + "hpack==4.0.0\n", + "httpcore==0.17.3\n", + "httpx==0.24.1\n", + "huggingface-hub==0.25.0\n", + "hyperframe==6.0.1\n", + "idna==3.10\n", + "importlib_metadata==8.4.0\n", + "ipykernel==6.29.5\n", + "ipython==8.27.0\n", + "jedi==0.19.1\n", + "Jinja2==3.1.4\n", + "jiter==0.5.0\n", + "joblib==1.4.2\n", + "jsonpatch==1.33\n", + "jsonpointer==3.0.0\n", + "jupyter_client==8.6.3\n", + "jupyter_core==5.7.2\n", + "langchain==0.3.0\n", + "langchain-community==0.3.0\n", + "langchain-core==0.3.5\n", + "langchain-experimental==0.3.0\n", + "langchain-huggingface==0.1.0\n", + "langchain-openai==0.2.0\n", + "langchain-qdrant==0.1.4\n", + "langchain-text-splitters==0.3.0\n", + "langsmith==0.1.125\n", + "Lazify==0.4.0\n", + "MarkupSafe==2.1.5\n", + "marshmallow==3.22.0\n", + "matplotlib-inline==0.1.7\n", + "mpmath==1.3.0\n", + "multidict==6.1.0\n", + "multiprocess==0.70.16\n", + "mypy-extensions==1.0.0\n", + "nest-asyncio==1.6.0\n", + "networkx==3.3\n", + "numpy==1.26.4\n", + "openai==1.44.1\n", + "opentelemetry-api==1.27.0\n", + "opentelemetry-exporter-otlp==1.27.0\n", + "opentelemetry-exporter-otlp-proto-common==1.27.0\n", + "opentelemetry-exporter-otlp-proto-grpc==1.27.0\n", + "opentelemetry-exporter-otlp-proto-http==1.27.0\n", + "opentelemetry-instrumentation==0.48b0\n", + "opentelemetry-proto==1.27.0\n", + "opentelemetry-sdk==1.27.0\n", + "opentelemetry-semantic-conventions==0.48b0\n", + "orjson==3.10.7\n", + "packaging==23.2\n", + "pandas==2.2.3\n", + "parso==0.8.4\n", + "pexpect==4.9.0\n", + "pillow==10.4.0\n", + "platformdirs==4.3.6\n", + "portalocker==2.10.1\n", + "prompt_toolkit==3.0.47\n", + "protobuf==4.25.5\n", + "psutil==6.0.0\n", + "ptyprocess==0.7.0\n", + "pure_eval==0.2.3\n", + "pyarrow==17.0.0\n", + "pydantic==2.9.2\n", + "pydantic-settings==2.5.2\n", + "pydantic_core==2.23.4\n", + "Pygments==2.18.0\n", + "PyJWT==2.9.0\n", + "PyMuPDF==1.24.10\n", + "pymupdf4llm==0.0.17\n", + "PyMuPDFb==1.24.10\n", + "pypdf==4.3.1\n", + "pysbd==0.3.4\n", + "python-dateutil==2.9.0.post0\n", + "python-dotenv==1.0.1\n", + "python-engineio==4.9.1\n", + "python-graphql-client==0.4.3\n", + "python-multipart==0.0.6\n", + "python-socketio==5.11.4\n", + "pytz==2024.2\n", + "PyYAML==6.0.2\n", + "pyzmq==26.2.0\n", + "qdrant-client==1.11.2\n", + "ragas==0.1.19\n", + "regex==2024.9.11\n", + "requests==2.32.3\n", + "safetensors==0.4.5\n", + "scikit-learn==1.5.2\n", + "scipy==1.14.1\n", + "sentence-transformers==3.1.1\n", + "simple-websocket==1.0.0\n", + "six==1.16.0\n", + "sniffio==1.3.1\n", + 
"SQLAlchemy==2.0.35\n", + "stack-data==0.6.3\n", + "starlette==0.27.0\n", + "sympy==1.13.3\n", + "syncer==2.0.3\n", + "tenacity==8.5.0\n", + "threadpoolctl==3.5.0\n", + "tiktoken==0.7.0\n", + "tokenizers==0.19.1\n", + "tomli==2.0.1\n", + "torch==2.4.1\n", + "tornado==6.4.1\n", + "tqdm==4.66.5\n", + "traitlets==5.14.3\n", + "transformers==4.44.2\n", + "typing-inspect==0.9.0\n", + "typing_extensions==4.12.2\n", + "tzdata==2024.1\n", + "uptrace==1.26.0\n", + "urllib3==2.2.3\n", + "uvicorn==0.23.2\n", + "watchfiles==0.20.0\n", + "wcwidth==0.2.13\n", + "websockets==13.1\n", + "wrapt==1.16.0\n", + "wsproto==1.2.0\n", + "xxhash==3.5.0\n", + "yarl==1.11.1\n", + "zipp==3.20.2\n" + ] + } + ], + "source": [ + "!pip freeze\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "from uuid import uuid4\n", + "\n", + "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", + "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"LangChain API Key:\")\n", + "\n", + "os.environ[\"LANGCHAIN_PROJECT\"] = \"AIM-SDG-MidTerm - AI Safety\"\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", + "\n", + "os.environ[\"QDRANT_API_KEY\"] = getpass.getpass(\"Enter Your Qdrant API Key: \")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pdfloader import PDFLoaderWrapper\n", + "from langchain_experimental.text_splitter import SemanticChunker\n", + "\n", + "BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n", + "NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n", + "SMALL_DOC = \"https://arxiv.org/pdf/1908.10084\" \n", + "documents_to_preload = [\n", + " BOR_FILE_PATH,\n", + " NIST_FILE_PATH\n", + " # SMALL_DOC\n", + "]\n", + "\n", + "pdf_loader = PDFLoaderWrapper(\n", + " documents_to_preload, PDFLoaderWrapper.LoaderType.PYMUPDF\n", + ")\n", + "documents = await pdf_loader.aload()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print (\"Importing packages\")\n", + "from ragas.testset.generator import TestsetGenerator\n", + "from ragas.testset.evolutions import simple, reasoning, multi_context\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", + "from ragas.testset.docstore import Document, DocumentStore,InMemoryDocumentStore\n", + "from langchain_experimental.text_splitter import SemanticChunker\n", + "from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline\n", + "from ragas.testset.extractor import KeyphraseExtractor\n", + "\n", + "print (\"Packages import complete\")\n", + "print (\"Getting the Embedding model from Huggingface\")\n", + "# Using best performing embedding model from hugging face to generate quality dataset.\n", + "# Need GPU\n", + "model_name = \"Snowflake/snowflake-arctic-embed-l\"\n", + "embedding_model = HuggingFaceEmbeddings(model_name=model_name)\n", + "print (\"Embedding model loaded\")\n", + "\n", + "print (\"Splitting the documents into semantic chunks\")\n", + "text_splitter = SemanticChunker(embedding_model, breakpoint_threshold_type=\"percentile\",breakpoint_threshold_amount=90)\n", + "chunked_docs = text_splitter.split_documents(documents)\n", + "\n", + "print (\"Creating the document store for ragas and loading LLM models\")\n", + "generator_llm = ChatOpenAI(model=\"gpt-4o-mini\")\n", + 
"critic_llm = ChatOpenAI(model=\"gpt-4o\")\n", + "\n", + "keyphrase_extractor = KeyphraseExtractor(llm=generator_llm)\n", + "docstore = InMemoryDocumentStore(splitter=text_splitter,extractor=keyphrase_extractor, embeddings=embedding_model)\n", + "\n", + "\n", + "print (\"Creating the testset generator\")\n", + "generator = TestsetGenerator.from_langchain( # Default uses TokenTextSplitter\n", + " generator_llm=generator_llm,\n", + " critic_llm=critic_llm,\n", + " embeddings=embedding_model,\n", + " docstore=docstore # Document store uses SemenaticChunker\n", + ")\n", + "\n", + "distributions = {\n", + " simple: 0.5,\n", + " multi_context: 0.3,\n", + " reasoning: 0.2\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tests_per_doc = 2 \n", + "test_size = tests_per_doc * len(documents)\n", + "\n", + "testset = generator.generate_with_langchain_docs(\n", + " documents, \n", + " test_size, \n", + " distributions, \n", + " with_debugging_logs=True\n", + ") # Default RunConfig(max_retries=15, max_wait=90)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "testset.to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langsmith import Client\n", + "\n", + "client = Client()\n", + "\n", + "dataset_name = \"AI Safety\"\n", + "\n", + "dataset = client.create_dataset(\n", + " dataset_name=dataset_name,\n", + " description=\"Questions about AI Safety\"\n", + ")\n", + "\n", + "for test in testset.to_pandas().iterrows():\n", + " client.create_example(\n", + " inputs={\n", + " \"question\": test[1][\"question\"]\n", + " },\n", + " outputs={\n", + " \"answer\": test[1][\"ground_truth\"]\n", + " },\n", + " metadata={\n", + " \"context\": test[0]\n", + " },\n", + " dataset_id=dataset.id\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create Rag chain to generate answers for above questions in the dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> Note that we are usig Qdrant cloud where the pdf document is processed and saved for us to consume. For the RAG pipeline we use the same embedding model originally used to populate the Qdrant vectorstore." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_qdrant import QdrantVectorStore\n", + "from langchain_core.documents import Document\n", + "from qdrant_client import QdrantClient\n", + "from qdrant_client.http.models import Distance, VectorParams\n", + "\n", + "dimension = 1024\n", + "collection_name = \"ai-safety-sr-arctic-embed-l-semantic\"\n", + "qdrant_server = \"https://500cb0e8-ea08-4662-b4f2-3eca11e635da.europe-west3-0.gcp.cloud.qdrant.io:6333\"\n", + "qdrant_client = QdrantClient(url=qdrant_server,api_key=os.environ[\"QDRANT_API_KEY\"])\n", + "qdrant_client.create_collection(\n", + " collection_name=collection_name,\n", + " vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),\n", + ")\n", + "\n", + "vector_store = QdrantVectorStore(\n", + " client=qdrant_client,\n", + " collection_name=collection_name,\n", + " embedding=embedding_model,\n", + ")\n", + "\n", + "retriever = vector_store.as_retriever()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.prompts import ChatPromptTemplate\n", + "\n", + "RAG_PROMPT = \"\"\"\\\n", + "Given a provided context and question, you must answer the question based only on context.\n", + "\n", + "If you cannot answer the question based on the context - you must say \"I don't know\".\n", + "\n", + "Context: {context}\n", + "Question: {question}\n", + "\"\"\"\n", + "\n", + "rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_openai import ChatOpenAI\n", + "\n", + "# Using the same model used in the app.\n", + "chat_model_name = \"gpt-4o\"\n", + "llm = ChatOpenAI(model=chat_model_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from operator import itemgetter\n", + "from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n", + "from langchain.schema import StrOutputParser\n", + "\n", + "ai_safety_rag_chain = (\n", + " {\"context\": itemgetter(\"question\") | retriever, \"question\": itemgetter(\"question\")}\n", + " | rag_prompt | llm | StrOutputParser()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ai_safety_rag_chain.invoke({\"question\" : \"What steps can organizations take to minimize bias in AI models?\"})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LangSmith Evaluation setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langsmith.evaluation import LangChainStringEvaluator, evaluate\n", + "\n", + "eval_llm = ChatOpenAI(model=\"gpt-4o\")\n", + "\n", + "# Evaluators\n", + "qa_evaluator = LangChainStringEvaluator(\"qa\", config={\"llm\" : eval_llm})\n", + "\n", + "# Faithfulness Evaluator\n", + "# Checks whether the generated answer is faithful to the provided source material or context.\n", + "faithfulness_evaluator = LangChainStringEvaluator(\n", + " \"criteria\",\n", + " config={\n", + " \"criteria\": {\n", + " \"faithfulness\": (\n", + " \"Is the answer faithful to the given context?\"\n", + " )\n", + " },\n", + " \"llm\": eval_llm\n", + " },\n", + " prepare_data=lambda run, example: {\n", + " \"prediction\": run.outputs[\"output\"],\n", + " \"reference\": 
example.outputs[\"answer\"],\n", + " \"input\": example.inputs[\"question\"],\n", + " }\n", + ")\n", + "\n", + "# Answer Relevancy Evaluator\n", + "# Determines whether the answer is relevant to the user's question.\n", + "answer_relevancy_evaluator = LangChainStringEvaluator(\n", + " \"criteria\",\n", + " config={\n", + " \"criteria\": {\n", + " \"relevancy\": (\n", + " \"Does the answer address the question and provide relevant information?\"\n", + " )\n", + " },\n", + " \"llm\": eval_llm\n", + " },\n", + " prepare_data=lambda run, example: {\n", + " \"prediction\": run.outputs[\"output\"],\n", + " \"reference\": example.outputs[\"answer\"],\n", + " \"input\": example.inputs[\"question\"],\n", + " }\n", + ")\n", + "\n", + "# Context Precision Evaluator\n", + "# Evaluates how precisely the answer uses information from the given context.\n", + "context_precision_evaluator = LangChainStringEvaluator(\n", + " \"criteria\",\n", + " config={\n", + " \"criteria\": {\n", + " \"context_precision\": (\n", + " \"Does the answer precisely use information from the provided context?\"\n", + " )\n", + " },\n", + " \"llm\": eval_llm\n", + " },\n", + " prepare_data=lambda run, example: {\n", + " \"prediction\": run.outputs[\"output\"],\n", + " \"reference\": example.outputs[\"answer\"],\n", + " \"input\": example.inputs[\"question\"],\n", + " }\n", + ")\n", + "\n", + "# Context Recall Evaluator\n", + "# Determines if the answer recalls all the necessary and relevant information from the context.\n", + "context_recall_evaluator = LangChainStringEvaluator(\n", + " \"criteria\",\n", + " config={\n", + " \"criteria\": {\n", + " \"context_recall\": (\n", + " \"Does the answer recall all relevant information from the provided context?\"\n", + " )\n", + " },\n", + " \"llm\": eval_llm\n", + " },\n", + " prepare_data=lambda run, example: {\n", + " \"prediction\": run.outputs[\"output\"],\n", + " \"reference\": example.outputs[\"answer\"],\n", + " \"input\": example.inputs[\"question\"],\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "evaluate(\n", + " ai_safety_rag_chain.invoke,\n", + " data=dataset_name,\n", + " evaluators=[\n", + " qa_evaluator,\n", + " faithfulness_evaluator,\n", + " answer_relevancy_evaluator,\n", + " context_precision_evaluator,\n", + " context_recall_evaluator\n", + " ],\n", + " metadata={\"revision_id\": \"ai_safety_rag_chain\"},\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/Task 4/MtebRanking.png b/Tasks/Task 4/MtebRanking.png new file mode 100644 index 0000000000000000000000000000000000000000..e78f78bf3cb3a3f2687713ca9a0c485e736465bd Binary files /dev/null and b/Tasks/Task 4/MtebRanking.png differ diff --git a/Tasks/Task 4/colab-task4-finetuning-os-embed.ipynb b/Tasks/Task 4/colab-task4-finetuning-os-embed.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1d3fb560a0356ccef8a49ba5f1f0c7774f13072c --- /dev/null +++ b/Tasks/Task 4/colab-task4-finetuning-os-embed.ipynb @@ -0,0 +1 @@ 
+{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":44476,"status":"ok","timestamp":1727124155293,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"QwoAcm55nM2V","outputId":"ac34c229-241f-4a3b-e548-6073f244f3c6"},"outputs":[{"name":"stdout","output_type":"stream","text":["\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m50.4/50.4 kB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m981.5/981.5 kB\u001b[0m \u001b[31m47.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m290.2/290.2 kB\u001b[0m \u001b[31m23.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m397.0/397.0 kB\u001b[0m \u001b[31m27.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m2.1/2.1 MB\u001b[0m \u001b[31m45.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m472.8/472.8 kB\u001b[0m \u001b[31m26.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m1.5/1.5 MB\u001b[0m \u001b[31m50.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m3.5/3.5 MB\u001b[0m \u001b[31m63.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m185.7/185.7 kB\u001b[0m \u001b[31m16.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m38.0/38.0 MB\u001b[0m \u001b[31m17.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m15.9/15.9 MB\u001b[0m \u001b[31m30.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m51.5/51.5 kB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K 
\u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m54.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m2.3/2.3 MB\u001b[0m \u001b[31m74.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m27.0/27.0 MB\u001b[0m \u001b[31m64.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m207.2/207.2 kB\u001b[0m \u001b[31m16.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m245.3/245.3 kB\u001b[0m \u001b[31m20.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m519.3/519.3 kB\u001b[0m \u001b[31m33.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m10.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m76.4/76.4 kB\u001b[0m \u001b[31m6.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m375.6/375.6 kB\u001b[0m \u001b[31m25.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m141.9/141.9 kB\u001b[0m \u001b[31m12.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m71.1/71.1 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m258.9/258.9 kB\u001b[0m \u001b[31m22.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m1.1/1.1 MB\u001b[0m \u001b[31m56.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K 
\u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m159.9/159.9 kB\u001b[0m \u001b[31m15.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m553.2/553.2 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m13.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m274.7/274.7 kB\u001b[0m \u001b[31m24.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m92.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m45.3/45.3 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m194.1/194.1 kB\u001b[0m \u001b[31m18.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m82.7/82.7 kB\u001b[0m \u001b[31m8.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m80.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m318.9/318.9 kB\u001b[0m \u001b[31m27.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m49.3/49.3 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m292.8/292.8 kB\u001b[0m \u001b[31m24.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m54.5/54.5 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m4.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K 
\u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m57.5/57.5 kB\u001b[0m \u001b[31m5.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Building wheel for langdetect (setup.py) ... \u001b[?25l\u001b[?25hdone\n"]}],"source":["%pip install -qU \\\n"," langsmith==0.1.125 \\\n"," langchain_openai \\\n"," langchain_huggingface \\\n"," langchain-core==0.2.41 \\\n"," langchain \\\n"," langchain_community \\\n"," langchain-qdrant==0.1.4 \\\n"," langchain-text-splitters \\\n"," langchain-openai \\\n"," langchain_huggingface \\\n"," faiss-cpu \\\n"," langchain-experimental \\\n"," unstructured==0.15.7 \\\n"," python-pptx==1.0.2 \\\n"," nltk==3.9.1 \\\n"," PyMuPDF==1.24.10 \\\n"," ragas==0.1.18 \\\n"," protobuf==3.20.3 \\\n"," pyarrow==14.0.1 \\\n"," fsspec==2024.6.1 \\\n"," sentence_transformers \\\n"," datasets \\\n"," pyarrow==14.0.1\n"]},{"cell_type":"code","execution_count":2,"metadata":{"executionInfo":{"elapsed":909,"status":"ok","timestamp":1727124163339,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"WO5RqdMwnM2X"},"outputs":[],"source":["import nest_asyncio\n","\n","nest_asyncio.apply()"]},{"cell_type":"code","execution_count":3,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":32497,"status":"ok","timestamp":1727124249790,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"wjcm16KRnM2X","outputId":"56116829-a1b3-4b96-a052-50e8f68465a4"},"outputs":[{"name":"stdout","output_type":"stream","text":["LangChain API Key:Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n","OpenAI API Key:Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n","Enter Your Qdrant API Key: Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n","Enter Your Qdrant URL: Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·Ā·\n"]}],"source":["import os\n","import getpass\n","from uuid import uuid4\n","\n","os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n","os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"LangChain API Key:\")\n","\n","os.environ[\"LANGCHAIN_PROJECT\"] = \"AIM-FINE-TUNING - AI Safety\"\n","os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n","\n","os.environ[\"QDRANT_API_KEY\"] = getpass.getpass(\"Enter Your Qdrant API Key: \")\n","os.environ[\"QDRANT_URL\"] = getpass.getpass(\"Enter Your Qdrant URL: \")\n"]},{"cell_type":"markdown","metadata":{"id":"uxk5A7FQnM2X"},"source":["## Preparing Training documents"]},{"cell_type":"code","execution_count":4,"metadata":{"executionInfo":{"elapsed":431,"status":"ok","timestamp":1727124256029,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"DCrVn1rNnM2Y"},"outputs":[],"source":["from langchain_experimental.text_splitter import SemanticChunker\n","from enum import Enum\n","from typing import List\n","from langchain_community.document_loaders import PyMuPDFLoader\n","from langchain_core.documents import Document\n","import asyncio\n","\n","class PDFLoaderWrapper():\n"," class LoaderType(str, Enum):\n"," PYMUPDF = \"pymupdf\"\n","\n"," def __init__(self, file_path: str | List[str] , loader_type: LoaderType = LoaderType.PYMUPDF):\n"," self.file_path = file_path if isinstance(file_path, list) else [file_path]\n"," self.loader_type = loader_type\n","\n"," async def aload(self) -> List[Document]:\n"," all_docs = []\n"," for file_path in self.file_path:\n"," if self.loader_type == self.LoaderType.PYMUPDF:\n"," try:\n"," loader = PyMuPDFLoader(file_path)\n"," docs = await 
loader.aload()\n","                    all_docs.extend(docs)\n","                except Exception as e:\n","                    print(f\"Error loading file {file_path}: {e}\")\n","                    continue\n","        return all_docs\n"]},{"cell_type":"code","execution_count":9,"metadata":{"executionInfo":{"elapsed":3975,"status":"ok","timestamp":1727124331388,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"00RzzO1GnM2Y"},"outputs":[],"source":["\n","BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n","NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n","SMALL_DOC = \"https://arxiv.org/pdf/1908.10084\"\n","documents_to_preload = [\n","    BOR_FILE_PATH,\n","    NIST_FILE_PATH\n","    # SMALL_DOC\n","]\n","\n","pdf_loader = PDFLoaderWrapper(\n","    documents_to_preload, PDFLoaderWrapper.LoaderType.PYMUPDF\n",")\n","documents = await pdf_loader.aload()\n","\n"]},{"cell_type":"code","execution_count":6,"metadata":{"executionInfo":{"elapsed":1088,"status":"ok","timestamp":1727124273722,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"H74RpATynM2Y"},"outputs":[],"source":["from langchain_text_splitters import RecursiveCharacterTextSplitter\n","\n","text_splitter = RecursiveCharacterTextSplitter(\n","    chunk_size = 1024,\n","    chunk_overlap = 50,\n","    length_function = len\n",")"]},{"cell_type":"code","execution_count":10,"metadata":{"executionInfo":{"elapsed":2,"status":"ok","timestamp":1727124340132,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"MLPTaIhGnM2Z"},"outputs":[],"source":["training_documents = text_splitter.split_documents(documents)"]},{"cell_type":"code","execution_count":11,"metadata":{"executionInfo":{"elapsed":1070,"status":"ok","timestamp":1727124357614,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"KUZ0cOvRnM2Z"},"outputs":[],"source":["import uuid\n","\n","id_set = set()\n","\n","for document in training_documents:\n","    id = str(uuid.uuid4())\n","    while id in id_set:\n","        id = str(uuid.uuid4())\n","    id_set.add(id)\n","    document.metadata[\"id\"] = id"]},{"cell_type":"code","execution_count":12,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":924,"status":"ok","timestamp":1727124367912,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"-C0NFZ7WnM2Z","outputId":"0d5e22e3-dc41-4757-e18e-1a0d55be428b"},"outputs":[{"name":"stdout","output_type":"stream","text":["Training set: 343 documents\n","Validation set: 57 documents\n","Test set: 58 documents\n"]}],"source":["import random\n","\n","total_documents = len(training_documents)\n","\n","# Define the split percentages\n","train_percent = 0.75  # 75% for training\n","val_percent = 0.125  # 12.5% for validation\n","test_percent = 0.125  # 12.5% for testing\n","\n","# Shuffle the documents\n","random.shuffle(training_documents)\n","\n","# Calculate the split indices\n","train_split = int(total_documents * train_percent)\n","val_split = int(total_documents * (train_percent + val_percent))\n","\n","# Split the documents\n","training_split_documents = training_documents[:train_split]\n","val_split_documents = training_documents[train_split:val_split]\n","test_split_documents = training_documents[val_split:]\n","\n","print(f\"Training set: {len(training_split_documents)} documents\")\n","print(f\"Validation set: {len(val_split_documents)} documents\")\n","print(f\"Test set: {len(test_split_documents)} documents\")\n"]},
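{"cell_type":"markdown","metadata":{},"source":["The cells above use the default chunking strategy: `RecursiveCharacterTextSplitter` with 1024-character chunks and 50 characters of overlap. Below is a minimal sketch of the alternative strategy worth testing, semantic chunking via the `SemanticChunker` already imported earlier; the embedding model (`text-embedding-3-small`) and the percentile breakpoint are illustrative assumptions, not settings used in the recorded runs."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Sketch only: alternative semantic chunking (not used for the runs below).\n","# Assumes OpenAIEmbeddings and a percentile breakpoint; tune before adopting.\n","from langchain_openai import OpenAIEmbeddings\n","\n","semantic_splitter = SemanticChunker(\n","    OpenAIEmbeddings(model=\"text-embedding-3-small\"),\n","    breakpoint_threshold_type=\"percentile\",  # split where sentence-embedding distance spikes\n",")\n","# semantic_documents = semantic_splitter.split_documents(documents)\n"]},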
documents\")\n","print(f\"Test set: {len(test_split_documents)} documents\")\n"]},{"cell_type":"markdown","metadata":{"id":"qqW7tWUQnM2Z"},"source":["## Constructing a Fine-Tuning dataset"]},{"cell_type":"code","execution_count":13,"metadata":{"executionInfo":{"elapsed":888,"status":"ok","timestamp":1727124394706,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"n1UQEXjHnM2Z"},"outputs":[],"source":["from langchain_openai import ChatOpenAI\n","from langchain_core.prompts import ChatPromptTemplate\n","\n","qa_chat_model = ChatOpenAI(\n"," model=\"gpt-4o\",\n"," temperature=0\n",")\n","\n","qa_prompt = \"\"\"\\\n","Given the following context, you must generate questions based on only the provided context.\n","Check internet the question that you generate is realistic and asked by online users and\n","include only such questions in the output to be realistic.\n","You are to generate {n_questions} questions which should be provided in the following format:\n","\n","1. QUESTION #1\n","2. QUESTION #2\n","...\n","\n","Context:\n","{context}\n","\"\"\"\n","\n","qa_prompt_template = ChatPromptTemplate.from_template(qa_prompt)\n","\n","question_generation_chain = qa_prompt_template | qa_chat_model"]},{"cell_type":"code","execution_count":14,"metadata":{"executionInfo":{"elapsed":3,"status":"ok","timestamp":1727124402836,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"mQkh-hI1nM2Z"},"outputs":[],"source":["import tqdm\n","def create_questions(documents, n_questions):\n"," questions = {}\n"," relevant_docs = {}\n"," for document in tqdm.tqdm(documents):\n"," document_content = {\"context\" : document.page_content, \"questions\" : []}\n"," questions_generated = question_generation_chain.invoke({\"context\": document.page_content, \"n_questions\": n_questions})\n"," for question in questions_generated.content.split(\"\\n\"):\n"," question_id = str(uuid.uuid4())\n"," questions[question_id] = \"\".join(question.split(\".\")[1:]).strip()\n"," relevant_docs[question_id] = [document.metadata[\"id\"]]\n"," return questions, relevant_docs"]},{"cell_type":"code","execution_count":15,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1042347,"status":"ok","timestamp":1727125448974,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"CYvHbsstnM2a","outputId":"4d9b2bb5-9faf-41ac-8d50-a70c5e5c7f70"},"outputs":[{"name":"stderr","output_type":"stream","text":["100%|ā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆ| 343/343 [17:21<00:00, 3.04s/it]\n"]},{"data":{"text/plain":["3430"]},"execution_count":15,"metadata":{},"output_type":"execute_result"}],"source":["training_questions, training_relevant_contexts = create_questions(training_split_documents,10)\n","len(training_questions)"]},{"cell_type":"code","execution_count":16,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":3,"status":"ok","timestamp":1727125498523,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"ps3fGySIsyIy","outputId":"2e5fbf9e-f55e-4f85-a483-63d60b3189c8"},"outputs":[{"name":"stdout","output_type":"stream","text":["Data saved to training_questions.json\n"]}],"source":["import json\n","\n","# Specify the filename where you want to save the JSON\n","filename = 'training_questions.json'\n","\n","# Write the dictionary to a JSON file\n","with open(filename, 'w') as json_file:\n"," 
json.dump(training_questions, json_file)\n","\n","print(f\"Data saved to {filename}\")\n"]},{"cell_type":"code","execution_count":17,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":71083,"status":"ok","timestamp":1727125605219,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"3I2x2S7knM2a","outputId":"f2b78a4a-fe0b-4222-ebc9-e596bdf00bca"},"outputs":[{"name":"stderr","output_type":"stream","text":["100%|██████████| 57/57 [01:10<00:00, 1.24s/it]\n"]},{"data":{"text/plain":["114"]},"execution_count":17,"metadata":{},"output_type":"execute_result"}],"source":["val_questions, val_relevant_contexts = create_questions(val_split_documents, 2)\n","len(val_questions)"]},{"cell_type":"code","execution_count":18,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":446,"status":"ok","timestamp":1727125609579,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"Bllm0YqWtGzv","outputId":"7d94dd16-61cc-441b-cd6c-77deb4fcde8e"},"outputs":[{"name":"stdout","output_type":"stream","text":["Data saved to val_questions.json\n"]}],"source":["import json\n","\n","# Specify the filename where you want to save the JSON\n","filename = 'val_questions.json'\n","\n","# Write the dictionary to a JSON file\n","with open(filename, 'w') as json_file:\n","    json.dump(val_questions, json_file)\n","\n","print(f\"Data saved to {filename}\")\n"]},{"cell_type":"code","execution_count":19,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":65631,"status":"ok","timestamp":1727125677944,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"_4HLmlITnM2a","outputId":"4a63669b-f746-46d4-ff29-f48ade7d5e99"},"outputs":[{"name":"stderr","output_type":"stream","text":["100%|██████████| 58/58 [01:05<00:00, 1.12s/it]\n"]},{"data":{"text/plain":["116"]},"execution_count":19,"metadata":{},"output_type":"execute_result"}],"source":["test_questions, test_relevant_contexts = create_questions(test_split_documents, 2)\n","len(test_questions)"]},{"cell_type":"code","execution_count":20,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":2,"status":"ok","timestamp":1727125684354,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"YR9uIgUHtLwO","outputId":"032f88d5-0743-4e6e-9777-9c24cee7b750"},"outputs":[{"name":"stdout","output_type":"stream","text":["Data saved to test_questions.json\n"]}],"source":["import json\n","\n","# Specify the filename where you want to save the JSON\n","filename = 'test_questions.json'\n","\n","# Write the dictionary to a JSON file\n","with open(filename, 'w') as json_file:\n","    json.dump(test_questions, json_file)\n","\n","print(f\"Data saved to {filename}\")\n"]},
open(\"training_dataset.jsonl\", \"w\") as f:\n"," json.dump(train_dataset, f)\n","\n","val_corpus = {val_item.metadata[\"id\"] : val_item.page_content for val_item in val_split_documents}\n","\n","val_dataset = {\n"," \"questions\" : val_questions,\n"," \"relevant_contexts\" : val_relevant_contexts,\n"," \"corpus\" : val_corpus\n","}\n","\n","with open(\"val_dataset.jsonl\", \"w\") as f:\n"," json.dump(val_dataset, f)\n","\n","train_corpus = {test_item.metadata[\"id\"] : test_item.page_content for test_item in test_split_documents}\n","\n","test_dataset = {\n"," \"questions\" : test_questions,\n"," \"relevant_contexts\" : test_relevant_contexts,\n"," \"corpus\" : train_corpus\n","}\n","\n","with open(\"test_dataset.jsonl\", \"w\") as f:\n"," json.dump(test_dataset, f)"]},{"cell_type":"markdown","metadata":{"id":"aZRalj8xnM2a"},"source":["## Fine-tuning `Snowflake/snowflake-arctic-embed-l`"]},{"cell_type":"code","execution_count":23,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":424,"referenced_widgets":["cd8022aaa9404a10939337bd6e1c38bb","e7d91e070fbb4c59a7c83c1c16c89a3a","099335f2c6cf4e7ca148121d38c42764","59b7c58882e34c9eb838c5cfce10eb80","ad76dfdb5a6148f9aed2927225cd53e1","5f2b4972fe224537b6a488d7f1401bca","0ddef191449543a8983807ab48efa88b","06d67987a0a146efb5792b1e30fc83e8","221017bd5a014bde99a58f744b7c849d","4ca494bf09284ce8925260eccb037538","5546d729b514472e9afb9eb9033244c6","086904037e764ac1a6d9257df68cafaa","d8c3106084b54badb89e30c9b46f056c","b8671fe776264827be1db6ab7ee4f4e4","5442dc5b03f34c72ba8210f8fba28799","939f82c6638548eda8f92c7bc8164957","a35b5d59d9974d459ad3477f4faaba7f","0b7894e5c5d8482185a33cb72d69c041","f3269734f9414c13a2fa9723c2f35790","b91018e2a75d4b029c61e611f8f364cb","5526033b838949e7a20aab86444d5089","0ed6af7c19cf4a2dbfa0b90ecefd49bf","b6e988e0b67046d79bc7d096c3f941aa","ff44ce74e8a54a5fb6ca61606e6ae6f2","b03f7f7e26ac47e2aa4af084c5e3e3bb","1b556db40ab8459d88c689be257900d6","fdc3c61e672a4de89d33b206acf5320b","3aff9c20986845fdaa38eb8fe8affddd","4586772f17524dee97f7b6c45f20667f","fd7703d8bead4df285305942d7feeecb","9440a0e2c13f4102adf4c95da3a4ab77","029a071635644fe0be03b8616dd9e9f7","2fb82da24edc4db381cdef6bedde74ab","33c05991f0244163a09a5e6b9331f025","cd1784eae89e451a8025750847ed6ef1","4fc6c199cbcc45b8a1bd37d8f9fbbeed","5ecae354be0f426291c81f19e2bfa1e1","00d0afb531684ad990141e17d735fdde","f5c2be5d46f04e6d824061b493d20b03","f5268fae9be749b7a05256473c9575c2","5910fc3156bc4195807785dea70d0561","99830a97df904732a0bc416633b3d9d6","811eee9c6b1f40b0931cd979bdcc836b","542e25298f9c4dad842d513e580729f0","89b6159fa92947a282f25d23fa64933f","063b2fb3c1264175839c320cb20540e0","2b5c7fdd9284455d850756a0c7e5a0ac","8154fb8e60f54e35b1951edfb94aeec0","12d52438ca3a45cd9b0b3906005309ea","f1bf959afd114c0da5778504b3dfac3d","c73de0e825ed452b985bbdee42619f1c","41690a7cd102433094de092ffb50d71c","212b6edc6da64aaa81a1391b0a85842b","589b87ab91b64706aa322f3a8ad2b75b","f8bc69007ae54fb0a2eccfc93c83f41b","6cf62d35e3804c9cbc8a75e7b3acf190","ea76a465b6c243269398134dbd294613","ec02a444a9cf473284521bec097b9af3","6e10b4829e3546bd926381c15ba8845a","e83c6cb32f0b4c07953dfc068475b605","a418a3e6152b48ddbde6931ed96a704e","8759483ad1d94043ad524111b2e8be8f","7ed4c1746eed4c0ab62c56486c047534","a98c940d9c164284a475129af41a3886","da69983a92ab46329250fe027115cde1","9e8338c59c13450ca03b2b6c2fbb5263","5a7645ce55ac470ba893e84c68e80bdf","ed4328bc291248bda181f59ecef041c9","c5272bdccaef4fd2883893c757de3de0","708bddc0849d4cf0b4b805d116175184","aeb0c62dbdc244f39c1640702cde4349","c517665a69554416ac467b4b2867d
edd","0ae4358a1bfc4c8bac06938b1cc5d677","fb505e3d077341e18d59d8bcc3eb5fe9","8e3ced6e76274954a192160741786b4f","bc3e87e68ab743d9ac18d4aaea17d932","b944421a47d1473492378bd4b85294a5","993af9b12eeb465097dd6ad2ec8f541a","fd6b07049ffd4baea48d98bab00ae91e","6689997c4e8a49b8acc7f1c8d9517be8","894077a1eae9487a9f4a34964099dfbb","13491622b3de4df880aabcf273d6e313","202ce0c76a614dbd82f0f64fc55618f8","b5b3fafd5f97403c8d8a473da84e9163","3985a7dac27942f2b37c1162238af035","95a6b04d532c4cca91b60c87440c808e","348eda1e99ed44b984fc42e11ceb5f22","6d6f3c08bc6d4a7c922ce66d39500574","27abe99a21b6483eab8ad967680d55a6","99e3b3d042fd4b9aabd0c2f93801d644","a312b44e0bb24934ad9e2e8ec7909b25","916a9a3a43e544a5b2eb68ee2eed680b","f8d554b3702847f3a234a14ca0354bd0","28bbd69ad66b447a8fb29b5f5133d8b1","62a62a7f977e4e2db40caf04bae315e9","e89e9331e2534620bd83c71e653052ca","a752d32fcf0046afa6f59d9aaeb19bea","696625b37847449790476516c37c72a9","88f58fdaf0184861949eabf842662ed0","1abfd525ec82401c871d77136c5e9710","36a3dff77fd14e55a9ca2343bf1a0662","05f54ae147f84368b9b9b229d651f372","ebed5481ee7d4833af40c86c9176c397","0d5f11f617c446c3a970f3f8a14d8542","edd40704fe314f3e9b1b4d469de4007c","6c8e86e4dc684183950cb72ec93bc132","993afbad55b0429fba66680948fc3cc8","214d4199769241b7b3a5308a77c0e82c","97b7b8bac3724273b559f807f3e4b63a","a4f06a72f6794a8b8d6d15aec09148c0","5992ffca6d004d93a36f27b1151e6cd7","9bd4c21f75664bbbb3bb7a2924a1696f","aaa254dd30834039876d374499f2d5cd","f785472a2e764df79c4bbe152b6b5c5f","fa832662420b495c8ea2db00a2c76a21","88739c574a6e402e90e11619f7359a4a","35006683857e4f48adb0d9f6249b666a","5d3fee5286ae4b2e8b5f9584b3d8da9a","48224db4e593453a96129ce3b0aecf04","27458abe93644581b06bc2e4b80f580d","3b81c55fb6c443dc96d8fc6342ceda84"]},"executionInfo":{"elapsed":38131,"status":"ok","timestamp":1727125814541,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"DXR3i2aBnM2a","outputId":"6a6c8287-eb72-4f6a-fdea-ebea624c80ce"},"outputs":[{"name":"stderr","output_type":"stream","text":["/usr/local/lib/python3.10/dist-packages/sentence_transformers/cross_encoder/CrossEncoder.py:13: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. 
{"cell_type":"code","execution_count":23,"metadata":{"executionInfo":{"elapsed":38131,"status":"ok","timestamp":1727125814541,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"DXR3i2aBnM2a","outputId":"6a6c8287-eb72-4f6a-fdea-ebea624c80ce"},"outputs":[{"name":"stderr","output_type":"stream","text":["/usr/local/lib/python3.10/dist-packages/sentence_transformers/cross_encoder/CrossEncoder.py:13: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n","  from tqdm.autonotebook import tqdm, trange\n"]},{"data":{"text/plain":["[Hugging Face download widgets omitted: modules.json (349 B) and the remaining Snowflake/snowflake-arctic-embed-l files]\n","[blank training-progress table omitted: the run completed 17150/17150 steps in 1:53:42 over 5 epochs]\n","
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," 
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," 
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," 
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," 
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," 
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," 
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," 
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
**Fine-tuning training log** (retrieval metrics evaluated every 50 steps from step 50 to 4950, plus one extra evaluation at step 3430). Validation loss was never logged (`No log`). In every row the dot-product metrics are identical to the cosine metrics, Recall@k equals Accuracy@k, and Precision@k equals Accuracy@k / k — what you would expect when each query has exactly one relevant chunk — so only the cosine Accuracy, NDCG, MRR, and MAP columns are shown. Consecutive steps with identical values are collapsed into ranges:

| Steps | Training Loss | Cosine Acc@1 | Cosine Acc@3 | Cosine Acc@5 | Cosine Acc@10 | Cosine NDCG@10 | Cosine MRR@10 | Cosine MAP@100 |
|---|---|---|---|---|---|---|---|---|
| 50–450 | No log | 0.280702 | 0.464912 | 0.535088 | 0.719298 | 0.479093 | 0.405789 | 0.422641 |
| 500–950 | 0.000000 | 0.280702 | 0.464912 | 0.535088 | 0.719298 | 0.479093 | 0.405789 | 0.422641 |
| 1000–2650 | 0.000000 | 0.280702 | 0.464912 | 0.535088 | 0.719298 | 0.479198 | 0.405886 | 0.422739 |
| 2700–4100 (incl. 3430) | 0.000000 | 0.280702 | 0.464912 | 0.535088 | 0.710526 | 0.476662 | 0.405009 | 0.422659 |
| 4150–4600 | 0.000000 | 0.280702 | 0.464912 | 0.535088 | 0.710526 | 0.477046 | 0.405448 | 0.423098 |
| 4650–4950 | 0.000000 | 0.280702 | 0.464912 | 0.535088 | 0.719298 | 0.479582 | 0.406325 | 0.423203 |
50000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232030.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423203
50500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232030.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423203
51000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232030.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423203
51500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232030.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423203
52000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232030.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423203
52500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232030.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423203
53000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
53500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
54000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
54500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
55000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
55500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
56000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
56500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
57000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
57500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
58000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
58500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
59000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
59500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
60000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.4232510.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4795820.4063250.423251
60500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
61000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
61500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
62000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
62500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
63000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
63500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
64000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
64500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
65000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
65500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
66000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
66500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
67000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
67500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
68000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
68500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
68600.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
69000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
69500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
70000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
70500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
71000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
71500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
72000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
72500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
73000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
73500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
74000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
74500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
75000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
75500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
76000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
76500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
77000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
77500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
78000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
78500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
79000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
79500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
80000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
80500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
81000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
81500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
82000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
82500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
83000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
83500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
84000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
84500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
85000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
85500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
86000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
86500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
87000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
87500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
88000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
88500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
89000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
89500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
90000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
90500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
91000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
91500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
92000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
92500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
93000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
93500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
94000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
94500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
95000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
95500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
96000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
96500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
97000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
97500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
98000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
98500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
99000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
99500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
100000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
100500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
101000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
101500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
102000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
102500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
102900.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
103000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
103500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
104000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
104500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
105000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
105500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
106000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
106500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
107000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
107500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
108000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
108500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
109000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
109500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
110000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
110500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
111000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4233730.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423373
111500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
112000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
112500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
113000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
113500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
114000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
114500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
115000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
115500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
116000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
116500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
117000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
117500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
118000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234290.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423429
118500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
119000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
119500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
120000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
120500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
121000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
121500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
122000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
122500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
123000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
123500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
124000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4234860.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423486
124500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
125000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
125500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
126000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
126500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
127000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
127500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
128000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
128500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
129000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
129500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
130000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
130500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
131000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
131500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
132000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
132500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
133000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
133500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
134000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
134500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
135000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
135500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
136000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
136500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
137000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
137200.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
137500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
138000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
138500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
139000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
139500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235420.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423542
140000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
140500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
141000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
141500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
142000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
142500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
143000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
143500.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
144000.000000No log0.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.4235680.2807020.4649120.5350880.7192980.2807020.1549710.1070180.0719300.2807020.4649120.5350880.7192980.4797090.4064470.423568
[Flattened training-log rows collapsed: every checkpoint shown (apparently steps 14450 through 17150, logged every 50 steps) repeats identical values: training loss 0.000000, validation loss "No log", and what appear to be retrieval metrics accuracy@1/3/5/10 = 0.2807/0.4649/0.5351/0.7193, precision@1/3/5/10 = 0.2807/0.1550/0.1070/0.0719, recall@k equal to accuracy@k, NDCG@10 = 0.4797, MRR@10 = 0.4064, MAP@100 = 0.4236, with the cosine- and dot-similarity blocks identical.]

"],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["warmup_steps = int(len(loader) * EPOCHS * 0.1)\n","\n","model.fit(\n"," train_objectives=[(loader, train_loss)],\n"," epochs=EPOCHS,\n"," warmup_steps=warmup_steps,\n"," output_path='finetuned_arctic-embedd-l',\n"," show_progress_bar=True,\n"," evaluator=evaluator,\n"," evaluation_steps=50,\n",")"]},{"cell_type":"code","execution_count":28,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":145,"referenced_widgets":["6402cdd0e7b34f84b8c7fef34cff51d3","37820e89028e450d9b3eb95614386930","9f0bedc74ee2481fb88b093eb23a1458","ff6e1c52ad0c406da08c604a934e7aa1","3a54317901864310830a65a2b38d3356","01c526f4dc91484b92cac4ce649259c2","13e502c9d36c4651a4b8b778f3abf147","656ca2910be84392b13462e3596954e2","5d57068353544e5dad585b56c75cf541","97a14f355075450fa5978b6f9c729419","7e1918b2ea1a4a0eaf2af9eccbaba5fd","c486ad7a260640d5a1c45f518c4e0469","9c5c7ecc51c84c86b7c212a501b9fac5","8eb8aa6a63944c66b71956fe70338e95","1787c966e863494ba89d3150c008bcab","bb6d31017c334cd995fd41e75eeb37fb","98ccdf3dcfe74921ba5d58b52f4dcc9d","14fb81c6beef4d96afd21598f3acc376","b4109464f1834a7dae43f0116f6569fe","dc0460abcdb74448b491f23583ae4766","00afb28cca0243cab2cf00fed4ffb6e7","43ae71619843486e8f2ec6ac5d1274b9","b0d0f04d53674701bbda50dd8b11ba84","350fd98bf967435fae8823f2c084f869","4f6cc0f0c6f841c58f0075c749392328","db7636673d404cafb8de516908734ab9","8695bb0563b648a188829e2e49560feb","6ef9fe1c9e9846d6a28ec690b4ccecb5","9214bf7d49d44c52a40d05cd8fb7587d","0c6657312b014603824252f540fde744","8949ccfc710348a6b8c59bbf49439253","4ad530c43d934970bad9576481fd947f"]},"executionInfo":{"elapsed":448,"status":"ok","timestamp":1727133360275,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"_mOaTlgEJmxa","outputId":"bbaef0ff-b6e4-4ad9-a2af-8f7242b2ae69"},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"6402cdd0e7b34f84b8c7fef34cff51d3","version_major":2,"version_minor":0},"text/plain":["VBox(children=(HTML(value='

\n","
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
questioncontextsground_truthevolution_typemetadataepisode_done
0What is the focus of the 2014 Federal Trade Co...[ENDNOTES\\n57. ISO Technical Management Board....The answer to given question is not present in...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
1What adverse impacts should be assessed during...[32 \\nMEASURE 2.6: The AI system is evaluated ...Adverse impacts, including health and wellbein...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
2What are the expectations for automated system...[DATA PRIVACY \\nWHAT SHOULD BE EXPECTED OF AUT...The expectations for automated systems in term...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
3How should automated systems protect the publi...[SAFE AND EFFECTIVE \\nSYSTEMS \\nWHAT SHOULD BE...Automated systems should protect the public fr...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
4How can integrating pre- and post-deployment e...[While indirect feedback methods such as autom...Integrating pre- and post-deployment external ...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
\n","
\n","
\n","\n","
\n"," \n","\n"," \n","\n"," \n","
\n","\n","\n","
\n"," \n","\n","\n","\n"," \n","
\n","\n","
\n"," \n"],"text/plain":[" question \\\n","0 What is the focus of the 2014 Federal Trade Co... \n","1 What adverse impacts should be assessed during... \n","2 What are the expectations for automated system... \n","3 How should automated systems protect the publi... \n","4 How can integrating pre- and post-deployment e... \n","\n"," contexts \\\n","0 [ENDNOTES\\n57. ISO Technical Management Board.... \n","1 [32 \\nMEASURE 2.6: The AI system is evaluated ... \n","2 [DATA PRIVACY \\nWHAT SHOULD BE EXPECTED OF AUT... \n","3 [SAFE AND EFFECTIVE \\nSYSTEMS \\nWHAT SHOULD BE... \n","4 [While indirect feedback methods such as autom... \n","\n"," ground_truth evolution_type \\\n","0 The answer to given question is not present in... simple \n","1 Adverse impacts, including health and wellbein... simple \n","2 The expectations for automated systems in term... simple \n","3 Automated systems should protect the public fr... simple \n","4 Integrating pre- and post-deployment external ... simple \n","\n"," metadata episode_done \n","0 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","1 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","2 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","3 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","4 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True "]},"execution_count":61,"metadata":{},"output_type":"execute_result"}],"source":["testset.to_pandas().head()"]},{"cell_type":"code","execution_count":62,"metadata":{"executionInfo":{"elapsed":1429,"status":"ok","timestamp":1727144285447,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"9LnOHMW_nM2f"},"outputs":[],"source":["from datasets import Dataset\n","\n","def generate_answers(chain, testset):\n"," answers = []\n"," contexts = []\n"," questions = testset.to_pandas()[\"question\"].values.tolist()\n"," ground_truths = testset.to_pandas()[\"ground_truth\"].values.tolist()\n","\n"," for question in tqdm.tqdm(questions):\n"," answer = chain.invoke({\"question\" : question})\n"," answers.append(answer[\"response\"])\n"," contexts.append([context.page_content for context in answer[\"context\"]])\n","\n"," return Dataset.from_dict({\n"," \"question\" : questions,\n"," \"answer\" : answers,\n"," \"contexts\" : contexts,\n"," \"ground_truth\" : ground_truths\n"," })"]},{"cell_type":"code","execution_count":63,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":76657,"status":"ok","timestamp":1727144386762,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"UQGx4xWSnM2f","outputId":"fd21e57a-60ed-4c6f-d288-eb0353ce87bb"},"outputs":[{"name":"stderr","output_type":"stream","text":["100%|ā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆ| 20/20 [00:40<00:00, 2.04s/it]\n","100%|ā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆ| 20/20 [00:35<00:00, 1.77s/it]\n"]}],"source":["base_dataset = generate_answers(base_rag_chain, testset)\n","finetune_dataset = generate_answers(fine_tuned_rag_chain, 
testset)"]},{"cell_type":"code","execution_count":64,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":86,"referenced_widgets":["e622bf4802484e4180e47c25887d6826","2088a9863ab841008c09874e754ef8bd","e6c325123e3d443d85cdbca446883658","b959b621ed974f7680cb91cb4119ae14","376d7493e3c947bfb522e9a6d34e40f6","eaded34e0186406ea1fb991b31f25a08","6aeb3dd342ee4678adf99dde015aeb7e","adff481f63e74e7d860be13e7457cea7","a45be662c59d4faf892e6980b86deaba","1d2b5d5c680b41438f407102c6bc3283","205fefe263fa471d8d6037d509dd6488"]},"executionInfo":{"elapsed":48870,"status":"ok","timestamp":1727144442447,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"dmONclienM2f","outputId":"96a03404-0a19-42a3-f570-a25dab634b95"},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"e622bf4802484e4180e47c25887d6826","version_major":2,"version_minor":0},"text/plain":["Evaluating: 0%| | 0/80 [00:00\\n\\n502 Server Error\\n\\n\\n

[HTML residue from a transient "502 Server Error" page returned mid-evaluation: "The server encountered a temporary error and could not complete your request. Please try again in 30 seconds."]

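The 502 above is a transient upstream failure surfaced while ragas was calling the judge model; the notebook simply re-ran the cell. A small retry wrapper, a hypothetical helper that is not part of the original code, can make the `evaluate(...)` calls in the next cells more robust:

```python
import time

def evaluate_with_retries(dataset, metrics, attempts=3, backoff_s=30):
    """Retry ragas.evaluate on transient errors (e.g. the 502 seen above)."""
    for attempt in range(1, attempts + 1):
        try:
            return evaluate(dataset, metrics=metrics)  # ragas.evaluate, imported in the next cell
        except Exception as exc:  # the 502 surfaced as a generic HTTP exception
            if attempt == attempts:
                raise
            print(f"attempt {attempt} failed ({exc}); retrying in {backoff_s}s")
            time.sleep(backoff_s)
```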
\\n\\n')\n"]}],"source":["from ragas.metrics import (\n"," faithfulness,\n"," answer_relevancy,\n"," context_recall,\n"," context_precision,\n",")\n","from ragas import evaluate\n","\n","base_result = evaluate(\n"," base_dataset,\n"," metrics=[\n"," faithfulness,\n"," answer_relevancy,\n"," context_recall,\n"," context_precision,\n"," ],\n",")"]},{"cell_type":"code","execution_count":65,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":1022,"status":"ok","timestamp":1727144448564,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"2QjuTOntnM2f","outputId":"6dbba069-b8d1-44e8-cd2e-fa7f7e51e58d"},"outputs":[{"data":{"text/plain":["{'faithfulness': 0.8086, 'answer_relevancy': 0.6274, 'context_recall': 0.6000, 'context_precision': 0.5127}"]},"execution_count":65,"metadata":{},"output_type":"execute_result"}],"source":["base_result"]},{"cell_type":"code","execution_count":66,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":449},"executionInfo":{"elapsed":970,"status":"ok","timestamp":1727144456465,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"_qRPowKznM2f","outputId":"2f535278-9586-4676-fbef-443b801bda36"},"outputs":[{"name":"stderr","output_type":"stream","text":["/usr/local/lib/python3.10/dist-packages/datasets/table.py:1395: FutureWarning: promote has been superseded by mode='default'.\n"," block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]\n","/usr/local/lib/python3.10/dist-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n"," table = cls._concat_blocks(blocks, axis=0)\n"]},{"data":{"application/vnd.google.colaboratory.intrinsic+json":{"summary":"{\n \"name\": \"base_result\",\n \"rows\": 5,\n \"fields\": [\n {\n \"column\": \"question\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"What adverse impacts should be assessed during the training and maintenance of the AI system?\",\n \"How can integrating pre- and post-deployment external feedback enhance the monitoring process for GAI models?\",\n \"What are the expectations for automated systems in terms of data privacy and surveillance?\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"contexts\",\n \"properties\": {\n \"dtype\": \"object\",\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"answer\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"The adverse impacts that should be assessed during the training and maintenance of the AI system include:\\n\\n1. **Algorithmic Discrimination**: This involves evaluating the system for any resulting algorithmic discrimination, which includes differential demographic impact and resulting error rates (overall and per demographic group).\\n\\n2. **Data Quality Issues**: This includes assessing any data used to train machine learning models or for other purposes, identifying missing, incomplete, or erroneous data, and providing data relevancy justifications.\\n\\n3. **Risk Identification and Management**: This involves conducting risk identification and management assessments and taking steps to mitigate potential harms.\\n\\n4. **Performance Testing**: This includes the results of performance testing such as accuracy, differential demographic impact, and comparisons to previously deployed systems.\\n\\n5. 
**Ongoing Monitoring**: This involves ongoing monitoring procedures and regular performance testing reports, including monitoring frequency.\\n\\nThese assessments help ensure that the AI system operates fairly, accurately, and without unintended harmful consequences.\",\n \"Integrating pre- and post-deployment external feedback into the monitoring process for Generative Artificial Intelligence (GAI) models can enhance the process by increasing awareness of performance changes and mitigating potential risks and harms from outputs. This feedback can provide valuable insights into authentication efficacy, vulnerabilities, impacts of adversarial threats, and unintended consequences resulting from the utilization of content provenance approaches on users. By capturing and utilizing this feedback, organizations can better understand and address issues that may arise, thereby improving the overall reliability and safety of GAI systems.\",\n \"The provided context does not explicitly detail the expectations for automated systems in terms of data privacy and surveillance. Therefore, I do not know the answer based on the given information.\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"ground_truth\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"Adverse impacts, including health and wellbeing impacts for value chain or other AI Actors that are exposed to sexually explicit, offensive, or violent information during GAI training and maintenance, should be assessed.\",\n \"Integrating pre- and post-deployment external feedback into the monitoring process for GAI models and corresponding applications can help enhance awareness of performance changes and mitigate potential risks and harms from outputs.\",\n \"The expectations for automated systems in terms of data privacy and surveillance include protecting the public from unchecked surveillance through heightened oversight. Surveillance or monitoring systems should be subject to heightened oversight that includes at a minimum assessment of potential harms during design (before deployment) and in an ongoing manner, to ensure that the American public\\u2019s rights, opportunities, and access are protected. 
Additionally, any system collecting, using, sharing, or storing sensitive data should meet enhanced protections, using sensitive data only for functions strictly necessary for that domain or for administrative reasons, unless consent is acquired and additional expectations are met.\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"faithfulness\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.4190802641929825,\n \"min\": 0.0,\n \"max\": 1.0,\n \"num_unique_values\": 4,\n \"samples\": [\n 1.0,\n 0.8181818181818182,\n 0.0\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"answer_relevancy\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.4347389817941404,\n \"min\": 0.0,\n \"max\": 1.0000000000000007,\n \"num_unique_values\": 5,\n \"samples\": [\n 0.9981034134190466,\n 0.9339808858594375,\n 0.0\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"context_recall\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.5477225575051662,\n \"min\": 0.0,\n \"max\": 1.0,\n \"num_unique_values\": 2,\n \"samples\": [\n 0.0,\n 1.0\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"context_precision\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.43938087752145943,\n \"min\": 0.0,\n \"max\": 0.9999999999666667,\n \"num_unique_values\": 5,\n \"samples\": [\n 0.9999999999,\n 0.99999999995\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}","type":"dataframe"},"text/html":["\n","
\n","
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
questioncontextsanswerground_truthfaithfulnessanswer_relevancycontext_recallcontext_precision
0What is the focus of the 2014 Federal Trade Co...[Data Privacy; Intellectual \\nProperty, Priori...The focus of the 2014 Federal Trade Commission...The answer to given question is not present in...0.0000001.0000001.00.000000
1What adverse impacts should be assessed during...[organizationā€™s business processes or other ac...The adverse impacts that should be assessed du...Adverse impacts, including health and wellbein...1.0000000.9981030.01.000000
2What are the expectations for automated system...[Data Privacy; Intellectual \\nProperty, detail...The provided context does not explicitly detai...The expectations for automated systems in term...1.0000000.0000000.00.583333
3How should automated systems protect the publi...[detailed steps toward actualizing these princ...Automated systems should protect the public fr...Automated systems should protect the public fr...0.8571430.9473251.01.000000
4How can integrating pre- and post-deployment e...[While indirect feedback methods such as autom...Integrating pre- and post-deployment external ...Integrating pre- and post-deployment external ...0.8181820.9339811.01.000000
\n","
\n","
\n","\n","
\n"," \n","\n"," \n","\n"," \n","
\n","\n","\n","
\n"," \n","\n","\n","\n"," \n","
\n","\n","
\n","
\n"],"text/plain":[" question \\\n","0 What is the focus of the 2014 Federal Trade Co... \n","1 What adverse impacts should be assessed during... \n","2 What are the expectations for automated system... \n","3 How should automated systems protect the publi... \n","4 How can integrating pre- and post-deployment e... \n","\n"," contexts \\\n","0 [Data Privacy; Intellectual \\nProperty, Priori... \n","1 [organizationā€™s business processes or other ac... \n","2 [Data Privacy; Intellectual \\nProperty, detail... \n","3 [detailed steps toward actualizing these princ... \n","4 [While indirect feedback methods such as autom... \n","\n"," answer \\\n","0 The focus of the 2014 Federal Trade Commission... \n","1 The adverse impacts that should be assessed du... \n","2 The provided context does not explicitly detai... \n","3 Automated systems should protect the public fr... \n","4 Integrating pre- and post-deployment external ... \n","\n"," ground_truth faithfulness \\\n","0 The answer to given question is not present in... 0.000000 \n","1 Adverse impacts, including health and wellbein... 1.000000 \n","2 The expectations for automated systems in term... 1.000000 \n","3 Automated systems should protect the public fr... 0.857143 \n","4 Integrating pre- and post-deployment external ... 0.818182 \n","\n"," answer_relevancy context_recall context_precision \n","0 1.000000 1.0 0.000000 \n","1 0.998103 0.0 1.000000 \n","2 0.000000 0.0 0.583333 \n","3 0.947325 1.0 1.000000 \n","4 0.933981 1.0 1.000000 "]},"execution_count":66,"metadata":{},"output_type":"execute_result"}],"source":["base_result.to_pandas().head()"]},{"cell_type":"code","execution_count":67,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":49,"referenced_widgets":["dc88c7132d8e48eb9db8fd34a3b874f0","4fc26c7d321749ed92bc408d61483a46","43a136e1e1304b638bd70f8a4329cdb1","6f16cf03eeeb464eb527db0e7b325d66","ba9cb2ccc31941d8ad3e150949c17398","1b9110b967264538b0d2bf70f9290469","3eb70a74d6644e60909daa3cbe6fa141","3264339479b2488998eb1a95db90dc7b","72ce3640cd81450791cf9a79a1998b70","2a3888c2c35548198bd6283d239553a0","6d167890f49042618bebe73a926ce6f2"]},"executionInfo":{"elapsed":58408,"status":"ok","timestamp":1727144531287,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"f9SPfkuwnM2f","outputId":"03594cb0-7426-43ca-9d11-f4d82b08b523"},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"dc88c7132d8e48eb9db8fd34a3b874f0","version_major":2,"version_minor":0},"text/plain":["Evaluating: 0%| | 0/80 [00:00\n","
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
questioncontextsanswerground_truthfaithfulnessanswer_relevancycontext_recallcontext_precision
0What is the focus of the 2014 Federal Trade Co...[Data Privacy; Intellectual \\nProperty, Priori...The focus of the 2014 Federal Trade Commission...The answer to given question is not present in...0.5000001.0000001.00.000000
1What adverse impacts should be assessed during...[organizationā€™s business processes or other ac...The adverse impacts that should be assessed du...Adverse impacts, including health and wellbein...1.0000000.9981030.01.000000
2What are the expectations for automated system...[Data Privacy; Intellectual \\nProperty, detail...The provided context does not explicitly detai...The expectations for automated systems in term...0.6666670.0000000.00.588889
3How should automated systems protect the publi...[detailed steps toward actualizing these princ...Automated systems should protect the public fr...Automated systems should protect the public fr...0.9166670.9473251.01.000000
4How can integrating pre- and post-deployment e...[While indirect feedback methods such as autom...Integrating pre- and post-deployment external ...Integrating pre- and post-deployment external ...1.0000000.9339811.01.000000
\n","
\n","
\n","\n","
\n"," \n","\n"," \n","\n"," \n","
\n","\n","\n","
\n"," \n","\n","\n","\n"," \n","
\n","\n","
\n"," \n"],"text/plain":[" question \\\n","0 What is the focus of the 2014 Federal Trade Co... \n","1 What adverse impacts should be assessed during... \n","2 What are the expectations for automated system... \n","3 How should automated systems protect the publi... \n","4 How can integrating pre- and post-deployment e... \n","\n"," contexts \\\n","0 [Data Privacy; Intellectual \\nProperty, Priori... \n","1 [organizationā€™s business processes or other ac... \n","2 [Data Privacy; Intellectual \\nProperty, detail... \n","3 [detailed steps toward actualizing these princ... \n","4 [While indirect feedback methods such as autom... \n","\n"," answer \\\n","0 The focus of the 2014 Federal Trade Commission... \n","1 The adverse impacts that should be assessed du... \n","2 The provided context does not explicitly detai... \n","3 Automated systems should protect the public fr... \n","4 Integrating pre- and post-deployment external ... \n","\n"," ground_truth faithfulness \\\n","0 The answer to given question is not present in... 0.500000 \n","1 Adverse impacts, including health and wellbein... 1.000000 \n","2 The expectations for automated systems in term... 0.666667 \n","3 Automated systems should protect the public fr... 0.916667 \n","4 Integrating pre- and post-deployment external ... 1.000000 \n","\n"," answer_relevancy context_recall context_precision \n","0 1.000000 1.0 0.000000 \n","1 0.998103 0.0 1.000000 \n","2 0.000000 0.0 0.588889 \n","3 0.947325 1.0 1.000000 \n","4 0.933981 1.0 1.000000 "]},"execution_count":69,"metadata":{},"output_type":"execute_result"}],"source":["fine_tuned_result.to_pandas().head()"]},{"cell_type":"code","execution_count":71,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":16808,"status":"ok","timestamp":1727144636682,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"vqpjQAua1gJq","outputId":"a77f73cc-3696-44a7-f551-34d9828943e8"},"outputs":[{"name":"stdout","output_type":"stream","text":["Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (3.7.1)\n","Collecting matplotlib\n"," Downloading matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n","Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.3.0)\n","Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (0.12.1)\n","Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (4.53.1)\n","Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.4.7)\n","Requirement already satisfied: numpy>=1.23 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.26.4)\n","Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (24.1)\n","Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (10.4.0)\n","Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (3.1.4)\n","Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (2.8.2)\n","Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n","Downloading 
matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (8.3 MB)\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m8.3/8.3 MB\u001b[0m \u001b[31m101.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hInstalling collected packages: matplotlib\n"," Attempting uninstall: matplotlib\n"," Found existing installation: matplotlib 3.7.1\n"," Uninstalling matplotlib-3.7.1:\n"," Successfully uninstalled matplotlib-3.7.1\n","Successfully installed matplotlib-3.9.2\n"]}],"source":["!pip install -U matplotlib"]},{"cell_type":"code","execution_count":80,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"executionInfo":{"elapsed":3603,"status":"ok","timestamp":1727145789851,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"Mtm7ufHF17_O","outputId":"45848914-2957-4a15-cb77-e210a5cb953f"},"outputs":[{"name":"stderr","output_type":"stream","text":["/usr/local/lib/python3.10/dist-packages/datasets/table.py:1395: FutureWarning: promote has been superseded by mode='default'.\n"," block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]\n","/usr/local/lib/python3.10/dist-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n"," table = cls._concat_blocks(blocks, axis=0)\n","/usr/local/lib/python3.10/dist-packages/datasets/table.py:1395: FutureWarning: promote has been superseded by mode='default'.\n"," block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]\n","/usr/local/lib/python3.10/dist-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n"," table = cls._concat_blocks(blocks, axis=0)\n",":24: UserWarning: FixedFormatter should only be used together with FixedLocator\n"," ax1.set_xticklabels(merged_df['short_question'], rotation=90)\n",":35: UserWarning: FixedFormatter should only be used together with FixedLocator\n"," ax2.set_xticklabels(merged_df['short_question'], 
rotation=90)\n"]},{"data":{"image/png":"[base64 PNG omitted: matplotlib figure with two stacked panels indexed by merged_df['short_question'], comparing per-question evaluation metrics for the base and fine-tuned chains]"
gNozUau6z2Vg/49uSjKm5vGRDOtX6B9T7wjJ8CR75WSCMGDmm0utJy/Lowz7ONUYaK9uyJcqBNrlds+yon8axNfI7s8m50JJTz2bSJGc+3/ek9tKFVAJUoJBUwGJUs35xjce0jJ7Osk1CoJd50Gd13Lh6G7EvNbFLQN8LDDRai2ZDLU/t4jxxHy7/G8N7acDek/c1daH27uf3O7d6F/qCdv3zCER6b35pPtyXy9N51jZ0q476tDvLLuJLeOi+K6EWG4teL350hULi71xinmsnKy/vUM5oICerz/Hh6TJze9s2co3P8XxG+AfZ9BwoYmGsqARNW6xyB2Cs5O7Vx/Xmg/pdmQvke533sWoIzfyg1mNCqJCF/H+d2mlaRhkS24a93xc/Fr8LyfLox0UwKHnHyZB0pZCaHrU6lAdZ5r7wxZAFtehuJ0+Ot7GHR92/ZNuDD7P1cCtj4x0EdcYDxfYrTlQDycPPB38Se3MpeU4hQG+A9ofqcuRGUpxklvxLlnw4wiL1cn/NydyCszkJxXTv9QpUZbj/++hcrNcQZDbUrjBENvqn1srIRDy2H4rR26Sr21DtCQHiFsSmn4vLUnT1/Wt8XThSVJwtNVi6erlpgWJKabLTKFFTVB3bI6Qd2a7N2C8toAb35ZNSVVJkwWmdzSanJLq4HSZl/DSaPCz1qioabmrp+7Upbhf1uSGg1WWxnVWkwLb+P1MZHEBXWSEgSDroOYyeDesDancGEi9FGcKYfM8jRlg6FCWfwm9wRc/JR9Oye0nLEKEn5V7sfNBkAlqQh2D+bKQUE89X0SRrPME7PiGBDqRaj/IGav/ICUkhRMFhMajRO41Zz0HlsN4+63z/toIyOjfAj2dCaruOqcF+dGRvl0dNfaVvIW5QTHLQAilFlDseGDydVW0se3T4d2JczHlX/N6cd9U3qybHcqX+xKIaOokmfXHuOt3+JZMCqchWMiu36gvL2ZjHhMn0bF3r24j6udAiybzUjqRupxqjUQNwt0HucI2sJj/j787Kbi2f3vMnf0I+3Rc6EjnPwZkCFkqBK0p3YRsghfV4fKbE8uqa3t2ViiQoQ+ivSCTRyUXOChU+AR2NFdFNqSsRI0zhd23qh1gVF3wW/PwvY3YeB1HXoeeqEqjBW8vPdlovRR3Nj3RtTdqUazqRp2vaPcH3ufqD99AUTQ1sFEeUaRW5lLUnFStwva+sWV4OeXC9de1ejz0f7u5JUVkJhbZgvaioBtG5Fl+O5WOPkTZByAy/7b5gtKlazfgKWsFI/pM1C71/7erEHbcRF9CTdH8MWu+iulB3k68/RlfZnRP7hN+1OXWiXh567Dz10HNJ/BXm0yU1huJK+mFEN+TVC3boA3r7y2VEOFQakBmVlcRWYTK6E35/JBofYP2J7er5RBsA6yRcC2XfTz78nucigynkaWZaTKQvj+VkCCEbeLk5yuInkLGMrAI0Q5ma8jo6iSSqMFrVri5rFRaNUqLLIFZ7UzVeYqMsoylGn0/a6A5K1wbFWXD9qqVRJPX9aXu5YdQIJ6gdvzuTjXaR1dpdz2nWM7wVnUfxGL+i+yW5e83Zz425Se3D4hmh8OZPDRtiSS88p5d1MiH21L5qqhodw2Pto2q0moT+3lRfDTTyMbDEhaZequLMukLboZp5ho/O+9F41PIxcbyrLPeVwPiwVZkkgvSWmHXgsd5qwZFVAbtHWk0ggASUWN17O16ucfy/YCyDPliLFMd7D6HqXM3owXL6zMy/Bb4cxhGPV/XSpgC5BSksIP8T/grfO26/d8uzj8NZSeUdYuERnQF0QEbR1MlGcUe7L22AJd3YbZqNTtA/Dv3WiTGH939iQX2GpE1SWbzRiSktD17Kb1ZNqbJClZIafWKdm2Fflw9WdttoqnLMvkvfce1SdPYqmuxmfePNtzdVfczHVSPtKm9gnk0kHB9Rbj6kx0GjVBnmqCPFuWnVRhMNkydeuVaiir5s/0IvamFjZ7jJySSiqPHKF81258Fi1E5dSKFVrbQl4CLL8KnDxg4Y+daqp2dzMsJJaPk1VYpGpyKnII9AxVgn6ZB5SMnuHtP71aaAPWE/m42SBJ/HHmD74++TWjQ0bjJ08EIMrPzZaFpZJURHpGcqLgBMnFyUrQNu4y+OkhyDyorFbfxf/fzegfzPsLhjao390RF+c6hMkAJ9Yo9/tdQf4nn1J14gReV1+N20Uj7ds3wFmrZt5F4Vw3IoyNx7L5YGsiB9OK+HJPOl/tTeeSPoHcOTGaYRFdPNu5nUh1vncrDx2iYu9eKo8cwe+u/2t8B/dzB6UWFxVzZ2ExPuPFyXCXVVUCSVuU+3F1grYOughZ3UzbxowIieODk2BU5WAwmXDSiFBGlyXLkLIdyrLAxevCjuXiBdd+0Ra96nBeOi/uGnQX8jnnTHZBFrNSnx9g9N2g0dm1O12d+KRzMNYvwW4XtC1MURZr0bopWUmNiPFXsjOtAyErY04OKdddj7mwkJ7btqL26F61fjvMkAXg6gvfLlKCt0uvgBu+BNc2OHkzm9HPnk0J4Dl7tm2zRbaQUpNhEuUZxfLsPADG9/Lr+gvR1OHqpMHVR0OYT8Mg+K7EfG74aHezxwhw15G+6C7M+fm4DBrUsQGA8jxYfjVUFoJPdLMnosKF6R3khWzwQdLlcaIggUC3QCWDJ/OAstiJCNp2fhYznPhZuV+TfXUk7wgbUzeiU+uIlocB0DOg/vdVlD7KFrSdFDYJ3P2VhR+St3aLEgmgBG6n9g1iT3IBOaVVnfbi3HlJ3gJVxUpphPDRlG76H5X79qMaOaRTBG2t1CqJGf2DmN4vkH2phXywJYlfj2ez8Zjyb1iEN4snRDO1T2CbLrjZnbgOGULE0iVUp6SgDayddVK2bTuuw4ehcnGBiDFUugShq8iisR+jr8lCtWsQUuTYDuy50KaqiqD3TChKrZd04qiZtmfKzgBNB22HhERhLh2AqdqXjKObiDr8HrgHwRXvd2Q3hbZQkKQEbNVO0GNE2x5blrtMxm2Iewj/N7iJC3dd2fEfoSARnL1g2CJ796bLE0FbB2ML2pZ0r6Bt6bpV5Pzsj3tPPYGqxms/WQc+iTnl9bZr/P1RubpiKS+nOj4e16FDG9tdaIneM+Gm1bDiWkjfDZ/NggXf22p0nS9Jo8Fv8e34Lb693vbs8mwqTZVoJA09PHoQn5MCNAxkdGctrvMY40fOtKmYcnJR6Towy9ZYBV/Ng8JkZcXQG75qswxsoXH+7jpUxnDMFmeyiishDCWD57dna4JCJeDcSWobC42TVDD/Gzi5DiKUgMz40PFoVVqiPKNYu1upg332CX2jF2atJRKOruwWQVtQgoajY3zt3Y22Z11Bvu/loFLjf/fd7Fr/GXdn/puxO47x3Njn7Nu/s0iSxIhIH0ZE+pCQU8pHW5NZeTCD/amF3LF0P9H+btw+PporhoTirBW17M7mOmIEriNqgxWGtDTS77oLjbc3UatXIXl584zxJl7gFcxmqMpzwlSlRuNsxsXPACp4xngT/0aF+Ol2UV7hcN1SJchUh3VWYKy/44xn
AT6f8Tl5lXm4aRsvX+ek0RBuupMTuaUUlJqJSvxdSRjpQkE6oUbKduU2dLhSl7YtlOXAtjeg5DRct6xtjimcn/AxMPZ+cPFW6rMLF0QEbR1MtGc0AOkl6RgtRrQqrZ171Daqj/+FoUSLydR0jVprrbXkvHLMFtmWlSNJEj3efhttaAgqnUjdv2Dho+DmdbDsSsg9Dl/dAIu3tMtgyhqYCNeHYzKpSC+sAKBnoONkJrSmzmPw0093bOcsFlh1J6T/Ac6eMP87Uce2A0iSRKy0mIMpRbiP6ads9O8Nvj0hPx4SNkL/xmt/C52EJEHoMOVfjd4+vento2RivZG9A4BegWdl2jYWtLWWSDhzqFuUSOjWZr6irB7vFQ6A2+jR7GQjuack/F1asCqmHcUGePDy1QN5aFovPt+ZwrLdqSTllvP4D0d4fcMpbh4byYKLIvB07R7jzvZgyslBGxiIU3Q0Gh8ffj+ezVdlg/E5czVzDu9ArqwdR2lczOwbFMcP/XNJX3sHL056inB9uB17L1yQOmPkwnKlHBZATIBjrb0hSRL+ruf+rIvxd+dEVimHTFEM07goJdlyT0BAxy7UKFygVGUcQ1vOFDCUw54PQLZA5iEIGdx2x24n+7P3E+QWRLBbMCqpGy066BEIU5+xdy+6jW70lyG0RKBrIC4aF0yyifTSdHt3p8143fEkYc/fg8/tf2uyTaiXC85aFQazhfSCinrP6aKjRMC2LQX2hVs3QPAgmP3GBQVsK4/8RcW+fchywzzSurWvEnPLkGXwcXOqWRDMcVjrPJ5dIzfI05n3Fwy1X53H359VsvtUWuWKdxP1poW2Z71IlVC3hndcTWmR42vt0COhrciybPu9nn2Byhq0TSpOqv3MdPeHKU/DvG9B333KxnRLGifoNQ0C4mybkorPvTBPZxOgd+aRGXHsfHwKT83uQ4inM3ll1by6/iSjX/qNZ9Yc5XRhRfMHchAlVUaOnC7mxz8z+aTYkw9ufYl/9L6C4c9v5JYv9jHh9EEu/WMnlsr64yhTpYpBu+MZm3WAw4W7iC+Mt9M7EM5bQZJS7/8s1jJuoV4uuDqJ/KqzRfu7IWlKOJibDGE1ZWOsWZtC1yDLkFITtI1ow6CtT1RtUsL2N9vuuO3EbDGzeMNiZnw/g4yyDHt3R+jExDeBg5EkiSjPKI7lHyO5ONmWedvVacJ74R7e65xtVCqJaD93jp0pITG3jEi/xq9eW8rLUbk51pXtduEV3jDDtqpYybhshbx336Vs82b8H3gAvzsW13uu7iJk8TmNTxd2FK2p82guK0eurEDj346ZW9VltTU55/wXoia032sJDVj/H8TnFNdujLsUdvwHKvLEVMLOLOc47HxHKWvQ8xIASgwl7MzYSbRXNB6qMMqqTahVEpG+9b+rIvQRSEiUGEooqCrA16WmhEA3KYvgSMr/2IPK1YX0fCVoG+3VtcZr7joNt42PZuGYSH46fIb/bUnkRFYpn+1IYcmuVC4dGMziCdH0C2ndmKArKq40kppfTnJeOan5FaTklZOSr9y3ZlQ2ZEAlW/jboe9p/JNaQkLmls1F7O8nd7uyZw5hx1uw/3OY+ChMfsK22XpRLsbBxrM/Jf3E+pT1TI2YymUxlzXZrlK7H/eeL7KnNBZixilln1J3wMjbm9xH6GQKU5QSBipNbeC9rYy9H458q9Txz08E35i2PX4byizPxGAx4KRyIsSt8TV5upzT+5WknfEPQ9R4e/em2xBBWwdUN2jraGICaoO2U/rUXwzJmJlJ5qOPYTh9mthfNyKpRYWwC1Y3KJR5UFmcbOYrMPDaFu0uyzKagABUrq54TL2kwfN1g7bH45VBbi8HKo1wtpbUeSz44guyX30Nr6uuIviZf7VfZ3TucOt6pSbn4Bva73WERoX5anGNfp0thgLKjTuU+nChw+CBo+DZw97dE87l2I9waJky5bMmaHs8/zh/3/p3IvQR/L3fZwBE+rripKk/YcpZ48y1va/FS+eFJILyXYfJAJ9cAtGTYMIjoHMn+6WXqD5+nMgrVeT0VhGl7xqZtmfTqlXMHRLK5YND2Bafx4dbk9iekMfqQ5msPpTJuFg/Fk+IZnxPvy79N1tcYSQ5v5zU/HJS8ipIyVcCsyl55RRWGM+5r7+HjkhfVyJ83YjycyPC15VIXzf8E/+iYHXVOfaU8Csz0iddRXJPxxvTd2l1F5sMu6jeU7ZFyPwdazx7MOcgm9I3NZtQNCioJ1+lSlSZjLVZmik7xMXorkSlhovuhOpScGrjRKmg/tBzOsSvVxIV5rzdtsdvQ9bz2AjPCNSqbhJ32P4GJG1WZnaJoG2bEUFbB2Qd+HeXoK05K4WS9/+JU58BuF3/93O2jfFXvhjOXowMQO3nR/WpU5hLSqg6dgyXAQPapb8O69CXUFkIP9wO5bkw+u5md5EkieBnnyHwicdROTs3eN4WtNVH8WN2zXRhB1qE7Hxow8LBZMKQmto+L2CoqF1ozMVbBGztpG+QL5K6EiQziYVJDAwYACqVCNh2BSfWKLd9LrVtqvtZF5+tzCo4u56t1VOjnmr8uLkn4dBy8I2FoTe1XX+FC5e0Gc78CaVZMOVpZFlGGxREVWY6icGVBLoG4qrt2gs4SpLEhF7+TOjlz18ZxXy4NYmfjpxhe0Ie2xPy6BOs544J0cweGIxW3TmrtxWWG2wZskrWbDnJ+RWk5pdT1ExgNsBDR6SvG5F+SnC27n13XeOnY8UHi1rUL++y7jOmdxin90F5Dug8IbJ+YMMWtHWwTNsrYq8gyjOK/n79z9luUtRAypY8B7KGAu8B+Kh1ys8yPwH8enZQb4UL4hUOM19uv+OPf1AJ2h76EiY9DvrOmcVq/dzuLjOfyTkBJ9YCEoy9z9696VZE0NYBjesxDheNCwP9B9q7K22i6o9fyfp6L1r9XmKbDdrW1HnMLWvwnMrJiZDXXkMXG4M22E41QLuzGS8pK6L/8T6sf0JZ4fOSf7XoqnhjAdtSQym5lbkARHpGEp+zD3CsRcjOh9uY0cRsWI9TeDssWFJyBj6dBqP/Bhctbr690G7CvF0wZizCaHDH67JGMvSqSsDJXQnkCp1HYSpkHVE+K3vNsG2uW9c0IdV6gaqVn3WpO5XpuMGDRNC2szm2SrntMwdUaiQg7P33WHnqB/J3/pNRXaSebUv1D/XkvzcM4e/Te/PpjmS+2pPO8TMl3P/1IV5df5JbxkVx/Ygw3M4KZpotcotKAJ0vWZYprDDaMmRTagKy1vvFlecOzAbqdUq2rK8bEX6uyq2vkjl79ntpiZaWMCp0h9SaOtZdOVvZoZyoqS3fa5pSy7oORw3a9vPrRz+/fs2283DWEerpQUZRJYmFJnxiJoOpGoyiVrZQI3wUhI+GtF2w612Y/m9796hRdWeMdgs73lJu42aLdUzamAjaOqB+vv3o59v8l2JXIZWfwT2kCk1Q81fRrAOghJyyRge37uPHtUsfBZTg0IwXwT0AfnumtrbmpW+BuuFHkTErCyQV2sCARg9n/aILcAlAK7mSVrO4nMi0PTeVs3P
7BGyry2DFtVCUBns+hCELajNuhQ6nUauIcI8jPqeM5LxKwn3qnPx9d4syBX/RTxB+UdMHETreiZ+U2/Ax4OZn21x3YL/DekLfRKatRbaQXZ5NQXVB/e/6PpfBTw8pGZ0FSeDTTTI7ujqToTaA0++Kek8ll6aAJHWfLJyzhPm48vRl/bhvSk+W7U7l850pZBRV8tzaY7z16ykWjIpg0dhIAjycWffXGZ5Zc4wzxbXlAoI9nXn6sr6tWmxTlmUKajJmU/Iq6mXLpuSVU1JlOuf+QXpnIv2U8gVKOQNXW2C2rReNch0+DE1QEKbsbGXq99kkUAcGciq8EJOxnNzKXAJcGx8zCZ2ILNf+n4+7tN5TFQYTGUWVgOMFbVsjJsBdCdrmlDFi3tf27o7QGhUFkH0UegwHrUv7vc6Ev0Pi7y2a2WkvdWdRdXlF6XDkG+X+uAft25duSARthS7P1bsc1wkFMHpes22j/NyQJGVRiIJyA77uuibbioyFdiBJypQVN39Ycy8cXAbl+XDNZw2+uPM++ICir78h4MEH8L3ttgaH6uvbl7VXrKWwqrAmCA9erlr83J0atBXamdmkBAKzDoOrH8z/VgRsO4HYAHfic8pIyCljYq86GVuSCixG5aRRBG07F2vQNm52vc11g7ansrOBput3H8w5yKJ1iwh1D2XdVetqn3DzU+qLJW2Go6uUz2LB/pI2K4t0ugcq2UHUjj+6XRZOE7xcnbjn4p7cNj6aHw5k8PG2JJLyynlvcyIfb0tmRKQ3OxLzG+yXVVzFXcsO8P6CofUCt7Isk19usGXI1l34KyWvnNLqcwdmgz2dbeULrMHZSD9XInzccHHquLqDklpN4BOPk3Hf/cr4qU7gVkZZiizoiScILf8vqSWpJBcni6BtV5B7QrlwptZB7JR6TyXlKuXbfNyc8HFznPFsVnkWe7P20su7F719ms/Q0+oP4RqxmlUpI7l+5NMd0EOhzcRvgJV3KLWcb93Qfq8TO6XB/6/Opu4sqi5v1ztgMSkLT/cYZu/edDsiaOugkoqSOFl4kv5+/QnzCLN3dy5M3inltgVp+M5aNT28XUgvqCQhp6zRoK0xK4u8997HmJFB+Ccft3VvBYChN4KrL3x3s/IBr2r4UWTKygaLBee+fRs9hEalIUIfQYQ+glUHMwDoFeAhAu0tYC4tJfc/b1H51xEiV6y4sEX3ZBnWPabUjtI4ww1fgU83GHx0AyE+Rpz8N7A6fTO38mrtE3GzlZV1T6yFqc+KhTs6i/I8SNup3K8TtC03lpNdoQRq3VTBlFZloJKUi5CNifKMQiNpcNG4YLKY0NT9fO07VwkSHlslgradxdGVym3fy5XFWYDky+eicnGh7OICcO4mJ3Qt4KxVM++icK4fEcbG49l8uDWJ/amFjQZsQQlcAjz2/RH+PF1EWkElKXlKcLbsHIFZSYIQTxdlwS8/t3qLgIX7uOKs7TwLwuinTYO3/kP2Cy9iysqybS/T+xD3/NPop00j6pcVtqDtRcHiQlynZ704Fz0JdPVnTDjqImR7svbw5PYnGRk0kk+mf9Jse293C+ryNE5XeNVuLM0GrTM4e7ZfR4ULl7JduQ3r4M+qTrZQXWFVIUXVRYBS5q9LK8+D/V8o90WWbbsQQVsH9cb+N9hyegtPXfQU18VdZ+/uXBA59yQSgF+vFrWP9XcnvaCSxNxyLor2bfC8pNVS9MMPYDJRnZiILiambTssKOJmwc2/KMF2tbbB02Hvv4chNRVtj+YXTjpVszBPrKhn2yIqFxeKf/wRS2nphS+6t/s92PuRcv+KDyBsRNt0UrhgYb46dH6/k1ytrh+8i71EyfApSFIyfgL62LejgqI4XVkkTKMD7wjb5pTiFAB8nH3ILlSCSZG+bug0jQeWfJx92LNgD1pVw89VUSKhkzEZagM4fecCYC4qovqUcjH61MVKpp2jBG2tVCqJ6f2CmN4viM93JPOvNcfO2b6o0sj7m5PqbbMGZq3ZskrmrBKgDetkgdnm6KdNw2PKFCr27efw4QRe2J2Loe9A1k+bDD8/QlT8JjZ76W1ZW0InN/puCOjbaHDRGrSNcbDSCK2dVdA/oCfrs6HUrCRt8P3tytTsy/4Lwxa2VzeFtmAN2kZ2UEnC0/tg84vQcxpcdEfHvGYLWP/mQ9xCcNG0Y5mIjqDTw+zXIHGTcjFKaHMiaOug+vv1p8RQgodT167/aSnK49QnZpzc/Yn8WzgtWVInxt+dTSdzSWxkMTIAja8vgX9/GF3vOJyiHOtEqcOFDq29L8uw+SXofxX4KwF4p4iIJnaE1/a+hoeTB1f3upr4mkFuLwcb5J4vSaMh4KGHUPv6oIu+gKDNmcOw/knl/tTnoN/cNumf0DaGhEQiH9MiqYycLj1deyVf56EMquLXK9m2ImjbOYQMgXv2QmVRvc3WQEy0Z3TtBapmPusaDdiCKJHQ2RjKoP+VykllTWkElacnMet+IfXgNkpKXsFd646/S8sWpOqOvFs4RXxsrC+TewfYyhr08O5agdnmSGo1bheNJDy2L2UHvqTX/i1UGyeg6zGCqGPLgNoggNDJaV2UxIVGOOoiZK0N2o4O6wNHwKwuoLiqAk/rhc7UHSJo25mVZEJhslKmq+Y7r92d+RMSfoWcEzDs5gYL/9lLtyp/pHFS1jIZssDePem2xLLRDurOQXeyZOYSZkU3PmjoKgyHtyObJUzVGlS+zS9EBrVXr60Do8b4LFyI26iLkMTK6h1nz4ew5SUsH0xHTv7jnE2NFiPLjy/nnUPvYLQYbb/Lnk0szCM05H39deinTkXl1vgU6xYJGgDTnocRt8OYv7Vd54Q2ERvggaVaqW/4V258/Set0++Pr+3gXgnNcvGq97DuwN52gepCPuv6zlVqT0vi+83uXH3gsv/AndtspREkScIpMpLUoUqN1ijPKIcu+xPg4dyidvdMVmriXtI3kNgAj24VsK0ryEXF25v/w/0HviblRDLEzSJKVnJwkgtO2rl3woVKyBVB25bo6RsEZhckSWZ3+kmIGKs8kbKj8UX7hM4hZYdyGzSw48pYDJ6v1IwvOa2UBuskulU9W6HdiRG70KXpRs8i5ttP6PHiUy3eJ6amTlRTmbaCnfS/CkKGUHDYQPzcmyj4zz+bbGo0G7l7yN1c2fNKPLV+pOYrCzf0FOUROpYkwZh7YNarnapOlKBw02nQyUEAHDxz1sl875mABGcOQfHpDu+bcJayHDBWNfpUSkkKoAzsE3KUTNvmPuu2nt7K9Wuv5+mdjSzQMngePHQSxt1/IT0W2lIjn58Xh1/MuqvW8fRox15kZ2SUD8GezjT1DSOhLBw2MsqnI7tlN2pXV1JCe3HAvycpqTng5EZU9CUAZFcXUm4st3MPhXP68V747VkoOdPgKaPZQkqe8vtzpKCt0WIkrSQNUGaUtIRKpUKHMr45kHkKwkYq62OUnIbClPbqqnChUju4NAIodY5H/Z9yf8d/wGLpuNc+h26RaWuqhk+mw96PlXJPQrsRQVsHV22uxmwx27sb503SOuE0YAyuM1uejm
8dCGUUVVJpaPq9mwoLyf/kE3Jee+2C+ym0gJsfLFxLeXEQ5moVqoOfwOHGr4i6al25bcBtPDPmGZLzKrDI4Omixb+RheWEphkzMij8+hsqjx5t+U6VRbD2QWW1cysRsO20/HRKTeiTBWfVOnQPgFF3wcxXwOkCsq2FtrHhH/BKNBxa0eCppCLldxepj+RUdsuysGRZ5mj+UY7mNfJ/W6MDtaiOZXfZxyDtj3onkLIsk/PWWxT/9BNUGwh1D23RSurdmVol8fRlyoKkZ3/TWB8/fVlf1CrH+R7aftczPDn2Do46KesyeA6azzUlpdxTWo3ZVG3n3glNqiiAg8tg2+tgqmzwdGp+BSaLjKuTmhDPlmWYdwenS09jkk24aFwIcA1o8X6+TtbxTYIyjgmpKbmWuqM9uim0BWumrTUzuqMMvwV0nsri5Sd/6tjXbkKxQTmP6tJB2z+/hPTdsO0Ne/ek2xNBWwc2/6f5jFg2gpOFjjWdysfNCW9XLbIMyXlNZySYsrPJefU1Cr5YgqmwsAN76MB07oSv3kbYTX3w6FEOP9wGu98/5y7xNZlnvQLdHXoK6fnI+9//yHr6aUp+XNOyHUwG+OZG2PcJfHdL+3ZOaBMRemUweLospeGTM15UFmVw8e7YTgn1mY1w6hcwloN3/cG7yWIitTQVAC9tD4orjaik2hkjTbGeBKSUpGCRm8gqsVgg89AFd184Tzvfhk+nwe/P2jaZsrPJf/9/ZD7yqJjiW8eM/sG8v2AoQWcFsoI8nXl/wVBm9A+2U8/sw1oe5WRNjWuiJvLPaifuyMtGn7bHjj0TzunUepDNENCv0UUgbYuQ+TvWeNaacRipj0TVirI9ER7K91y6dXxjzd5MEUHbTuuqj2DavyFidMe+rrMeRt6m3N/2Rqf4fl0+azk7btjBYP/B9u7K+bGYYcdbyv3R93SaWsHdlQjaOjCNSoOM3KUXLsh/9FoKn78F8+lTrdrPesKbcI4SCc5xcXheeSWB/3gKlU5kcHYUydkV98e+Qz2uZoXP9U9Cbv3f77H8YyQVJ2G0GIm3ZZ6Jerat5TZ+PC7DhuEU04LpaLIMa++H5K3g5A5THHvKblfR1z8WgCJjBnInGKQKjUjdoWSuu/opUzzrKKouood7D9y17hSXKBnR4S1Y+T7UPRStSku1uZrMssyGDUzV8NYg+HAi5Ce22VsRWshUDSdqsn16TqvdbrHgPX8++lmzeHLfs7x76F0qjBX26WMnM6N/MNsfvZgvbx/FW9cP5svbR7H90YsdLmALtUHbhDPFyGazkjnf/yrlyWOr7dgz4ZxO1NSQ73Npo08ninq2rdqvj58yvik0ZigbImuyN61T8IXOJ2SIUlbNHskCF90FGmfIPKCcy3QCeic9WnUTC8d2dsdWQUGS8rsUi/+1OzE/zoFFeUZxIOeArRB2VyMbDeSuOYxskXC7PA91j14t3jfG3519qYUknmMxMoCQF/59od0UWkiW5drMApUKZrykTOF2CwD/+r/b53c/z5G8I7wx6Q1OZStf/D0dbJDbFvTTpqGfNq35hgBbX4NDy0FSwzWfQ/DAdu2b0DaGhfTk02QJs1RBflU+fi5+9RuU5SjBI58oiJ5klz46POticL1n2hajsvJz8WPNFWswWUws363UHm7JBSq1Sk2EPoKEogSSi5Pp4dGjfgONDnxjoDhNGXiPf6gt3onQUkmboboY3IMgrHYFbW1ICEH/eIqs8izWfjcVjaRh8cDF9utnJ6NWSYyO8bV3N+yuV5A7T/3xOcNyTlIydQmeQwZjGrqIzMDelIYMop+9Oyg0ZKiAhN+U+9aFQM9izbR11KBtS+vZWg0P6c2nCWCQsjCZzWjCRsHQmyBinDKTRCwmLdTl7g8THwEXHwi7yN696dpkGba/qdy/6E5RZq0DiE8zB2b9cuyqmbZydjxeseW4hRjQxg1v1b7WAZFYjKzzKFm7luSrrqZ4TU0AQ5KUQMLQG2sbFWcgGypqr8rro2yD3AtaTV04t8PfwqbnlfuzXoWeU+3bH6HF+gT5IBuVCxsnG8uo3PuxkkG956OO7ZigkOXajMs+lzXZTKPScCq7ZYuQWVmzlpr8ju83V7k9uqpFxxPa0NGVym3fyxsNLOjUOh4e/jCL+i9Cq+qiWThCu/F31+EqWXA2Gzm9cz8Au425zD78Ok/tfraZvQW7SPxdqWPrGQ5BjV/0rlsewZGcb6btsB4xyLIaSWXkSHYa6Nxhztsw6DoRsO2Mfn0GDi6H6lL79WH8QzD8ZmVxMjtacXwFizcs5qekzlFft9USfoOsI6B1g5HiwnJHEJ9oDqzZE7pOTlVxmqChJYRfE4SkbV0dlZgA5YpQQjOZtgCy0Ujp779TtHLV+XRTaKHi1T9SdfQohtTUxhuU5cIXl5K37HLKjGWoJBWBrqGk5Ct1iVsayBAakk0mDGlpjT+ZuhNW16y6OuZvMOLWjuuYcMH83XWoTIEA7D/TSP1ya8ZPwm9KJpDQsTIPQmmmMvCNmnjOpvG2C1StDNqWNPEdH3eZkjmfdViUSOhIpmo48bNy3xo4B2SLBcNppYyJt7M3C/st5L6h99mnj0KnJkkSuydfyx0XP8ypUcpF1CjPKHRqHS4aF+ROsjq6UIf14lzc7EYXb7VY5DrlERwna02W5fMO2rpqdWjM/gDsOX28zfsmtKHyPNj+hnI+YTbauzcKO5YMO5R7iF1ndpFdkW23PlwQa5btsEXg6mPXrjgKEbR1YNYvx9SSVMwWs517cx7yauqc+vVs9a7Wq9jJeeWYLef+0C7bsYPT/3c3Oa++imzsJF803VDIq68Q+MQTeF0xt/EGRWlQnk9Szp8A9HAN4nSBEYsMemcNAR6i7vD5qDp5ilOjx5A6f0HjNU91enDzhz5z4BKRwdPVSJKEpyYUgGO5CQ0bBA1UMn9MlZC0qYN7J9hqHPa8pNHMjzt/vZP5P83ncO5h20XGni2s393sbBo3X4iaoNw/tqpV3RYuQOKmRksjGJKTSbzkEhIvmSrqTwvN8hzQjzR9ECdzlIttIW4h7LlhNytc+yO9MwxKGqllLdiPi5dS+7GJerZnSqqoMJjRqCQifB0naJtflU+psRSVpCJcH97q/T01IQAcyYlXNljMcHqfMotI6DxSaxaHC+jXOYJ8B5bCOyMgL94uL39r/1t5dsyzjA8db5fXv2BT/gG9Z8Hou+3dE4chgrYOLNgtGJ1ah9FiJKMsw97daTVz+jHlIplfy2vZWvXwdsVJo6LaZCGzqPKcbd3HjUPXpw+ec+diqao6z94KzdF4e+Nz041oQ0Mbb9BjGNzyC8nuSj276MJMMhOPANAz0MOhVtptS06REcgGAxaDAVN2I1d8g/rDbb/BlR+K6WZdVA/3CABSG8u4lKTabNsTXXSaVlc2eD5M+ScMbbiIgyzLHMk9wuG8w1RWSxSUG5Cklk+dbdFsmn5XKLfW6fpC+0v4Vbk9qzSCITUNNBo0QUHsy97HqcJTGDtLRpLQ6Vgz7uNryqZIkoRKrVEW2ClIgiPf2bN7wtlmvAgPJ
0D4mEaftl6Ui/B1Rat2nLFWUVURsV6xROmVTPHWGuw1jaozc1FV9VY2GMrgk6nw00PiwkVnklITtLUuFmdvJ3+G/HjY8R+7vHxvn95c0fMKenq3PvGsUwgfBTd8CZ5NnLMLbc5xvhWEBtQqNZH6SKBrlkhI/2gXJ78Poiy99fuqVRLRfjUlEpqpaytpNET98D2Bj/wdtYeom2pXgf1IHqgEGaIqShi9eR4DpcQWTxcWGlLpdEStXEmvnTvQBgUpG42VyrRtK30waF3s00HhgvX16YuxpD/Opr6NN7Bm/pz8BcymjuuYoCwGNv4hiJ3S6NOfz/ic1ye+TnWlcrEqzNsVFyd1o23PZv1+L6gqoLi6uPFGcZfWlEg4IkokdJSZr8DN6xrUgfO4eDK99+8j9LVXeWzrY1z141UcKzhmp04KnV2vQA8G58bTf+0SKg4cqH1i4LXK7ZFv7NMxoWlqTZMXvx11EbJY71hWXr6SlZef34XDiWETMRaNIqfAU9ng7FlbM9gaKBTsL2W7chvRSYK24x5Qbv/8Goq7XuKa4HhE0NbBdeW6tsaCKmSTCk3k+a2Ta81WSmxBXVuRxdl+Kg4cJOPBhyjf/UeL2idX5QEQ5RaMq6mIL52eZ6zWPtNbugtddBSSuiYQZLHAyjvhk+ligaJuYlSPgVRlLMBQMK7xBmGjlNV0KwsgbVfHdk5okiRJ9PTuybTIaaTmGQDo2YoTeletK0FuyoWYc5ZImPky3LIevFtXT1A4TyoVRIwGv9iGT+l0VPt6kFOZA7S+xqPgOHoFejDx9EFm/PUrhb8ppW02pGzgqtM/8ryfr3IhJlsE/e1OluHMn83Wz3TUoK3V+Z5n2c7lcstrN0bWjHVSt19ot4S2UFEAOUeV+50laBs2EiLGgcUIu97t0JdOKk7im5PfcDTvaIe+bptY+wCsexxKs+zdE4cjgrYOrtmFSjqx2J0HiF7xHroRl5zX/jH+SqZtYjOZtnVVJyRQsW/feb2e0Lii77+j5OefKV7zY4vaW/9Wo2a+wT71YHJkL/zqBu4tZkjepkwNTN6mPBZa7rdnlPqWsgXc/OzdG6ENWE8Ck3LLsTRWw1utUWpTSWrIESf5Hea3Z+Hwt2Aob7apdRGy2FbOKojSt+DC7MjblaluovxJ+2tBrdqUkhQA/Fz80Dvp27lDQlfl7ebEiciB/BQ5irwYZQxkkS2cKk7kpF5ZnElk23YCZ/6EDybAuyOVi+JNSHTQoO2F1u+O9ndD5ZxGoWoHZ0qKlI3WwKDItO0cUncqt/5x4O5v377UNb4m23b/50pguYPszNjJc7uf46MjH3XYa7aJwhTY/wXsfg9Kz9i7Nw5HY+8OCPZlDdomFSXZuSetJzm7ohs6+bz3jwmwZto2f8IMUPLzz2Q8+BC6uDiiV4n6f23FZ/58VDpn9Jc1vjhDXRXGCrLKlat7wV69mVj5EHpLCT+GRyoNjv0I6x6tX8dKHwIzXoa+c9qh991HwZIllHy7hICwP3ENAC5/tzZbQejSwn1c0aqhSs7naM5pBgSFNWw06TGY9lznWCDCEZRmwbbXlfsPngCnhgvPrEtex5nyM4wPHU98dsNFyGSzmYp9+zHl5qLx98d1+LDajPka0V7R7Dqzq0vOpul2TNXw/hiImgiX/AucawOyVSdOkPvWf3EbPZrksV6AyLIVmlc2YizveMcRHtaPQdQZ06tkZEA6/C1c/E9xQcaerLXi/Xqd8/dgLdUW6+9YZdjmrp6LTq3j1YmvEqGPaPX+Hs5a3MOXI6uL2ZY6hWsHjFNmMiApNUtLs8AjqO07LrRc3knltrNk2VrFTFFKaWQdhj0fKuPgDmAdj1kXi+0ydr4DshmiJ0PIEHv3xuGIoK2Ds35gJBUnIcuyQ5UBsE6paa6mrZXbmDGoXF3RhoZiqahA5erant1zGM59+xL0zyZqbZ7FmmXr4+xDYamWKosarc6fQL1OCdh+c2PDnUrOwDc3wbVLROD2HCp3bKQyPoMyrQ7Xax+CQdfZu0tCG9GoVXiH/0Sl8zaWHs3klaC/N2zk1UggV2g/1hP50OFKzehGrEpcxY6MHXg4eRCfo9Trs9bvLtmwgewXXsSUVTtFTRMUROATj6OfNs22bUbkDHp69WSQ/6Bz9+fMn0q2SUBfJfNWaHuJmyA/QcmsnvVavacq9u+nbNMmZJOJpP5K1qQ1S1oQmtIr0IMdCfmcqlmMLEIfgYREibmSAhdPfEtOQ9pOcQHWnk6sVW77XNZkk4JyAwXlSgmcmICGF/C6qwpjBUnFStKQp5PneR/HU+pNXnk+GYU1C0u7eENgf8g+Aqk7oP9VbdFd4XyNf0hZbNVUbe+e1CdJSm3b726GP/4HY+4Fp/Y/t7fNGO1KF2bLcuDgUuX++Aft2xcHJYK2Di5cH64M8AwlFFYX4uPcNbKsil67F0P8CTzm3oDLzJvP6xjRNeURrIMlHzenc7ZXe3nRc9tWVG6OM6DqbKxXJ6M8o2ozzwLdkWQL/PxQE3vJgATrHoO42aBq2SI+DiXrL7x0W3EdYcB9ygyY+Ki9eyS0sUCXEJItKrJKC5tvbKoGTetXcRZawRq0jZvdZJOU4hQAfJ3CyCtTannH+LtTsmEDGffd32CqvSk7W9n+1n9sgdvBAYMZHDC4+f5kHoJ9n0LQABG0bS9Ha2bo9L28Qcad2+gxBD7+GJrAIJKLfwGULGlBOJfegUpWZkZCGobTepx79CDEPYSMsgyS+87CV9aCqyhzZDf5iUrJIUkNPac12cxazzbUywVXJ8c5NXfWOPPj3B9JK0nDy9nrvI8z2ecBluxKpTqqzsXnyHE1QdtdImjbGXTWcmt9L4chC2DQDR224HLdc9kuY/f7YKpSEg0ix9u7Nw5JzJdxcC4aF96Z8g6rLl/VpWqnlWzeTf6WdKoOH2y+cRNcnTSEeikf0EktzLYVAdu2Yzh9mtz/vo0hPb3F+9QL2ubUmS6culO5CtgkGUoyausqCfUdXIqbTxHeU4ahXfCBcvVZ6FbGBFxG2YnnCJNvaLpR7kllAbqPpnRcxxxRVTEkb1XuN5F9VWmqJLNMKfNiMSg14EK9XHDVSGS/8GLjtVFrtmW/8CKyuZW1vPtcpgQWso4ogQahbZmq4eTPyv2+cxs8rYuOwmfhQvQzptd+z4lMW6EZPQM9uPbUb9z7vwfIe+99oM5aFb2nwJy3ISDOnl10bNaLc5Hjzll6yBq0jXGwerYqSUWUZxQTwyZe0HFqFyOrcy43/Ba4eR1M//cFHVvo5lTq2nJwHXDuU2IoIa9SuQgfqY9s99drE1XFsPdj5f64B8Q5op2IoK3AhB4TiPGKQaPqOld3PSMq8Iotx2X4qAs6jq2ubSsWIwMwl5VhOH36gl7b0RWvXEXee++R9fTTLd6nqKoICYkofRTxNdMBewa6Q1l2yw5gbVeWA1Ulre1y9zX9RZj2PFy3FDTnzjgXuqa4QF9AbTs5bJSbP5zeq2SnFHS9OuddRvxGZcViv17g17PRJmklacjIeOm8OFOgzA7oFeiu1LDNOseqvbKM
KSuLin37bZsO5x7mu1PfkV1+js9JVx+IrjlxPipqtre5xN+hugQ8giHsoiabGS1G0krTgC6WhSPYRa9Ad1L0wZiRqC4qBuoEbUUda/uzBm3PURoBaoO2sf6OFbRtK7Zydzl1FpPy76XUthWzhuzr93/DF5fBqfX27knLXODCeM2xfi4HuAbg7tRF/r/v+1QZv/j1VhYtFuxCBG2FrsdYhaf/aYKHF+N80dQLOlRMTYmEcwYyzlLy88/EjxtP9vPi6u2FcB7QH7exY/G8quXTlv4x+h/8Mf8Prup1VW2mbaAHuAe27ADWdltfhZcj4MPJsOEpZTBRVdzat9C1Wcy1KxmrVDDmb5hNGkrWb6B4zVr79k1oc7WZKOdYeNHVp7b2ofVkU2h71hqHcU0vvmit8xflGWX7fuoZ6IEpN7dFL1G33Ut7XuKZXc/wZ+6f596p3xXK7dFVLXoNoRWsP9NGSiMYTmdQvmsX5tJSTpeexmQx4aJxIdCthd9rgsPycNaSFTuAqy99nvxHngXOCtrKMpzer9SrFjpWWQ6k/6HcbybQYVuEzMEybVccX8FHhz8itST1go4T4GXCLfYFcv3+Tnl1J6ub6ugSNiozizr7OVZlkXI++NFk5fyonXTJ0ggDr4PR98DER8SilnbUdVIrhXaTUZbB+pT1aCQNN/W7yd7daV5BIiCDs9cF18hpUSDjLLq4PshVVRgzM5ENBiQnkZl4PjwmTcJj0qRW7+eiccFgspCSp/zOegW6g8cY0Icoi47R2FVSSXk+YozysDAVZAtkHlD+7XwbJJWyimjkOJjydPfOOJVl+OVRqCyAy98DrTMAFfv2kXHffWjDwvC8rOmAktD1RPu74+S3gQr3eH5N1nFJVBM1qeIuheQtcHwtjPlbx3bSEcgyVNbUFT5H0LZeKZhjyqyC2AB3NGb/Fr2Mxr+23fDA4eid9LhrmwkIxF0Ka+5XMq3zEsAvtkWvJTSjbmkEa2C8jtJ1v5Dz2ut4TJtG8sOXA8q0SZUkTo6E5kWH+pBWZuZkdinDI31sCwwnFydD3in4+GJQaaHPnHNO0RfamIsPLPwRMg6AZ+g5mybmOGbQ9ttT35JQlECcTxwR+ojzPk6sbwCSuhpJsrAn4xSTowcoT2QegoPLlIVWx97XNp0WWq6qWFnkFCBirH370hyVGg4shaoi5cJ638vb5WW6ZPkjfYgoM9IJiBGhwJmyM7y5/01WnFhh7660iCnhAKZKFbJvzwuuqxJ7HuURdNFRRK1eTdTqVSJgaycp+eWYLDIeOg1Bemfly3bGyzXPnv03UfN4xku1i5DN/wYeOAZXfgRDbwKfaCWIe+YQHPuxfsB2/xdKAKuigG5j93uw9yP46wdI22Xb7DpiJLpevXCfNAnZYLBjB4W25q7T4OZegNolnd0ZfzXdMK4mIyj9j2bqRAvnRZLgptXw4HEIGdJks7oDe9uiiwHuuA4fhiYoqOnvPklCExSE6/Bhtk0PDn+Q/039H2NCx5y7b64+ED1JuX9MlEhoM8ZKGLZIKYvQY2TD59UatKGhuAwa2DWzcAS76lWzGJn1c8L6t5NZlkmldzgE9lfKsRxbbbc+OiS1BqImwLj7z9msvNpERlEl4FhBW7PFbMuwvdDPO5VKhU4OAmBfxsnaJwpTlLHu4W8u6PjCeUr7Qzm38o5q9sKF3ek8YORi5f62N9qtTELdWVSC0BoiaCsQ4xXDzKiZXBF7BXI713JpC4WrfiZ+dRBZ2y+8r9ZM2/SCCqqMLZ8O4dy7F5IoxH1ezGXlFK9ejaWyslX77crcxcJfFvLh4Q85VVPPNjbQvfb30HcOXLsE9MH1d9SHKNv7zqm/3TMUBl6rLNRx70EliHLlxzDp0do2FjNs/Ad8PR9eiYb3xyoZqsd+hPL81r71zuH4Glj/pHJ/2nMQM9n2lNrdjegfVxP05BPigkQ35OukrKx86lwLTXn2qAkmyrXZgULb04ecc5qZre6Zcxg5pcp0z56BHkhqNYFPPK40Ovs7qOZx4BOPI6nV59evfnPBKwJ0nue3v9CQi5fyWXvrhkZ/5743LyL2t1/xuflmEbQVWq1XoAcxRRkM+OglMh97HG+dN546T2Rk0krSlHEOiMBVJ5VUM9PPx80JHzfHGXdllmVitBjRqXUEuwU3v0MzfLQ9ADiRn1C70Zrdmf1X90q86CpStyu3kZ08y9bqojtB46Ik8CRtapeXSClOAbrId/yR72DZ1ZC+x949ERBBWwHwdvbmlQmvcMegO7pEINJcVAySjFOPsAs+lp+7E3pnDRZZyd5sLdliwVLe+v0cWen6dWQ++hipNy1s1X4nC05yIOcACYUJ9TLP6uk7B+7/Cxauhas+UW7vP9IwYNsYfQgMvAaGLKjdZiiH/lcpCwYhKwO/P/4H39wIr0bDqv9r1Xuwu9P74fvbARmG36rUKBIcRkTNSrWny5qpH2edti/q2rYti7lFdd0ssoWUkhTlgTEAgBBPZ9x1SkUr/bRphL71nwYXVtQ+PoS+9R/006Y1etzi6uLmL8wOmgf3/QkXLW62n0LbklQqKk2VthXVBaElegW6IyHTN34fpZs2gSzbpt4mFydD/6sBCdJ2KqWhhPYXv1G5wH96X7NNE3JrkhAcbBGy5BLlAlWEPgK16jwvMtYR5qGUV0gvTand6O6vLJ4EkLrzgl9DaKWUHcptxDj79qOl3HxhWM256fY32/zwRrOR9NJ0AFsZm05LlpWM44SNkLTZ3r0REEFboQsK+uhneu/9A++HX7ngY0mSVFsiIad1wdeSjRtJnDad7Ndeu+B+OBJJo0HbowceU1u3iNwlEZfw0viXuLrX1cTnKIPcngEeDRuq1BA1HgZcrdxeyGDQWQ+Xvgn37IWHTsHVn8GI28A/Tnnes0dt24oCeG80/PSQsvp6WcsWDOowhanw5XVgqoSe02DmK01OsZZlGUNqKrK5/YrxCx2vj28MAIWm0800vExZOGXANR3QKwdyei+8EgPf3nzOZpllmVSbq9GqtBSWKN9PsYH1P+tcR4xArllwRddH+Tzyuu66RgO2FtnClG+nMO6rcWRXZJ+7j2rNBZcdEuo486cSwDE1Xm7m7CD665NeZ+/8vUwOm9xoe0E4W2yAOymewXzcbzYerymBBmvQP6k4SZlVZK1hfuRbe3XTsfz5lXKB//iPzTa1LjQZ40ClEQCSitp2mngfX6UGe4HxrPGNdXHV1B1t8jpCC1WXQeZB5X5XybQFJZlFpVEWTzu9v00PnV6ajlk246pxJcA1oE2P3ebiN0DOUXByh5G327s3AmIhMqGG2WImsywTGZlwfbi9u9MslXvbTd2M8XfnQFpRq+raAqhcXTGePk3Zli3IFguSWFGxRTwvvxz9ZZchm0yt2q+HRw96eChB0ieztwDQM7ADB7kegdD/SuUfNAzKpu2CnGPKv70fK9v8eiuDlchxEDVJuYprDxaLUuKhPBeCBsDVnyrBmUbIskzyFVdSfeIEkd9+g8uAAR3cWaG9DAvpxRdpYKKMwqpCvJ29G2/o3xtu+LJjO+c
Ijq9Rakuqzj30sk6Rj9BHkJBTATScVSBptQT/+3mMZ7JwHToEQ2oqbuMbX1xOJalw07oBShAnyC2o+b6aDHB6T+0Jr3B+dr0Lh79WFvWb9nyDp/PeeZeS9evwXbQIr6uvBsBJ7ThTpIUL5+qkIdTPg++lyVwV0pMQlYrLYy9neNBwBvkPUhoNvE4JQhz+GsY/JC7MtCeTQQl4AMRd1mzzBAddhMyaadtWQdthIb1ZkgRVUhYWiwWV9Zwscizs+wRStrfJ6wgtVFkAMRdD6Rnw6vxxBRuvMBhwLfy5Ana8Cdcta7NDV5orGeA3AFeNa+ef2WzNNB5+M7g0ca4gdCgRZRIAWHJsCbNWzuKdg+/Yuysdznp12zpwaim3UaMIfeN1Yn76SQRsW0lSqVCdZ81Uo9lCcp6SFd0zsJFM247i7q/8s4oYq3y5X3QnBNYEOvNOwr5P4btb4GSdqeYVBVCS2XF9VamUhdgC+8O8b5SC+02QJAltj1DQaqlOPEftU6HL6Rfsj8XoBcCpAvG77VCyXFtuos+l52xat66p9Xup11kXqNTu7nhddRX+99yN25gxeN9wA049ejQ4llW96dLNMVbBm33h89mQl9B8e6Fxxio4+YtyP67x33nln39iSEjEIhZ+FC6AddaRtXTUsMBhzImZQ4RemTJOn8tA4wzVpVDWTLa9cGFStkJ1CbgHQuiwZps7bNC25ruoraaJj+zRC1lWIamqOZ6bUfuEdWp+1hGoLGqT1xJawCscFnwHd3bBYPm4+2HkHTD9xTY9bD/ffqyYvYKPp3/cpsdtc6m7lEQktROMutvevRFqiEiTANRe6bRe+eysKn76nNOXD6XgX62rh3ou1sXIWptpK6nV6GfNQuXi0mZ96c5ks5nKI0fOa7G7EkMJy48vZ1fmLlLyyjFZ5P9n77zD46iuPvzOdvXeJavLDRcwuJtmg00H0+ELHUJvCSEQWkgwIRAwEAiEYCChVwOmGUx1ww3jbvVi9d61db4/RiO5SNaW2SJp3ufxs+PdmXOP5dXMveee8zuEGLQkR5i84KmbBEVKC6NTHoMbVsMfSuDCN2DmjVJ2a/p+5UHb34Mnx8MzR8Int8Cv70DrECXrnpIxF377k6TdOwSJf/oTY39eT+TZZ3vXJxWfEhdmRGNNAGBz1d4hzgaaSqRMwUFKu1VcoG43NJeA1gjZ8w97alxwHLOSZjElbkpfECZnICkYF+h7xjsTtNWbIHGydLzrI4/GHdUUfSsFb8KSIXX6gKckP7qE1OefJ+yEE/ik6BMu/fxSXt+lXGaPyuhgbGIoOoeN5vUbaPngg0NPMEXAtd/CHTshzIlMexX32b1Ceh176mGbTYKUhFDWKFVTjNagrVKZtqFGEzp7LAA/V+zu/yAsAWJypAzKlnJFxlJxgUDPKB2IuLFw6t+l78xoZPWT0uuUiw9t7q3iN1R5BBWg/6FZ2lqKQ3SgEQIznt+zeR3te7tBk0+0QjbliVJxfScOh4hG494DRhTFwC938COd69dTcfU1BB11FOlvvO7Szyq/KZ+/bfgbKaEp3Jr3CiBpPAb0zzs4WsqoGyirrrUCBA00FUt/tvxXej8qQwqunviANNF0FYddarbQUStledTugMzjIGGC9LmTGeH6JPUhPRIRBIFwXQpt7GVH/RAZlA4HLFsofZfixkLOAt84OVLZ07uQzz4BjIdfnJ+SeQqnZJ5CW4+VB9ukMtuDF/Sd69djSEtDl5yMIAjYOzro2rAREAk78cRDbLoUtAWYeA4UrYKdH8Oxdzl3jcqB7FouvU44a9B7ry4ujrATJf3a3Rt2s61+G0fGHekjB1VGCnkJYYSbO1n4779QrdEQvmgRm9t3UdhSyMnpJxMTFAMJE/3t5sjH4YC9n0vHQ1RUAJQ1SkkIwYGWhOBlmnuaaTG3APRngytAmC6ZFurYVl8A7Kfvfu230saFim+wmaGzQdLTHgk4HE6vnw6H3WFXpOmeV6nZIcm7CBqYc5u/vVHZDzVoqwJASmgKOo2OHnsPNZ01JIcOnY3nD4KT7MRPbUV/jHI6e2lRQei1At1WO1Wt3aRGBbt0fef69TS+9B9CZs0k5pprFPNrpGEtL0cwmTCNG+tysHV/7av8WqkJWd5wzko4+a8w7/dQvh7KVktaW9W/QnOplHF7yn5N9nZ8IE2A0udA1GEmt7s+gS/vPlR2QRcEt25xKsNWZeSTEpJOmxXKhqqq0GikTKHNr0hl/WrQ1jPkoO0gZfIDIWfZJoabiAjS970vWiyUX3sdWK3krPoGfUoKHd99R9Vdf8A0caIyQdtxp8GK26F2uySREJvjtN8qSNIIe3qDNxPPceqSS8ZdwpT4KYwJG0b6fyoBQV5CGE1BERREj+HIaWOxt7ez5OclFLYUkhaWxtyU/ebMdpuUAR6sVOqDSh+Vm6SNTmM4ZBw75Ol9TcjiQgM7CUFh5OdQckgyQTrlqhWTgsfQ0rWVkt4mZ32oAVvfUr4O/nuWJE1x5WdDnx+o1OyA7x6ByHQ45W8emRJFkePePY4oYxQvnfySc70F/EFMjtSAu6kYepsXqwQGatBWBQCdRkd6WDpFrUWUtJYEbNDWpK/BNK4TFhy+vNQVdFoNGTEhFNR1UFTf6XLQ1lpZSeeaNVirq4m++upRNfFyhaiLLyb89NP7Op67wv5lVAUF0iTXp03IvEFQJIxdJP0B6GmTgrjNpWAI6T9vzTNQvVU6jhjT39gsfY6UmSsIUsD23cuAAaQnbN2wbxNMONMl9zrXr6fptf9iGj+OuFtvdf3fpxKQ5EZnsbsW6nsqhj553Om9QdvP4dR/KJJpMCppKZc2ZQQNjD3lsKda7VZ67D2EGcIorJM2qA6+19mamjBmZ2Orr0eXLD2rg2fMQJ8+BtPkSQM2xpSDtvXd9bRb2gkzDCG3EBwtZekXrZIkEtRsW9co+hYs7b3SCMcMeErb559jb2sndN5c9CkppIWnkRY+SssxVTwiKy4ErUbg1mNvZf0989FHmJiZNJPUsNQDg2K7PobPfg858+GcF/zn8Eilsx7CU2HMTNAN3bdhtOvZKiWNIJMTmcXuLqgbbH7jsEv68oM04lVRiNI10utwTxbprJcy53VBcOzvISTWbVP13fW0mlvpsHQQbQrgDTO9CY6+yt9eqAyAugJT6SMrUhKDL24tHuJMP9KQL73G5ipqVp4wFbnYjAwgbOEiYm+6ibQX/qUGbIdAGxaGLtb1h94BQdtaOZDhxyZk3sAUDnknw4zr+t8TRSnDMfUYELTQWg6/vgUf3wTPTIX/LJAmoV/ezYABWwAE+PKP0nkuYG9upuO772j/+ht3/0UqAciRiXkAdIv1mO1DbKBkHitlDHXUQOVmH3g3QjGGwalPSPrWQ0z6dzbuZPZbs7l4xcX76dkeuKDXJyaStfwjcn/8oe+Zo4+PJ+err0h68MEBG2OGGcKIC5IaJ5a2ljrnt5whunO5c+er9FPWu2idePagmx1Nr79BzUMP0bVpk+/8UhmRGHVaMmKkhAO5Gunu6Xfz7InPMi1hv2ZYYUnQWQ
e7PwVLpz9cHdmMOw3u2CFlqjnBaA3ayutMpYO2UxLGAtApVh/64Yo74bFMaUNNxbvIz7+MOYc/L9DJOh6SpkrJLz+/6JGpuKA4vj7va15e+DIGrXuNuL2OGz1nVHyHGrRV6SMjPANwoXzSxziaa+gqacZmFiA2T1Hb7jYjA9CGhhB3y80YxqgljYNhb2nx6Hr5O5kWmkFJg7TQyB0Nk1xBgPn3wzXfwB/L4f8+hLl3QtoM0OgkuYSytYdKIhyACG2V0nkuEDxzJnG/u5OkvynbPVXFv0xNTkO0m0AQKW0pPfzJOgPkniQd7/nU676NWIKiYPq1sPCRIU+taJcyhMIMYeT3LuhzB2lCJmhd00ZzueHouNOk+0ztDmgscmmsUc/Jf4Xr18CM3w56SsjcOYTMnkXQlClUdlTy2s7XWF+93odOqowk8no3svNr23F0dQ3c9DX1GKlCx9IBe7/wrYOjBUGQNuGdoLC+Xx5hNJEdmc28lHlMjpusqN3jMibTVXEF7SXX0mG2Hfih3QzmVkmSTMV7WLth30bpOF05KUO/IAgw707peMOLYG73wJRAYkjigZtogUR7LbwwV+qx4nD42xuVAVCDtip9uKx552PMG76jbFUsJSuTpMwlBcmOl8rRC93ItFU5POaiIvLnzmPfLbciuvEg6LZ1U9UhBSX19gSsdrlpg3I6WMMCY6hU0rjgQbh6pRTEPfkRST/NGZw9rxddVBSx115L0ES1eclIIj0mBGvjAnqqz8Fuc+I+Kmuw7l6h7sL7gDOyz2DtxWv58+w/Uyjrd7soBWOtHfh3XX7GFx+s9zcYwdFwxjNww1qIznLJh1GPIEDiEVKAbBDibryRMcuWYcjI4Ne6X3li0xP8a+u/fOejyogiLyEMQXSQ9+db2Xv0MVgrqxBFkcbuxv6TBAEmXSAdb3vHP46OVFr3SXrBTuJwiBTVSUkIoy3TdnHuYp5f8DynZB5eLshVEsPDiWIKojWG4oOTcOQAoly6r+Id9m0Cu0VqhjwSNFHHnQExudDTCpte8bc33mP9c9IG/Zb/Sc8JlYBDDdqq9JEVIS3KAjVoa29pQBcKxjjXNGedoT/T1v1ysZ49e6h5+C+0ff21Um6NCDrXrAWbDdFqHbBsdyjK28oREYkwRlDbLOlQ5caHotGM8oeKIQTCk6SJkTM4e57KiEav1ZCmXYS1ZQYNrfqhL8hZAFqDlK192IxulQEp+g42vixlMThJmCGMUF0sVa09wIELelEUKVl8LhU33Ii1ru6A60SLhaKFiyg87nistQd+Bm5uzB55qdR1Xp3EO4+LUjRwYLNNFRV3GJsYhiho6LHYweGgY/uvzH17Lse/ezwtPS39J07uDdoWroKOer/4OiJ55//giRypsawTVLV20221o9MIpMcov64ZrWTHSUk4h1ROyqX6Vb94lDGpMgR90ghzR8a8QaOBObdJx+uekxpDu8G/fv0Xz2x5hoo2J/pJ+JruFti4TDqee8fI+H8bgahBW5U+5MVCY08jreZWP3tzKKEX3kLupt2kfbpOcdty0Lahw0xrl9UtG+1ff0Pzm2/S/OabSro27Im+7DdkrfiUuDtud+v6Pj3b8EwK6+WshBGmZ+sJ6bN7xf4He8gKEJ4inecios1G54YNNLzwwsCllirDkkEXNQNhCofLP4U/FENEipc9G4FseAk+uxM2LXPpMnkDMS7MSGRwv/6Zra6enl276PjhB7ThB5bgCgYDmtBQ0Gox79l9iE2X5RFUXMfaA08dAR9cI2XmDHZabR2irT8rT85+VoO2Ku4iZ+QvnXoeWd9+S/QppxKsl4KBpW2l/SfG5kLyUSDaYeeHfvB0BNK6TwoGdrc4Ld8mV/ZlxIag146e5XiXtevATQSFiYmpwxC7ilXlByXQRI6RmvmKdqj42Wvjj3rkTYv0Ya5nuz+TL5SainbUwLZ33TLxQf4HvLT9JRp7Goc+2dds/I/UODVuPOQt8rc3KoMwep4SKkMSrA8mIVjKxgvUbFsAQa+8gHeIUUdShAno15hylYhzziH81FOJueYaJV0bERhzcjCNHevWtfs3LMh3s1x4RKPRwqLHev9ycOC29++L/iad5yKi3U7FtddRv/RpzAUFHrmpEjhkxBrQBpXyY6WTDTnGzJQyu1Vcw9LV3/Rk/OlDnm62m7lu5XUs+XkJu6qbgEPvddqoSNL/91+SljyCxmQ6xEby438n7+f1hB533CGfydU0FW0VWB0ubE6W/wzvXw1r/+n8NaOVolXQXiVpiBsG31ys+O1v2XvMdDo3bADUTFsVz0mPCcGg1bArOJE6UwQgbXbDAHP6yRdKr6pEgjLs+Vx6TZsBofFOXSJvzOWMMj3bHyt/ZN4787j+m+u9Yl8wlWCM+5odLT8c+mGGKpHgdaZfB0dfJTXxGinoDDD/Aamh7KTzXL6809pJbZdUbRVwz3hLF6zvlWWae/ugjVNV/I/f/2eee+45MjIyMJlMzJgxgw29E9jBWLp0KWPHjiUoKIi0tDTuuOMOenp6fOTtyCfQdW29iSfNyAAMqSmkPPkPQueMoN1FD1EiO7Mv0zYisy8zIVcN2h7IhDPhgv9Kcgn7E54svT/hTLfMaoxGwhaeTPjpp/d1qVcZ/sREdhGc8QKbu57HIbqoM61mXDtP0Sqp63DkGEg4YsjTy9vKWVe9jhVFKyipl+Y1Bzch0xgMBB9zDJFnnz2gDWNWFtrQge+PCcEJvLDgBT5f/Dk6Qef8v6OxAHa8D1vVKpIh2blcep1w1qCLH9FiwVpdjdjdjSE9HbvDTllrGRCACzqVYYNeqyGrt4qioE7a4M6KlDZq5M3vPo44F2beCKf9w6c+jlj2rJBendick5Hns6NNz7a2UwpexQXFecX+MYlHYm2ZhrVj3KEfyhIJZWrQ1mtMOBNOf2pk6Nnuz9SLpYayetf7qZS2lgIQbYomwhihsGMesvUN6GqQstCPONff3qgcBhdm7crzzjvvcOedd/LCCy8wY8YMli5dysKFC9m7dy/x8YfuVL755pv88Y9/ZNmyZcyePZv8/HyuuOIKBEHgySef9MO/YOSRGZHJ+ur11HTV+NuVAxC7Oyk9+WgMcaEkvbwCTZTy+pw58aGsLmxwO2irciD2lhZKFp9L2KJFxN9+G4LBvQxpOWibHpZBcW9mwmDd1Ec1E86Uur2XrZWajoUmSJIIbmTY7k/K3/+ukIMqgcLRyTk4dkSjc8TRae0k7DAZgX388gasfx6mXSFNXFWGZs9n0uu4053SCBtog0rJBb0gCMxJcWNTceypoNFB3U6oz4c458p/Rx3WHtj7hXQ88ZxBTxMMBvLWrcVSWoo+IYGK9gosDgsGjYHkkGQfOasyEslLCGNPTTtNn31J9VsljJ8ZBQyQiBEaB4se9YOHI5Cupv6S8LGnOn1Z0SgN2l4+8XLOzzufHrt3Eq4WZB/DQ+93UKcVsNkd6PaXnsiYKzUkyz7eK2OrjBIcDmlO52Qyy/4VowGF3QZrnpGO59wKWif6XKj4Db9m2j755JNce+21XHnllUyYMIEXX
niB4OBgli0bWPtt7dq1zJkzh0suuYSMjAxOPvlkLr744iGzc1Wc5/op17Pu4nXcMOUGf7tyAJbta+mph46CdoQI7+zO9uk81rnfjAzA1txM03//R/eOnUq4NWxp+/IrrFVVdK5d63bA1iE6+rTYDGIiFruDIL2WlEjXdzpHBRotZM6Tyncy53kcsFUZmeQlRtJZ9AdaS67EYT+0xH5AupukzrJyRpHK4bHb+gN445zLvpIDKxkRGeTX9lYVHLSgb3n/fTrXrsVhHrwZRvuqVey75VZaP/3UDccHIDgask6QjnctV8bmSKRolaQLF54CKUcf9lRBo8GYdWDz1/SIdLTqPVvFA2Q5lZAfv6blnXcYUyDpKo/G6jmfUbBS0kmNn+BSdqEsxTbagrYgyfFFm6K9Yjs5IgiTXoPVLlLR3H3gh1EZcOVncOxdXhl71LPtXShfDzaLvz3xHtvfh+dnSo0cnUS+/8oSVQGDRgtnPy9tMk+91N/eqAyB34K2FouFzZs3s2DBgn5nNBoWLFjAunUDN5qaPXs2mzdv7gvSFhcX8/nnn3PqqYPvbJrNZtra2g74ozI40aZoQg2BN4HQaZpJndtEwgkRCF7SW/FUHkGm/sknqV2yZNQ3JIs8dzGpz/2TuNtuddtGu6WdI2KPICE4gdZ2KRswNyEUjUYt1fc1tuZmbI0BKKCv4jKhRh2J4VKw1un7nZxBVLoaupu95NkIomwN9LRAcIykc+gEsq5pakg6lS3SYjMvoT8L2tHTQ/UDD1J+1dXYWwdvctWzew/tX39Nx3ffHfJZQXMB//zln7y9520X/jHAxLOlV7n8X+VQdn4kvU442yVduP2bbaqoeIJ8v1iTNpXoq68iYYakbb2vYx8W+wCBlNLVsPxG2LfZl26OLOSNTCc35wCaOi00dUr/H7KkhYoyaDQCmbEmNIZatuwr87c7owebBT65FZYthMZCf3vjPSo3Q8NeWP2U05fIyUcBl2krCFKCz/mvgiHY396oDIHfgrYNDQ3Y7XYSEg4sc09ISKCmZuDS/EsuuYSHH36YuXPnotfryc7O5vjjj+fee+8ddJxHH32UiIiIvj9paWmK/jtUfIO2q5yw1B4ij5/qtTGye3e7y5u6MNvsbtuJOPtsTBMnEnzUkUq5NiwR9HrC5s8n7IQT3LYRYYzg1UWv8s3531BcJwUxRmNWgr+pe2opBbPn0PS///nbFRWFyI6XFop7a5wMwMZkS5lEDhvkr/SiZyOE+j0gaCHvFNA6p0QlB++MJAIQG2ogKqS/SsHR3k7YggWYJk9GFzd4xUnYgvnE3XYr0VddPeAYL257kU+LXMzCHXcaaPT9EgkqB2Lt3k8a4ezDnlr5+7uoffzxvk2wviycyADLwlEZdshB2/ciJxL7u9+TNP1YQvWhOEQH5W3lh16w5X+SpuGvb/nY0xHEvN9Lfw4jiXIwsvxNSmQQwQa/KhX6lJrOGq788kr+tuFvXh3HEv0mIdlPsbL8s4FP2F/SQkUZqn6RNPyDYyBuAD3hkcKsm6W5UNlqqHCu0nt/6auAQe1PMezweyMyV/j+++9ZsmQJzz//PFu2bOHDDz/ks88+4y9/+cug19xzzz20trb2/amoqPChx8OTpzY/xTUrr+kTzg4IGno718fmem2I+DAjYUYddodIeWOX23aCjjqKzA/eJ/I81ztMqgxOvtyETNWz9TmG9HQQRaz7Kv3tiopChEUWEZL7CP8pHHzT8xDkTKI9CpXdj2Rm/BbuKoQT7nHqdIfo6JvYW7qlgOzBG1S6uDhSn3mazHffOWxjQNO4ccTecANBR0w85LPx0eM5N/dczso5y9l/iURQVH83aFUi4VDsVphzO2TPP6w0gq25mbYVK2h6eRmCTgrWqJm2KkqRFh2MSa/BbHNQ3tSFIAj9DYbbBpBImHyB9LrjA+k7rOI6yVNh/v2QMMHpS0ZrE7KiliI21W5ifdV6r46TEpIOQFlvhuMBNJfB3zPhf+dIm20qylDWGwRPn+1SpcmwIyIFplwoHTuRbWtz2AIv01YU4fVzYeX90gaGyrDAb79VsbGxaLVaamtrD3i/traWxMTEAa+5//77+c1vfsM111zDpEmTOOecc1iyZAmPPvooDsfAHbCNRiPh4eEH/FE5PBuqN/Bz9c8UtgROeUP7hm10N+pxeHFRIwgCWb0TKHlC5a6d0YxotVLx2+tpfvsdHBbPdI3sjv6M54JaqRuyrNmm4jvCTj6Z3J9+JOWJx/3tiopCZMbEodG102zd5/xF406TXgtXqYsdZwiOhohUp06t66qj29aNTtDR2CLd4/aXRlCKtPA0Hpr9EBeMvcD1iyeeDQlHQFiS4n4Ne0zhcNxd8JsPD7tgFfQGkh75KzG//S3aCKmLdEBm4agMS7QaoW9je29VK+bCQo6wSo2di1uKD70g8zipaWl3k0sajSqeMVqDtr66143r1RZutAwwv4kcI33n7RbYt8mrfowqStdIr+lz/euHL5hzOyDA3s+hdtdhT63sqMTmsGHSmkgKCZC5U9kaSYP/5xfVzbphhN+CtgaDgWnTprFqVf8kweFwsGrVKmbNmjXgNV1dXWgOmgxrtVLTBlFN81aMy4+4nL/O+StHxB7hb1cAEB0Oqj5rpvTrOKwW7wbd+5qReahrCyDabHT8+CPWQeQ+RiodP/1Exw8/UP/ssx4HsK9ZeQ0L31/I2sp1FDdIDeLUTFvfow0NOWw5tsrwY1rSWACstNJmcVLrPWkKRKSBtQuKDtVLVenFNniTsMGQuwunhadRXCd11T64CZlodX5yLVqtdG3ZQuuKQcpD3WHqpXDDGjjqN8rZHGVoQ0OIPPdc4u+4HYDmnmaazZJESXp4uh89Uxkp5PZubGuee5Li08/gmHVSJtWAmbZaHRzRWxW27R1fuTgysNtgxZ2w53Pp2AVGaxMy+Tnn7aDtUb3zmx6qD03qEgTI6A0slq3xqh+jBrsNKn6WjjPm+NcXXxCbC+PPkI7XLD3sqfs3mNUIAZKBLGcIH3kphCUc/lyVgMGv354777yTl156iddee43du3dzww030NnZyZVXXgnAZZddxj339JcWnnHGGfzrX//i7bffpqSkhK+//pr777+fM844oy94q+I5izIWcVbOWSSGDJzx7GscLXWYkoLQhYBh0myvjiVPoIrqOz22VfWHP1Bx3W9pefc9j20NJ4KmTCH+rruIve5aBL3eI1vFrcVUdVbR0a3HYnNg0mtIjQpSyFMVldHLpOQEHFZpE6ygaYAMrIEQBJhyMUy+UJ3oHY5/nwD/OQnq9zp9yf4l8gV9WVj9G1SiKFIw71iKTjsda3X1kPZ69uZTdsml1Dz4IKLtwIBCj62HvU17B9a4PByjvIpkUPZtgh0fgsX1eUOHtYMZSTOYHDeZYL3aCETFc8b2ZujnR6QgBAURKUjJCPI95hBkiYS9n0OP2qzZacrXwaaX4eMbXb60SM209eo4M9PGIooCaLspaqo99IT03sCiqmurDNW/gqUDTJEQf6gs04hk3p3S6/b3oXXwirWAkz+q
/hUKvwFBA7Nv8bc3Ki7gV/XzCy+8kPr6eh544AFqamqYOnUqX375ZV9zsvLy8gMya++77z4EQeC+++6jsrKSuLg4zjjjDB555BF//RNUfIA2OpH0lb/4ZKzsODlo63mmbegJJ9K5dh2C0eixreGELiaGmKuvUsTWR2d9RElrCdV1cUAtOfGhaDRq4MAfWGvrqF+6FEtFORmvv+5vd1Q8JD7MiGBNAH0bmyr3MC1xqnMXnvgnr/o17Gkskpp1aXQQGu/0ZfLEPi00g0+aJU313P2kYGxVVdhbWrB3dqKLiRnSnmn8OAwZGRjHjcPe1oYuOrrvs+e2PserO1/l0vGX8sfpf3Taxz7MHVD6E4w9xfVrRyI/vwDb35MalCwcfD4qiiId332HaeJE9L3z3LSwNP5z8n985anKKECWVfkibjI3bdxASWc5LF9NSWsJoigeWgGVNAVi86AhH3Z/KmVfqQzNnt4qBheaTQJ0mm1UtvQ21o0bnUHbrAjvNl2MDApBa4/GoWtkXfkucmMPKkuXM233bZQqY3Sja52mOHLGcvqcka1nuz/JR8LcOyDzWAhPGfQ0X2WXO42cZTtxMUSrzU+HE35vWXnzzTdz8803D/jZ999/f8DfdTodDz74IA8++KAPPBu92Bw2ttRuobStlPPzzh9VGq19Qdu6joEnty4Qvmgh4QtPRjAYhj5ZZUCiTdFEm6L5506pEV2eKo3gNzQhIbR++inYbFgqKjCkpfnbJRUPEASBCF0ybRSwoz5w9MuHPXtWSK8Zc6XmXU4iN/4MEpIQRYgOMRAb2r+Q1CUnk/PDD1gryp16pghaLVlffD7gM6yvMdFgmXeHw9oNT44HcxvctAHixrpuYyRh7Ya9X0jHE84+7Km26mr23XgT6HSM3bQRjcnkff9URh15ib2ats1mrGhIC0tj2cJlgwcNBEHKtt36Fmg9q44aNYhif9B2/OkuXVrcW8kXE2IgKmT0rA9aza009jQCUqm4twnVJtNGI9vqCoD5B34YmwchcdBZD5WbpeZZKu4z8wYYM2v0BGxlFjw05ClT46bSae1kctxk7/szFI1FsOtj6XjuHf71RcVl/B60VQk8RER++/VvsYk2jks9joQQP5fB2m0u7WJ7QnpMMDqNQKfFTk1bD0kR7pfieyoNMBypW7qUkFmzCT7maAQFH9595cJqEzK/oQ0NIeHeezCkp6NLUEvjRwJJIem0WaF0IK3DwyGKULMdOmoh9yTvODdckRfy41xcyPdmY9gtsYD5kLJZQRDQJ8SjT3A+e3ewTUePgrb6IBgzEwpWws7lcPzdrtsYSRSukspCw1Mh9ejDnmprbsY4bhyCXt8XsLXYLRi0oydwo+J9kiNMhBp1dJhtlDZ2kpcQxjGJxxz+otm3wbzfqxIozlKzDVrLQRcEWSe4dGlhvdRUN3uUSSOUtpUCkBCcQIg+xOvjJQaNoa1nO8UtAzznBEEK1O76WGqgpQZtPUOrh7Qh7jEjHYcdNIdKdZ6bdy7n5p3rB4cGYO0zIDog92RIDIy+RSrOM8q2RFScQa/RkxomdbwesHGBj9m3eBYlx46na/kLXh9Lr9WQHiPpyhXVea5rK2MpK8PRqZy9QKRn924aX3iRimuuwdHmuS7a8sLlPLHxCbbWbSW/Vgraqpm2/iX6kksInTMHjZo9PiLIjZJKo+p6Kly7sOBreHGe1IRFbQLaT3stVGyQjsed5vRloihy2YTLWJy7mLZWKTv34CZknmBvbz+giZmsrVbdWU2Xtct1g3JG6a7lnjs33Nn5kfQ68ewhA15BEyeStfwjMt5+q++9sz8+m/nvzmdP0x4vOqkymhAEoU9apWLVj5RfdRU1Dz98+It0BjVg6wry5lzOfDC4pkVdqOrZ+mS87EhpflPbM4h2+7Qr4cx/wtSLfeKPygjF2g2rHoanpwa+JvjcO2H6dTDvd/72RMUN1KCtyoDID9XiFicb1HiR7n0d9NQBQeE+GU+WSCisa1fEXtU991K0cBFtX36piL1ARRMcTOQFFxB+1ploIyM9treqbBWv7XqNXY27+zSGc9VMWxUVxTgyUSpt73LUYnVYhzh7PzLmShlGreVSxpGKxN7PARFSpkF4stOXCYLAFUdcwZ9n/5nSeikILutSytQtXUrz229jb3ftuVRx083kz5hJ1+bNfe9FmiKJNkkat3L2k0uMOxU0eqjb5VKztRGHtRvye5/rQ0gj7I/Q2zjXYrdQ2VFJXXcdsUGxXnBQZbQib3BX1rXRuXYdTT9+z5Obn+Tdve8e/kJrDxR84wMPhzluVlTAfkHbUaZn62ttz8kJuQC0O6oGPiH7BDjqNxCR6hN/Riy/vgOf3i5lLI9GtEZJC7y1XGpMuB+t5laqO6pxiA4/OXcQUelw6uNStZTKsEMN2qoMiEflk0pibidtXh0pc5owHX2cT4aUS5aK6pXJjDVkZYJGg6XE/1nL3sSQnk7Sw38m+a9/VcSenOUdLCRjsTkw6jSkRqndtf2NuaiIxldfxVzs/w0dFc84Mjkd0W4AwUFFmwvZtoZgKcMI+hevKvst5J3Psj0YebNw/0xbe0cnjS+8SM1Df0a02VyypwkJBoeDnp27Dng/IzwDcPMZHxQlLXhBkkgYrRR+I0kjRKQNKY0wEAatgR8v/JHXT32dGNPQzeVUVJxF1rXdFJJC4kMPUfaH83llxyusLF05+EU2Myw9At44F2p3+sjTYYi5HexWELSQt9Dly9VMW98EbWelTQBA1DbR3OV5c2mVQdi1HDa/ApWb/O2Jf9BoYM7t0vG656WNr15Wlq3k5A9O5vbvbveLa32oFXEjAjVoqzIgfUFbf8sjNBRgirQRPi4cTczg3RmVRN79lrM7PSXq/PPJ+e5b4n//e0XsjQYsdgsV7VIQydotLWZz4kPRatTyPX9T99RT1P3tMdpXfu1vV1Q8JCM2BNEiaaRurnKxPFvOMFKDtv1MuUj6uYw7w6XLCpoLKGguoK2nm/ImSa5gf/1u0Woh5tprCD/tNHRRzjc3A4i76SZyvvuWmKuvOuB9jzdmJ54jvcryAKORql+k1wlnDVlabi4qouD4E6i6+0AN4AhjBFPipoyqhq8q3mdsb6b+jlYHURddyNjpC7l43MWcmXPm4BfpjJA2QzreNkRG7mjGGAY3b4Dbt0NwtEuXWu0Oyhp77/GjLGgrN9v0mTxCTALYpUSPtRWDzG+aimH9v6RMSRXXcdihrDfDNn2uf33xJ5POl3TtO+tg6xt9b7eaW9FpdKSF+blx809PwFuXQPWv/vVDxSPUoK3KgGRFSFpAfs+0bSiQXmPzfDZkf6atMkFbbWQk+hHeuKn1s8+wVg1SguQG5W3lOEQHIfoQapulDupKajyquE/YCScQMncuhox0f7ui4iF6rYYgIQmArbUFrl2ct1DKNKrdAU0ju4rAaSadBxe9AXGuPa/++cs/WfzJYl7a+iYOESKC9MSFGvs+10VFEf+735HyjydcdsmQno4+KemQ9/skkFrdzJgf2yuRUL979P7/z38AbvsVZt445Knd27Z
jq6nBUlnpA8dURjt5vZs+pY2d9FjtZEVmce+Mezkz+zBBW4DJF0iv298HR4CU9AYqEa4nkpQ1dmJziIQYtCRFmLzgVGBitVv7EjHk9aUvCCIJURTYXls68An5K+HLP8LmV33m04iidif0tIIhFJKm+Nsb/6EzwOybpeO1z0gN1IFrJl3Dhks3cOPUoecIXsPSKWUA7/0M6vP954eKx+j87YBKYJIRkQFAXVcdndZOn3T6HIjO1T9iKwsiOCsNvY/GzIqT/q21bWbaeqyEm5Qb2dHVhWA09mnajQRs9fVU/eFucDjIWfUN+mTntRwHQ87wzorI6isly01Qm5AFApHnnkvkuQHSCVXFY+JNY6hgI4XNRa5dGBwtdVwu/UnKtpUnrCouY9QaCdWHglXKes5LCPV65qXHG7NBkXDufyD5SEknbbQSleHUaeEnn4Q+JRkc/WWKL29/mfrues7KPovxMeO95KDKaCQuzEhksJ6WLitF5XWMqdiLvbGJyHMXH/7C3IVgjIC2fVC+VtIvV+nH2i296oPculyez2bHe/8eH0jYRBu/P/r3lLeXExcU57NxZ4Xezse/tGBMmTDwCRlzpNfy9VKgTauGRVxCzrIdM1P92R11Gfzwd2gulSQjJp0HSM3d9RpfRTAGYMv/oLtJmqvIFVIqwxI101ZlQMIN4X2NMfyZbdv8zRaq1kXRXuo7PZZwk574MCnLqVghXVuA2r89RsHceXSuW6+YzUDA3tJC8LRpBE2erEjAFg7UviqQg7Zqpq2KiuIcFXMcXeVXkoobgfjxvTIAxd8p69Rww2aGtc9Co4uB717+ftzfWXvxWhydOQDkxB+4QWXZV4lot7vtXvf27VQ/8CANL73U956caVvWVobd4abtiWeP3oCtzezS6ZqQEEKmTydk5oy+974q/Yo3dr9BdWe10t6pjHIEQehrRla+LZ99199AzaNL2F776+H1y/UmmHiWdLztHR94OszY9i78PRu++bNbl4/WJmRBuiD+b8L/ce+Me30arJ6YmAaifvDKyfiJYIqUtMnV0nHXKV0tvabP8a8fgYAhBGbeIB2vedq/vsjYrdLcFGD2rWpgfZijBm1VBiUQmpEZczIISjVinHKMT8eVtaaK6pQTrxctFhxdXXT88INiNgMBY24u6f99jTH/fU0xm/J3Lj0so2+Se3A3dRX/4jCbsZSV+dsNFQ85MikXe+dYqhqMQ598MBPPgd98BBe/rbxjw4mSn2DlffDKqW6XFAuCQGHvJuH+G1Si3U7xGWew96hpWPa5V1pv3bePlnffpW1Fv/5wUkgSRq0Rq8NKVYdy0jajAms3/GMcvHkhdDe7ZcIhOihtKwV8p/GoMrrIS5TuI9sNsZgmTKDkyASuXn4pHxR8cPgLJ/VKJOz8+ICmOipIVSXWTtC71xR3/0xbFe+T3RscLxxsLafR9Accy1b7yKsRgsMBZWulYzUjX+KYa2D6dXDBf8lvzueiFRex5Ocl/vNn+3tS1URIPEy91H9+qCiCGrRVGZTMcP8HbeMef4uMb7YSctY1Ph2370GvkK4tQPTll5H+xusk3HuPYjYDCY3RjaDPIMg6i2HaZMw2B0adhrRo9ybJKsrTuWED+dNnsO/mW/ztioqHyItHt+51ofGQfSJo/Vj6FQjs6W1iMu5UaRHoJgW1h25Q2erqpCZXgoA+KdEtu8EzZhB9+eXE33lH33tajZb0cClL1m1dW4DiH6Tg5U9Pum9juFH4jVRuWLtTytIaAnNJCY0vL6N769a+92o7a+m2daPT6EgNS/WeryqjFvk+sre+i8wPP6DpzkvoNglDz+nT50B4Cphb+8ufVcDcDsXfS8fjT3fLhPycHW1NyLbWbWV34256bL7dBEiOEjAmfkCF8UksNtvAJ8kSCaXqd90lOuskmSR9sCSTpCLJhp36OERnUtRSxM7GnexpcrHJr1I4HLB6qXQ860apikJlWKMGbVUGJRAybf1Fdq+urZKZtob0dIKnTRtROlY9u3bhMLtWJjoUoij2fefsZknjMTsuFK1m5PzchjvG3FxEiwV7ayv2DuUkRFR8T3ZcCNqQPbQaV7Clepe/3Rl+OByw53PpeJzrC/lXd7zKGR+dwbLtr1La2Jtpm9C/oNcnJTF200ayv/jcbS10XXQ0Cff8kdDjjjvg/ayILLSClvruerfsAtBWCflfjq5u8zuXS68TzpIC6kPQuXYtdY8/Tv3zz/e9Jz/jxoSN8a/encqIRQ7a5te1A/vN6duGmNNrNHDWP+HmzZAz36s+DisKvwG7GaKzIG6cy5c7HCJFddI9frQFbf+24W9csOICVlf6Nps1OzYafcQWNMFF/FpTOvBJcqZt+TpwVypoNBKWCLf+AnfsVDfuB2B/mT+/kP8FNOyVNMqPvto/PqgoiipuoTIocqMSj7JwPEDsakHQG90W/PcEWVNwUB0kDxFFcdgHbx0WC2VXXgVAxltvYsxSpiNsbVdvBpKgo7ktDKg7IIih4n90UVFkf/kF+jFjhv33eLQTZtITFrcRe9BOVpVM4qikQRp2DIbNLOn7FX4D164C4yiTMancJGWcGCMgY57Llxe0FFDaVkp1WxsOMY4wk65PU11G0GjQJyUp5XEf9828jyVzl6D3ZME19lTQ6KF+N9TtgXjXgxnDCms37P1COnayqYc+JYWwk04i+Jij+96TA2eqNIKKt5CDthVN3XSabWRFZBHSLVIhVGB1WA+/WZB9oo+8HEbs6ZWXGXe6U5s1B1PV2k231Y5eK5A+yirHokxRRBmj+taVvsKg0xHZvZiaFh31rRoYqKghcZL0/LZ2Sbr0cXk+9XHYExztbw8Cj8Yiire/BeDz73wfWSfAKY+DwwamcP/4oKIoaqatyqBkRmQSZggj2hSNKPquEZhM0yO3k3/0FOpvO8vnY2fHS5m2ZY1dWO3uaRQORuOyVyg+9TTMxf4JhiuFtawMTVAQmqAgDOnKNaORdydTw1IprpNKqdQmZIGHIT1dDdiOEBINU7G0HI3DEu/6xVpD/45+4TfKOxfo7O6VRsg7GXQGly8vbS0FQLAlANK9zhu/V6IoYi4upuWj5X3vRRgjPAvYglQeKQd4di0/3Jkjg8JvJE3LiDRImebUJWHHH0/qs88Qfdllfe8Vt0jPfzVoq+ItokMMxIZKG0CFZXV0nH0ZLz9tR99jZV/7PucNuanTPaKwWSB/pXTsRkUF9OuqZsSEoNOOruX3vxb8ix8v+tEv97vJEadha5tCVdMgJ2i0cOVn8MdyNWDrLKII9kHkJlQAKOmpAyDT4ad1kiEYZlwnSSOojAhG11NDxSUSQxJZc9EaXln0il+CM+bScuxmrXTj8TGJ4SaCDVpsDpGyxi5FbXdt3IilpITW5R8ratfXGHNzyVn1Den/+6/bZbsDIWd2Z0ZkUtBb1perNiFTUfEa02NOx1x9HmJ3rusXCwKMO0063vPZ4c8daYgi7FkhHcs/A5cuF/vud90dMcChDRcrf/d7ah/7O7amwVaczuHo7KT4jDOpvucerFUKNx6TM05l2YCRjI
vSCIMhZ9r6LQtHZVQwtrcZ2d42OzgcaEQYU++k7FlDIbzzG3jduYzyEU3pT5LGb0g8pLrXGFkO2o42aYT98cdaUu5RctjKycRJfllrDlsa8uGxdHjn/6R5kMoB2KMyKDNIG2aZu7/yvQPq/8mIRA3aqgyKIAh+zaRLnCOQubCOyFNP8vnYgiA496B3g5irriTpkb8Sc911itr1B4JWiyEtTVGb6eHpnJ51OrOSZvVNctVM28Ck/plnKD77HMwFBf52RcUDPL7XjTtDes1fKWUkjRbaqqCtGrRGyFng8uUN3Q10WDvQCBrqmqT/g/0X9PaWFto++4ymV15B0HuWEasNDSV42jSCZ87E3i5thjlEB/etvo9LPruE5p5m942PPeVAiYSRygHSCIudusTe3o6ju/uQ9/2ud6cyKsjtlfoqqG0n9fnnefPpU9mbKjgne2YIlioJir+H5lKv+hnwxI2DBQ/B7JvdbjZZNEqbkPmjUnN/UmMEtCF7+aXxe7/6MaIoXQ2WDuhu8WjzcqRS3VmNGRG9KJK8+3No8mFvoKpf4PmZsO09342p4hPUoK2KU/j8oSuKaNoKMUXZ0I+f7tuxe5EnVkoHbYOPOYbIc89FGxqiqF1fYm9p8ZrtuSlzeXTeo8yJP4seqwODTsOYUab/NVzo/nUb5j176Fijdt0dzuTEh4JgZW9jPha7G0HX1GOkDCRzq5SRNFqISIE/FMMVK9zS8u2TgglNpaheauh4QFWBTk/iXx4m5obr0YZ5Xm0w5rVXSX/1FUxjxwKgETRsrNnI9obtnmnXB0X2NywayRIJogPmPyBl2aYc5dQlzW+9zd6jj6H2sb/3vddmaaOhuwGAjPAMb3iqogLA2ETpvrG3tgPT2DxSE6Xyb6cybcOTIfNY6Xj7KA8ARKTA3Dtgzm1umxitmbaPbXyMRR8s4qOCj/wyfnBwC8FjXqFS+9bhT/z2EXjxWKjZ7hvHhjNlvXP+jLn+9SNAke+v6YIRreiAtc/4bvDVT0H9Hij82ndjqvgENWircli+KPmC0z48jYfWPeTbgTvroacFECAm27dj95IdJwVV5YmWioQoipRefInXMyxlaYSs2NGn/zVciL7iCpKfeIKIs3yvO62iHNlxoYRk/53m6EfZ3ZDvugGNBsadKh2PNokEQzCkubex2DexD8+gtEHqKp63X9NFbWgIUeefT/xt7gcK9megypnbjrqNJ49/kuwID5+zExfDmFkQk+OZnUDGEAIzr4cL/ut0dpGltBTsdnSxsX3vyf/v8UHxhBpGVwBHxbfI95P8Gmk+JWd2y1raQzL5Qul127tqya2HyGsJubJltFDYUkhlR6Xfxp+Z1tscU9tBaVPd4CdWbYHqX6UsUpXBEcX+n1H6HP/6EqD0VdLETpTe+OUNaK/1/sANBbDrE+l4zu3eH0/Fp6iREJXDohE0lLeXU9hS6NNxzZu/p2FnKB3taaAP8unYMv0lw52K2xZFkbavVlJx083Ymj0oS/UD1vJyrPv2YSkrQ5eUrKhti91CaWspNoeN/FppgnuwxqNK4BA6by4Rp5+GLirK366oeEBCuBHBFgfAxqrd7hmRm7Ps/Xx0NK5R4N8o65pG6VOxOURCjToSw00e2x0K0WrtK9k/NetUTko/iUhTpGdGp1wIV30Jk87z3MERRNIjfyXn+++IOOfsvvf6FnSRqjSCineRM/dr2npo7baS/u1ebv3YTltZoXMVdOPPAJ1J0rCs3updZwOVbe9JQevuFrdNNHaYae6yIgijL2gr3++yIv2j3x0XGo5giwRg/b7DyPfIAUg1aHt4Gougo1ZqQpt6tL+9CUjkyqWs5OlSJZrdDBtf8v7Aa54GRMg7BRImeH88FZ+iBm1VDssxicfwn5P/w9Ljl/p03K71q6nfHk7TXu8vYAdDLmEqrutQXB5CEAQaXniBjlWraPv8c0VtextDejq5P/1I2nP/VFziIb85nzOWn8EpH57S34RslJWSqaj4GkEQCNemALCjzs3s+cxjITYPxp4KVuU3ugKOjS/B87Ngy//cNiEvZvWOREB65uyfDdu5YQPWqipFnz91Ty1l74yZtHz4oWI2RwVl62Dzq9DZ6NJlgiCgT0xEFx3d915f0DZcDdqqeJdwk56kCGkeXVDbjn7lGubuEkkr6aCxx4nvsilcuqeDFLgcjfzwGHx4LRStctuEnGWbEhlEkEG5xr2BTqe1k7ouKbvVn1IwoRopwWRrzd7BT5JL/cvWjo6NZ3cp6w1qpx7jt6SqQOcAzfoT7oWFS7yf+dpaCb++LR3Pu9O7Y6n4BTVoq3JYok3RzEiaQVxwnE/HNeROIOKoBEJn+m8Xb0xMMFqNQLvZRl27WXH70ZddRsy11xI6d/hpAmkjIwmZPVtxuw3dDZi0JtLC0ijozbTNVTNtAxpbYyMty5fT9qUfOqSqKEZS8BgASpwtmz0YnRFu3ginP+mWvuuwY88KqNvVK+PjHvLE3twVAxy4QSVarVRccy2FJ87HWlHhkav7owkKQuzqovvXXwHotnXzfcX3vLtXoYBMZyNsf18ZW4HEhn/Dp7fBmqc8NjU5djJn55zN9CT/6PWrjC7kaqX82g6izzuPLxZEUZwoOKdrC/0SCdvfB7vNS14GKPX50FggZRXmuN8UuXCUNiGTZThiTDFEGCP85kd8kDS/KWo5zHc++UjQB0N3k9RUU2VgSnv1bFVphEEpbSsFeoO22SfCrJvA6OXf/XXPgcMq/b+4KdmlEtjo/O2AispAhCy+npDF1/vVB6NOy5joYEoaOimq6yBB4bLVyP3KJYcLoigOqIuoFMenHc/Pl/5Mm7mdWavXAZCbMLomucONjh9+pPreezFNnkz4ooX+dkfFTXKissivh9qecn+7Evh0NfUvXMad5p4JaxfVndUANDZHAu0HSMHYmpoxZGRgq61Fn5rqocP9RJxzNqHHH4cxL6/Pj1u+vQUBgbNyzsKoNbpv3NoNS48AaxckTIT48Qp57Wes3ZDfuyk14RynL2t++2169uwh4owzCJ42re/9+enzmZ8+X2kvVVQGZGxiGD/k15Nf284l551HcdT31Fet62uGNyQ586VKityFUlBAO4qWjns+lV4zj5Oyjt2krwnZKJNGkMvEZS1lf5EVkUVRHdR0HWZ+o9VLwa7i76Xne8JEn/k3rEibLskjZB3vb08CkpaeFpp6moABsstFERx25e+h3c1SJRDAXDXLdqSiZtqqDMn66vUs3byUNZWjr0O83IysqF5tRgaw78ab2Hfb7ZiLnczQcAONoKG9S0+31Y5BqyE9OthrY6l4Tsic2ZiOOIKQObMVlxFR8R1TEqQgXqejBpvDg2wqh10qJW+rUsizACT/KxDtED8Rot3T6ZMzMaJN0ZTWS783OfttUOkT4sn65GNy16xG0Cg3VdMnJGAaN67PZrQpmnBDOCKi882JBjUe1N9tfudyz2wFEgVfS5IfEWMg5SinL2tf+TUtb7/j1YadKipDIWfw59dKklNL5i1hw6UbOCXzFOcMaPVw+acw++bRVw4tN9Z0c
3NOpi9oO8oybfv0bCP8o2crMyleapDZZh9iXtInkaDq2g7K9Gvh8k8gQ820HQidRsdDsx7ixik3Eqzfb/2690t4YS5sfkX5QU2RcOH/YNqV0iabyohEDdqqDMnqfat5ecfLrK70zUNM7OnCXrIF7FafjHc4suO914xMpmdvPnVPLUW0+v/fezisdXV0fP897V99haD3bqaFrGebFReCTqvepgIZfUICme+/R/xtt3k1C1vFuxydmono0INgZ1/7PvcNfXA1vLIIfn1LOecCjT0rpNfxp7ttQl7MZoRnUtIgPV8G0u8WdN691wqC0LeglhujecTE3kzUXcs9txUo7PxIep14Frhwj4v6zf8RffVVBE/vL1Vst7RT2FyINQDmNyqjg7GJsjyCNK8KxYRl23asNTX+dCvwaauCys2A0K/r6yZFozxo6+9M25lpUtWHXdtAa0/X4Cemz4WwJAhN8JFnKiONUEMo5+adyw1Tbzjwg5ZyqN0Ba55RPr4hCFKw9oylLs1RVIYXajREZUjkh63T+lceYt74DfmnXErx3Ek+Ge9wyF1e5V1ypRFtNsqvuorGF1+k46fA3tnVx8eT+eEHJNx7L4a0NMXtWx1WLvj0An73/e/YWV0PjL4JroqKv8iMDUO0SNrlW6rzPTDUm2kpZyiNNCxdUNjbkMaD7KtjEo/h8eMeZ1HaRVjtIsEGLckRvslis1ZWUvePJ6l99FFA4Wf82FMk/cf6PVA3AnQB95dGmOi8NAJA2AknkHDXXRiz+rPMNlRv4JxPzuHyLy9X0ksVlUGR51ENHRYaO8xU/f4uSi+6mLbPXGyC29MGv7wBxT94wcsARH6GpU2HMPeDeJ1mG1WtPcDom9MGStB2bGwy2E0IgsiGisPMb8bMhDt3w6mP+8654UTlZuio97cXw5Mj/w+CY6G1HHYo2AhWrXAcNahBW5UhkR+2sjaRt7Hs/QUATbCyGrLuIAdtvSWPIOh0RC5eTNhJJ6GLjfHKGEpiGj+e6Mt+4xXble2V7G7azU+VP1FSJ+1C5qlNyIYNosNBT74HwT4Vv6LXaggSEgHYWuPB/+PY0wBBmtyPRImEom/B1i2VyidOdttMfHA8izIWESlOAaQsW42mP0Oi5IILqbjpZqy1dR67fDD2jg4aX3qJ5nffQ7RYlA3amiKkxhswMiQSZGmEyDGQ7Lw0wmC0WdoI0YeQHp6ugHMqKkMTbNAxpldmKr+2A8MRE+gOM/DGjv/SZT1M1uHBrHsOPr4R1j7rJU8DjMZC6XWc+xUV0L9+iA01EBls8NSrYYPNYaOsvQzwf9BWo9FgJAmAzdV7Bz9RENRMxcPx7uXwRA6UrfW3JwHLmso1bKndcui91RAMM3uzb1c/BQ6H54PZLPCvObDqL2BWZRxHOmrQVmVI5NLJ6s5q1yZ4bhKebSBvcTXJvzna62MNhdw0oLq1hw6zd7rmxv/uTlKffYagye4HAEYC8qZARnhGnxzFQOXCKoGHaLVSeMKJlJx5FhYFO92r+JY4o5RBn99U6L6RsARIPUY6HonZtuHJMOVimHqJIou7gr6y2f2akDU00LNtGx3ffos2TPl7oDE3l8iLLiTxgQcQRVH5apqRJJHQsBcEDUw426X/7+7t2zEXFSEetDA7J/cc1l28jgdmPaCwoyoqg5OX0K9rG3fFldzxuwhemtrYp63tFJPOl16LvoUO5TeTAo5THoM790gZch4gV+plj7ImZPva92Fz2AjSBZEYkuhvd4gxSA099zYWDX2ywzEyN509obkMWitAo/Now3qks+TnJVz+5eXsaNhx6IfHXAOGMKjfDQVfeT7Y9nehbif88rqkPa4yolGDtipDEmmKJMoYBUBZW5n3B2zYi9YgYsib4v2xhiAiWE9sqNRNu3gUNyOre2opdU8+hbW62mtj9JVRhWf2BTJy1UzbYYGg16NPS0UIDsZS7JuMfBXlyZQ36A7XYdkZZK3XkRi0TTkKznkBTrjHbRN2h53Xdr7Gj/t+ZG9tKwC5+zUh04SHM+a110j661/RBCvfiFHQaEh66CEizzkbjdHYF7QtbS3FISqQ/SFLJDTkSzpuw5lj74Lf5cOsm126rHbJoxSfdjptnx36OyAIAkG6UdbQScWvyFVL+bXtCAYDd03/A8+e+CxpYS5IXcXmQMo0qQmjkuW9gUx4EgRHe2RitDchywjPQCP4P9yQFiZVN1R0lB7+xLrd8Hg2/Pt4tfR8f8p6m5EnHwnG0fVddhZRFEkPTyc5JHng7PKgSDjmKun4pyc9+3457LB6qXQ86ybQGd23pTIs8P9dVGVY4FNd24beTsuxed4fywmy40IA70kkyNhbW2n9dIVXx3AHR1cXzf/7H43//jfWykqvjSN/t2KMqXRZ7Oi1AukxygcsVLxDyhNPMHb9OkKPO87frqi4ycS4XABabZWInkwm5XLS0p+gu1kBz0YWlR2VPLHpCe78/s6+BX3e/kFbg4GQGdOJPHexT/xJCU1Bp9HRY++hplOB5kSmCLjoTfh9gSQrMNwJjXNJ01IURTRBQQgmE6YjjvCiYyoqzrF/0Bbg9KzTOT7teMIMLm6MT75Qet32jpLuBR7WHsVMjdqgbW9jy4yIDP860sv4mFwctjA6e7SHPzEqAywd0FELzmTljhZKe4O26XP860cAIwgCzy94nq/O+4q44LiBT5p5I2iNsG8D7Nvk/mB7PoPGAmm+dfSV7ttRGTaoQVsVp+gL2irRXfowiFYLVSuqadgViiMsMBZ72b0TraK6Tq+N4ejpoXDBSVTddRc9uwOreYug05G0ZAkR5y4maNo0r40jf7d0dqmMKis2FL1WvUUNF/SJiQiG0aPXNhI5JiUPURSw00OLucV9QzHZEDcOHLb+pl0jgfyVULXV4+wbh+hgYcZCjk05jpL6bgBy431fVWBrbKT1s8/Q9FhI781CUmxjNvckCIlVxpa/MLe7dZkgCIxZ9jJjN23EkJHR935dVx1nLj+TO7+/07NNERUVF+kP2nYgiiJtX35FyQUXUvePJ10zNHExCFqo2tKfYDHSsHbDP/Lgv2dDV5PH5grrPQ/ainY7nT9voHXFZ3T+vAHRbvfYL29zauapLD1+KReNvcjfrgBwVt5JdBb8iZbys3A4DnP/1Qf1SzyVBXaDaJ8i/ywy5vrXj+FOWCIseAgueRdS3ZSBFEVY3Xvvnn4dGNWq1NGAzt8OqAwPfJVpa92zidYiI4LGQExstlfHcpYcLzcjA9CYTITMmYOluBh7a5vXxnEHwWAgfNFCwhct9NoYoihS0iJ9t3q6o4EechJGV1aCioq/GZcYTWfxnYjWKLR4+Pu3cImUAaBA86aAQBThszslTbdL35eCkm6SEZHBE8c9QXF9Bx9+9QMmvYaUyP5y+ZYPP0KfnETQkUeiMXqv5K30gguxVlaS9tJLZEVmUdRaRElrCXNSFM6kEcXh19zF0gX/GA9Jk+HC190qkRZ0B06xi1uLKWktQRRFhOH281AZ1mTFhaDVCLR2W6lrNyN0tdKzbRuNlmbif3en84ZC4yBnPhSs
hG3vwol/8p7T/qL4e+hplYLSQVEembLYHJQ1Sr1A3A3atq1cSe2SR7HV9FdB6BITSbj3HsJPPtkj/7xJYkhiQGjZyqTHhKDTCHRb7dS09ZAceRiJmvQ5khxA6RqYdoXPfAxYWiuhuVTSd0+b4W9vAharw4pe44S27KwbPRuo5Aeo+gV0QTDjes9sqQwb1DQ2FaeQg7ZysyhvoQmNJPbMI4k6YSyCPjCy9uRMW7nEyVskL3mEzI+XEzJz9D0QG3saabe2oxE01DeFA2oTsuFI29dfU3bFlTS+vMzfrqi4QbhJT5wxFUQdRZ7e73LmS1kEmhEyzaj+VQrY6oMVyzQp2K9sVqORgngOs5nq+++n/IorsTd5nuV1OIJnzsA4bhyizUpGeAag8DM+/ytYdgr88JhyNn1FwUqwtEv/5x4GbmT6NB4DpFxYZfRg0mv75Kbya9vpOXIsT5+p4U/z67A7XMzanHwhIECb9+Sy/MqeXpmycad5vNlU1tiJ3SESatSRGG5y+fq2lSupvO32AwK2ALbaWipvu522lSs98m80oddq+n4HCuuGqKLI6N24LFuj6tpCv55t0hQwhfvXlwDm/jX3c9w7x/Fp0afOX2SzuD6QrGV71GXDv6JJxWlGyGpKxdvIQduy1jLXJ3guoMucQNzf3yThuY+9NoaryJq2pY2d2OwKNGkZBE1wcMBl37R+8gktH36Eo9N70hDQv5hNCU2huF56gOWpTciGHfaGBrrWr6fjhx/87YqKm8jZQEX13v2dH3bITdVy5kvlkx7Q0N2AQ3RQ0KsvmbefNIKjvZ2wE0/ANGECukTvZiklPfwwWcs/IuyEE7xTTdPdAuVrYccHw2/hu2u59DrhbJcDN2W/uYzyq67CXHSgHmJfs82BGpSoqHiZsb1zqr017aSkH8GmyUFUhdup6qhyzdC40+COnXD2817w0s847LD3C+l43Gkem5OTPbLjQlye34t2O7VLHh343tn7Xu2SRwNSKqHV3MrL21/m+4rv/e3KARhivyEk51E+KBhCkzl1Omj00sZEsw96uQQ6WcfDOS/CnNv87UlAU9xSTFNPEyH6kKFPFkX4/m/w5HjXtZNP+wdMuxJmu9YgVWV4owZtVZwiOSQZg8aAxWGhqtPFCd4wJzkiiCC9FqtdpKK52+vjiQ4HPXv2eH0cZ/yof+ZZqu+9l/ZV3tWl7FvMhmdS2BvIUDNthx+hxx1Hwv33kfjnh/ztioqbxEY3Ykp6j3eK/+m5sdqd8PHNsPJ+z235m77sq9M9NnXOx+cw440ZbK2RNCH3l4LRxcaS+uyzZH74gdc38QRtf0OWrMgsEkMSiQsapHmGO4xdBFoDNORLHbmHC5YuKUsYYOLZLl3q6Oqia/NmOteuQxN64Mbj/s85FRVfk9sbtC2o7UCr0ZIeLulYu5xdrw+CiBSl3QsMytdDVyOYIhVpuNQXtHVjPtu1afMhGbYHIIrYamro2rTZXfe8Rn5zPku3LOWxDYFVZRERLKDRt1LcNsR33hAMKb3STnIDrtFMaDxMuQgmnuNvTwIWh+igtK0UcHJjVhCgcjN0NcDaZ1wbLCYbzlg6Mhq9qjiNGrRVcQqtRkt6hMKNSgbAsm45jn07pd3uAEGjEcjqzbb1tkSCva2NogUnUXLuedgaGrw61lCIViuR552HaeJEwk5yX7/RGeRFQ5wpjU6LHZ1GICPWiZ1KlYBCn5xM9KWXYsxUgxLDlfgIEX3kZkq6fvbcWGcD/PI/2PpGQN3TXaaxCOp2SQ148jzT9m7qaaLF3ILZbmZfvaRX648mZPsjiiLjg7P4+ryv+ftxf1fOsCkCsudLx3Lm6nCgYCVYu6QFkYuazILRSOb775G0ZAn6hPgDPlMzbVX8SV+mbe/G+HhtKnN2Omj/+BP3jbbXulfeG6jIFRVjTwGt521fPGlCZquvV/Q8XxKsD+bUzFM5Pu14f7tyACemnE5n6Q0EtTux+Tr1Eph7ByRP9bpfKsOfuq46um3d6AQdqWGpzl009w7pdeub0FY99PkO71X7qgQ+atBWxWnmpsxlUcYiwg3e07MpveGP7F1wHj1rVnhtDHfI9kEzMgBteDjauFg0wcGY8/O9OtZQaIxGYq//LZkfvI8myLNy4KGQF7NGUSoHzowNQa9Vb08qKr7mmJSJmOsXYGw703Nj6bOljKWuRimDabgiL+Qz5nqsbyrf65JCkijpk4LpX9CLNptH9l2lZflyCo87nrrHn/DOAHJmzs6Pho9EggfSCIJWi2nCBCIXH5iR1GntpLarFlCDtir+Qb7PFNS2I4oiExuDuO0TBwnvuCln9NH18OQ4KPJuJZbPEMUD9WwVQE70kBsau4IuzrmqB2fP8yUTYyby2LGPcff0u/3tygFMTxuLozud0nonnkXTroAFD0HiJG+7FdiUrYW1z0LtLn97EtDIyUdp4WnONSMDaY6cNhPsFljvhNzMp7fAu5dBvX/jAyr+QY2KqDjNndPu5PHjHmdq/FSv2LfXV4Ao7SIZJincvdpD+oK2Xs60BUh5/HFyf/qRkNmzvT5WoCAHMizd0uRT1bMdvjgsFjp++IGGF//tb1dU3GByciKWhgXUVo/F6qmGt1YvZSxBf+BzOFL8vfSqgDSCfK9LDB6D2ebAqNOQGiU1RxFFkYLjjqf4jDOwVvlGhkgTEoKtro6uLVv63nOICmZzDDeJBA+kEQ5HaWspANGmaCKMEYrZVVFxlozYEPRagU6LncqWbqKOmk5+MuweH+qeLmpQtDRn3zaEPuhwwWGH2bdAzkn9FQKemHOIfYke7mTaBh89TdI1H2zjSBDQJSYSfPQ0T9wcVchVk3XtZtp6rH72Zpiw/T1YeZ9UNaUyKPLcLisiy7UL590pvW5aBt3Ng5/XUgG/vg27PgZzm5teqgxn1KCtSsCg7a4k75xa8i7XoIkMrG6I/c15vB+0NYwZg8bkepdZJenavJmuLVsQfZQZdcuRt3DtpGtpaZWy2NyZ4KoEBo7OTiquv4H6p57CWlfnb3dUXCQx3ESIQYvNIVLWqEAzMjljac+K4ZNpeTCXvAOXfayInps8sQ/VJAPShqBWIy3KbTU12BsbMZeUoo31zTMwZNYsxrz6KhnvvM3ru17nhHdPYOmWpcoNMNwkEgRBavIx9VKXpREAGl9eRvt33+Ewmw94X87CcXlBp6KiEHqthqxYaW6VX9tORvIE7rtcx79PtIHGjeXg5Auk171fQE+rgp76Ca0Opl8L//e+pGnqIZUt3fRYHRi0GsZEu25P0GpJuPee3r8MHLhNuPeeA7TJA4V97fuwOXxbNeIM4SY90fE7Mcav4KeSnUNf0NMK+Suh6hfvOxeoyJq+Cmg8j2Tclj/KPRniJ4KlAzb8Z/Dz1j0HDhtkzIPUoz3wVGW4ogZtVVzCITqo7Kj0jvEGKd1fm5rnHfsekB3fr2nrq0AmgL3D+0Higah76inKLrmU5jfe9Ml4Z2Sfwa1H3UpJnfSzVTNthy+6qCjCFswn8vzzwapmMgw3BEEgPcGONnQP3xRv8Nxg9nzQBUFLGdT
JgYXM8pDNZazDodBcYScqvy8HH1Idyj6cQzGZmW0Nh72Fpt0upM2z59+nDTTTfh7+/Ptm3b2LNnDw899FCrVyW+/vpr5s+fz1NPPcWePXu46KKLGD9+fJNV2dLS0liwYAGXdYBKlO0dhYc3/s8uJXT5n7Jg2wQWe4TkDmKP8NG4jxjAu5j1QbKfrYxTaGjLkyo4mOAnnsDj0rYlGnUWFAoBN0HaNrs3+7iTo2k9KSVni5B1C/RA7etDtz/XEfHRh7Jgex6bTm/irT1vsT6jYQ8yURSpOniIgo8/xqzTOSQuURSpPnwYQ2amtCXSCnxcfZjec7os2MrInEd8iJRpm5XvhZvKDXe1e4fIzEvOLadMo0UxqOlFnY7EuVYwXb3tU3TRNTER/5kz27VgC+Cm1qAySbZbOzJtU2QKgD5TYMyT0HOi7fpsAdUnTyJWV2OqqLhgbt2RULi4oDNLu29clLK9n0zbxmrzjuPHjzNlyhRGjRrVqqza83n99deZPXs2s2bNAuD9999nzZo1fPLJJzz66KP1nmMymZg2bRrPPPMMf//9d62Rc33odDp059wQlJaW2ix2mZZRZazi3b3vkl6Wzpsj30Sp6DxVty1FDTKLq6jSm3DTtP/vPSm3AhCIlwvzyDgJr3Hj8BwzhspduzHm5aEKDESXmkLuCy9iOH2a4If/6+wQOyUBLhGcNh/mREGys0NpNZabWbMuiLhwyQpG06ULmjZYrMDZWLbW1leY6FxOz5mDMS8P11690Q4ZbPe4BEEg9q/1VB86hEu3bnYfT0amI2OZ82Xmu3Bo7la0LvVXVq86dJi8d95GqdUS/vrrjgyxRSTVFArubEXICqoLKDOUoRAURHpFOjucNo+PKpwCsjmUe9J2ncaNlb4a4uC3UuHv7tdCSC/bjXsebj17EvfPP+jT0hxuYeRoAtwC0Kq1uKlsW4RMRsbWWJ1pm5KSQkJCAvfccw9dunRhwYIF7N27t1WZtnq9nt27dzN27NkLlEKhYOzYsfzzzz8Nnvfss88SFBTE7bff3uQYL7zwAt7e3rVfERGyJ5mt0e/8DdPJ7WC0LltGo9Cw8vhKNp7aSGZ5pn2Da2P4aTX4uqsRRUjJb//ZtjqjibQCyZ83LrhzTXBl2haCUol28CC8r74K7eBBqENCEHU6dCdOINrKc0ymWUR7SeJdZmX79nw9NwPJrA+UdxU0gSVLqz6PSwuCIOB5+Vg8Ro9GcHHcDh2lpyfaoUMRVNblLOzI2sHxwuPoTc3zwpWR6egEeLjgr9UgigIpeQ0XThKUCio2baZ8899tvnhjyc8/M+Lrt+iXe4Junew6b/mMC/cIt3vWYeWevZy+7z5K1qyx6zj2JEwrCdtppRcW4LMb+7+CTS9B2t92H0rpocWtV0+7j+NsVMUVuOeUomk/tRJlOilWi7bh4eE8/vjjJCUl8fnnn5Odnc0ll1yC0Wjk008/5cSJE80ePD8/H5PJRPB5qffBwcFkZ2fXe86WLVtYunQpH330kVVjLFy4kJKSktqvU6dONTtOmcY58+hCTlwzi9JPXrCqvVKhJNorGqi/2mxH56xFQvsuRvbKzle47sfrUHjtxtNVRZCnvLVEpu3gOXIkkZ99RsRHH3b4TIG2So8AqcJysaH9L84tv2I5QVV3YtYHEBfsQf5HH1H83XeYSkqcHVqbw/L5XlhdSHF1cYPtQp58koj33sW9Xz/HBNYCFv69kBt/vpFjhTbcAisj00GwLNYfzylrsI1LXBzBjz9O5LJPHBVWiyn66WcGpu4mvuhUp8u0teyMsFcRsnOp2P4PZev+pHD5cruPZS/ifKXdGnk6G+sKFflSRu2J3+seNxkhY7v0/6hLbDtmO8Ee9ium8jLMZWWYK9r3PblM28YW712rRdtzGT16NF988QVZWVksXryYv/76i8TERPr06dPqgBqjrKyMW2+9lY8++oiAgACrznFxccHLy6vOl4xtMVdVAaDp3t/qcyyTgs4s2lq2YLVXThSdILMiHTATH+xpl6qbMjKtQTt4kPy+dCKDuiQCYFQUUqFvvxNiQRCI8erKmcw4ENXE+rmS9/Y7ZD3+P0xl7fs6bg/c1e6EaEOAxrNtHU3uW29R+NlnGAsLrWpvMBsI9wzHS+NFtHe0fYOTkWmHJNRYJPyZ/jvXr76e57Y/d0EbQaXC79ZbcOvdG0HRottOh1E17Xa+jhvNvph+BHp0rkSE1FL7+tmei+/kyfhMvZmwRYvsPpa9uCgkHoBKMcu2HR9ZDd/dDv8srns8az/oy8HVG4LtlwGb9+67nLpnDhXb/7XbGM1FrZasVyorG87obwk6o45qDzUE+aPw6FyLNDKOxfLetbyXW4LVnrb14e3tzZw5c5gzZw779u1jyZIlzTo/ICAApVJJTk5OneM5OTmEhIRc0D45OZm0tDSuueaa2mPmmq02KpWK48eP0032KXMsFQV0HZeN2SAgXDzS6tNqRVtHbitpI1hW79t7MbKz24XlImQybRvRYKBw+ed4XXUl6no+W2Tsw0Vh4YhGLYKqggO5SQztcpGzQ2oxmcVVVBlMqJUCEe4Kiv8zFV1aGupw51ZVbqvEeMWQXZFNakkqfYP6NtrWXFmJWadD5etrt3hEvZ7Cj5ciGgx4jBoFVhQiUyvULL9iOaIoyos/MjL1EFcj2p4pqSJVkYRW3b6LMib5RvBpzyvpH+nT6f7mLXN6R2TaqgICCH3qKbuPY0+GRSbCLkBZxuniQrr4WFfcskmia4rnntop2Q6qahYP0rdIj1GXgB1rwZSu+RV9SgpeV11ltzGai1KpxMfHp7ZIvbu7u03+Pouqi8g3FOKh9iBUFKG6utV9ysiciyiKVFZWkpubi4+PD8pW7PxslWhrQafT8ddff7F69Wo++OADq8/TaDRcfPHFrF+/nokTJwKSCLt+/Xrmzp17QfvExEQOHjxY59j//vc/ysrKeOutt2S/WmeQL9liKAIiwN36LGZrC5V0RLoFSZPa5HacaVthqCCnUlpsMesCayfuMjJtkaynn6bku++p3LuHiMWLmz5Bxia4qpWozMGYSGHH6ePtVrT9NeVXtqSdROHiQVefOFx8vAleuNDZYbVpYrxj+CfrnyZ30xQsXUrum2/h95//ELyw/uKztsCsNxBw7xx0J06ibuZcsbOJNzIy1pIQIs398vK68N5/3qObT/2JM2adjsodO9FnpOM3bZojQ2wWnbUIGThWtO0IhHj6Ipi8EZUlbDt1hMk+l9qm44B40AZCRR5k7oGoodLxtK3So52tEcLfeIOyP9fhMWK4XcdpLpZkPotwawuqjdVUGiupVlZTrZYFWxn74ePjU29CanOwWrTV6XQ8/fTTrFu3Do1Gw8MPP8zEiRNZtmwZjz/+OEqlknnz5jU7gPnz5zNjxgwGDBjAoEGDePPNN6moqGDWrFkATJ8+nfDwcF544QVcXV3p1atutUQfHx+AC47LOIj849JjQFyzTrNsv0kpSel0WSwWe4SU/ApMZhGlov1975Ytr4LZE8zucqatTJvGb8YMyjdvxnP0mE53
vXE2vupw8knhaH6Ss0NpMT+l/MTWzK0o3W4gLrjt+q+2JWoXZksaX5hVh4aCwYAuxb4LuEoPLQF3392sc+RrhYxM48QHSaJtVqGavgFD8HStf+unqaSEU7NngyDgfd11KNvYVmRRryf/448pqg6VLHA62Zy20lBJVoW0zT/Gy3GirSE7m6IVX6IK8MdvxgyHjWsrvIUE8ssLySyqsl2nggBRwySbhPQtkmhrNkFGTYH2aPuKtq4J8bgmxNt1jJYgCAKhoaEEBQVhMBhs2repopLq48dQeXvjIu/YlrExarW6VRm2FqwWbZ988kk++OADxo4dy7Zt27jpppuYNWsW27dv5/XXX+emm25qUUBTpkwhLy+PJ598kuzsbPr27cvatWtri5NlZGSgaOMeSJ2Zwu9+o2q3D94+vjRnihPpFYmAQKm+lCJdEX6uNtpW0g7o4uuORqVAbzSTWVRFpL+7s0NqNpYVeWO15C0dL2fayrRhXOPjiV2/HoXGcVXqZSTCtVHkVWwjt7z9Fuwa0WUEKdkiSdXhxAV5YszLQ+nv3+b9GZ2JZWG2qUxb7fARdPvj92ZnvzqChzY9xLHCY/x3wH8ZFTnK2eHIyLQ5vN3VBHu5kFOq42RuOf0j67c4UQcF4T5oEKrgYMwVFW1OtK3491/y336Hq9y9+Pzy/3U60dZV5cov1/9Cemk6Pq4+Dhu3cvduCj76CGVgAL5TpyK0sznaKL95LD+ejq6rjT+/oi6VRNu0rTD8v1CcDgjg4gUh9q0f1NZRKpU2EcDOJeftdyj85BN8Jk/G+9lnbNq3jIytsFq0/eabb1i+fDnXXnsthw4dok+fPhiNRvbv39/qTIS5c+fWa4cAsHHjxkbP/fTTT1s1tkzrKD+QQkW6O+665vlYuancCPMII7M8k9SS1E4l2ioVAl0DtBzLLiM5r7xdi7YmXSCeLiqCvTpXwQaZ9se5gq2cQec4xkXcwJYfu+MR3369hKcmTmXl+kjM1cXEBWlJuW4iok5H9Kqv5ayMBrBk2p4uP43epEejrP9mXOmhRelhfx/M6qNH0XTtisLF+s+q5OJkTpWdwkUpf77JyDREfLAnOaU61iVvY3thOv2D+zMkdMgF7aKWf+aE6KxDofVAO3o0P2UYEQUFsYGdKxFBISiI8ooiyivKoeN6jRtH2YQJeF9zNdhYiHMEdissbcmmPbUDTAbw6wqPpEJxht38bA05uRR9vhzPsWNx69vXLmO0JQwmA2WGMnxdfHG/uD9lv/+O0o6++jIyrcXqNJHTp09z8cUXA5IVgYuLC/PmzZNvfDs5/jNvI/CGgbiPvrbZ51qqMTe1fbIjYvmgb6/FyM4WIQskLthDvg7ItBuqDhwgbdKNVB044OxQOgXdQ/wBZbv28BZFkaScMgBiXU2Yy8owV1WhDg93cmRtlwC3ADzUHphFMxmlGU6NxVReTuoNkzg+YCCmEusyvg1mAxllUtyyx6OMTMNYdlr9m7OZJfuXsPn0ZidH1Hzc+/fD/OzLLOl1LS4qBeG+bs4OqVMgqNV0efMNPMeMQWjHom1yXoFtOw7sDm6+YKiA7JpaPgol+Nnvs6hs/Z8UfLyUnBdfstsYbYkjhUcY8fUIbvjpBjxGjyZ2/Z8EzXvQ2WHJyDSI1Zm2JpMJzTmZSiqVCo82tr1FxvFob7wH7Y33tOjcrt5d2Zq5tcntkx2RboFSZpHNV2cdRK1oqwsirmvnykiQad8UrVhB9ZEj5L72OlGfferscDo8lpuazOIqKvVG3DU2qX/qMHIrc0kvKqFCb0ClUBLTLRzVnt3oT59G4erq7PDaLIIgEOMdw8H8g6SWphLrG9tgW2NeHvnvf4Dh9GkiPnjf5rEYMjNR+voiuLqg9Pa26pzMskyMZiNuKjeCtcE2j0lGpqOQUCPaVlX4g6ppSxRTSQkKL682t9hvmY93DfRol7UmWsOKoyso15czPnp8bUKNTNME+RjRxi4iT1VBhW4k2mbs5GgUhQKmrJBEWs9Q2/TZBK5xcXhddRVu/TuHb7/lOuXv5t/mrkUyMvVh9d2TKIrMnDkTl5oLUnV1NXfffTdabd1tbd9//71tI5TpsFiyVzqlaBvUfjNtjWYj6aXpwNlMWxmZ9kLQww8juLgS+MD9zg6lU+Cn1eAd9icGzTFWH3Nhap+xzg6pWXx74luW7F+CS8hAIs0zUCsVoFTgEiNnXzbFgJABeLt4o1U3bn8guLpS9NVXYDZjyMqSipPZENeEBOK2bsFUXGz1OZZ5SbRXNApB9i6WkWkIyxwwt9Abghqe04tmM6nX34Du+HG6/fE7mshIR4bZINXHj6MODyepZj7e2fxsAb47+R0ni07S3b+7U0Rbc3U1pWt+xZCVReDcex0+fkuJ9Q9CUOoRBDO7Mk8yoqsNi6JHXyIVINuzHP58BiKHwJTP7WaP4D5wIO4DB9ql77aI5Tp1fuE9Ua9vd97KMp0Dq0XbGedVdbzllltsHoxM+8Jw8G+MqQfR9B+JskuPZp9vuVB2StG2dktNhZMjaT5nys9gMBtAVCMafIiTi5DJtCNU/v6EyoUGHIpWW0SZ6jR7c44wlfYl2taxggnvfDfzrWH+xfOtaqf09CRo/jzUkZFWZ8I2F0EQUDXDr85i2yRbI8jINI5lDlhY7INHkDRHrDJW4aaqazEgKBS1uxOqjx1rE6KtKIpk3v8A+jNnKL91IeBFbGDnu85f1+06ThSdIN433inj604mkfX446BW4ztlMqrAQKfE0VwUCgVdqh7m2GklVRf727bzIz/B2keg9Iz0/Pga/p+9+46Toj7/AP6Z2V6v7u71XkCqdAsRI4IRa+wVa6yJhtg1amKCxhgjxq6x/uzGriCKgoUqiAJSrvd+t73vzO+PZQ9Oru3d7s7uzvN+vfIytzc78xwcOzPPPN/nwSOTgRP+ARwWfktCMtAvz/Gu7dvRetddkGh1KHrjdSFDI2RQo07avvDCC9GMgyQg88tPoPuj7UiZ+TZyXv0q7PeXppZiSckSlKWWiW4wUMn+9gi9Di96HV6kaxLnqd6B1giZAFiUi7AqgSQPT20t5MXFovr8ibVJut/gq6qJ0BlnCR1K2AYkbY06tN17LyQpqUi/8IKEubFMBBlXXCF0CAP0V+FQ0paQYWkVUuSmqtBi5qGR6uHwW9FobURleuUh22Yv/zsk6elhPUCJJs5iARgGDIDNMiMAtygrbZdOWjryRlGkmjIZ+hN/A+WkSWBUidVPeGJGBfY0tkS2COfnD4G3LgbAD3zd2hZ8/eyXI5q4dWzcBEVlRdz8u4yFeks9AKAktQQAIMnMhLe6BoxMBs7jCWtoKSGxQGu+yNg5eyBRBqAoGtu00TRlGh6Y/wCumHKF6BImannwIhdIvBYJoZvZgMcArUKK7BTq60gSU+dDD6H2pJNhW7lS6FCS2tzs2fDbJqO9N7E+KzieQ721Pvj/PQZUpMlgfutt9Dz9NHiOH/7NpJ/ZbQbPC/Pn5e/uRt1ZZ6P9b38PK4Y6KyVtCRmtyiwdAAap0uBwxqFW0ClKS+MqMSRJTUX
pqpUoWf0Zdpv9AMTZHiEe5D78MDIuvxySBJuXE/p9idiwVS4QrLD9ZcIWOPDaqtuC20XicB4Pmq+9FlVHHQ1PrTgGg/sCPjTZmgAcWPUry81F3lNPouzrdZSwJXGJkrZkzAxTXKg4rQPpF5wpdCgJqTTSJ/oYOb7oeJxXdDt85rkoM2pFl3AnyYNRqgCOg3P7dqFDSWoH2sEk1mddq70VnoAH4CXgfWkozdDAdOcdSLvoIkiNVGU7Eo7ncNxbx2H+m/PR4ewYcXtvcwvM77wDX2trxGJw/bQD7h074Ny0cdTnKp7nqdKWkDCE+tpKAsGhfaGlx4miT5MGm8cPlgGKMtVChxNTjdZG1Fpqg23PSNhS9VYoTB9gk/W5yOywYf2BlgiD4gFrS3C7CPC3t0NWWAip0Qi5SHr1N9maEOADUEvVMKqNAIItlHQLFsTVQyVCDpZYY5xJ/OA4oKcaAMAYJ4x5N37OjxZ7C3ieF93E0lKDBl/v60q4REauNhcq7ywEnFWoOCyxnogTcrCM310J9axZ0MybK3QoSa3UoIVE+zMaAl3oc09DmjI6fUsj7cCqgkxIWCmK8zOguOACgaNKHCzDQiPXAK7gTVKWJmvY7dvuvBPOTZtguvvPSD///IjEoJo+DTn/eiis9/S4e2Dz2sAyLAr1Y1tJRIiYVO7va+t2ZgKy4WdVWD76CI5vv0PaxRdBNWlSrEI8BO/3AxIJGIZB9f7iicIMDRTS6Ax6ilfP73we/6v6H66edjWumy7sEDCe5+H64QfY130N4x9vFDSW0cpOlUKevgEWTgGO48Cy46yHs4/8gDOs7UYgLyxEyXvvImCziaYI5+CHsmL5mUnio0pbMjbWFsDnBFgZkFY05t288vMrOOm9k/DE9iciF1uCCFWfVSdYpS1wIOZyIw0hI4mLlcspYRsDuWkqqLI+gjRzJTY27RI6nFE70M/WiKIMtehu5iPhieOewKbzN2F21shTqbXzj4ZqxgxIU1MjdnxpejpSlixBypIlo35P6O89V5sLhYSWSRIykor9SdvuvuADueGStrbVq2H54AM4N26KSWxD6XvjTdQcvwi9r73Wf01bKsIhZKG/q5KUEoEjAQJmMxqXXoKep5+G66efhA5nVObkVYDnWTCsBz93No9/h1pTZLcbJYlOPPdzoZUAv/yd57xe9L35FlrvvBN8IDLtJwiJFErakjFxfPkx6j/PRFdVLiAZe8F2cUoxlBIl+EF79yS3/j5IkWxeH2UWjwWv/PwKdvRuBnBgSRwhiY5zOND99DPgvV6hQ0k6EpaBEsEqy21t+wSOZvRCfU1DQ8hcO3bA390tcFSJJU+XB7VsdMuNM664AkWvvQr9iSdGOarhUWsEQsJTZtSCZQCrNbi0uN5aD47nBt1Wv2QJMq+9FmqBH5ja162Dr7kZvNvTn7QVYz/bUAIrHj7vpGlpSPntb5F61pmQRPDhXTRpFUpIA5kAgE3Nu8e/w8IjAX0OgKEqQBlAnxvcbpw4hwM8N/i/02Q21DmekUjQ+eCDsPzvXXj2Jc61KhEHao9AxsSzcztcPXJIjePr/XR07tHYdMEmsIz4nh+Enug39Tnh9gWglMV/FVdVXxUe3PIgOGUagFtRbhLPk1mSvHieR8PSS+DeuRPgOWRefbXQISWdTEU+Wrjd2Lu/rU4iqDUHb2Y5jwHlRg0aL7sYnM2G4g8+gLKyQuDoyEj8XV1wbNwI1dSpkBeOvs1B/w2dXvgkBiGJQCmToDBDg7ruAKSMDJ6AB632VuTp8g7ZVn/CCcAJJwgQ5UB5Kx6B/bvvoJoyBdUf1gMQX9K2z90Hs8cMBkzctILJ/su9QocQNp00B2Z0YkdXNYDF49sZKwFO+Afw1sUIJm4PLmran8g94YHgduPU9Z/HYPnkYxj/uAypvz193PtLFMMlbdPOPw+MTAaJXi9EaIQMiZK2ZEx0514FaVYOJJnD96gbiZQV769gplYOvVIKq9uPum4HJmbH/wlCJVXhCNMCrNvjgkYuQU5KYk2DJ2QwDMMgfenF6FrxKJSTpwgdTlIq1BWhxQI0OxqEDmXU6q31AILtESq1gDQjAz6PB4riIkHjSiQ9rh48vv1x9Lp78cixj4zqPXwggIDFAml6+riO7di0Ga033wLl1KkofuvNUb/vgokX4HDj4cjX5Y/r+ISISblRi7puB1JkOejxNqDOUjdo0jZesGo19McfDwCo7toJQHxJ21DyKkebA5VUJXA0iStbXQCzczvqzBEawHfYKcDZLwOrbh04lEyfE0zYHnZKRA7j2LQJga5usFpNRPaXCHie719FNVh1ufFPf4p1SISMingzZmRcZBUzIKuYIXQYCY1hGJQZtdjWaEZNlz0hkraTMifhtNw7sHrtNkzI11EDd5I09CedBN3xx4NV0oOIaJiYWYb1FsDsaxE6lFExu83odfcCADhPJkpLclC6aiU4jweMXC5wdIlDLpHj7X1vAwBsXht08uFXZ9jWrEHr7XdANW0aCp59ZlzHZhRyqKZPh2pGeNcqebq8uE42ERKPKrN0WP1zB/T8YZiWXwqNbOhEEO/zwb13HyQ6bVhV8NFgcfnQZfMAAEoM4kleAQeStvE4CNrX0QHLe+8h47LL4v6cW5Zagt1OoNPdFLmdHnYKMGEJ0LA+OHRMawq2RIhAhW1I0ZtvwLlpE9QzZ0Zsn/Gu09kJh88BCSNBga5A6HAIGTXxrUknceelXS/htx/+Fm/seUPoUGIu1CKhpjNx+tpWdYSGkImrIoEkN4ZhBiRseV58fbajaU7uRACAj+mG2+8WOJqR9VfZ+lLAQoHizODNPKugwVTh0Ml1MKgMAIB6S/2I28tyc8FZrfDs3TvuXnv6449H0Ruvw3TLzePaDyFkZKF2WVLzqVjx6xWYYRr6YUnH/Q+g/swz0ffG6CvgI4Vzu9F03fXoe+st8D5ffz9bk14BvVIW83iE1N/PNs5awfAch/rzzkPXIytgXf250OGMaJop2C7JwbVFdsesBCieD0w5M/jfCCZsgeAwXu38+WDV42t1mEiMaiPWnLUGL57wImSSwf+9834/XLt2IWA2xzY4QoZBSVsSNq63HZaH/wD3qv8CEUhsWDwWVPVVYV+f+Jp+l/YPI7MLHMnodDo7sbfDCgCooCFkJEnZ161D/Vlnw9/XJ3QoSWN6Th74gBJgeOzoqBE6nBE124NToDmPEYUZmoToOR6vQksQQ0sSh6OoqEDRW2+i7Ms1YNjYX6K22Fvwws4XsL51fcyPTUgiq9yftK3qsI/40FM5dQpYgXpGOtavh33NGnQ/9RQglaJGxEPI4nXoIsOySD3jDKhmzYQkPQ2OTZth+fgTODZtBh8ICB3eIY4sOAwAwEvN6LBbBI6GDIdhGBjVRkw3Th9ym8YrrkT9GWfCtnZtzOIiZCTUHoGEzbPlK7Q+8zmkms9RfsLl495f/w2dZeQbumQTqrQNPemPZy6/CwvfXgjwMoC9DeVGGkJGkg8fCKDzXw/Ds28fep55FqZbbxE6pKSgVkghDWQhIKnH5uY9mJ07Se
iQhnVSyUloaSnGA59tR3mFFo1XXAlWp4Xpppsgy80VOryEUpxSjM3tm/sHuw2HYVmopk4d9zF5vx+QSMJu4fNj5494eOvDmGGcgSNzxj+dmxCxKM7UQMoysHn8aDW7oFA6kanKHHTblCVLkHLKKYI8mFFWVsJw441gVUowDIPq/UUTZQZK2saTzKuvhqK8HG133Al/e3v/69KsLJjuuB36RYsEjG6g/NQMIKADJDZsaNyN0w6bJ3RIw/I2t6DtjjugW7wI6RdcIHQ4cUc56TC4d+0CZ7UJHQoh/ajSloSN72uAKtMDVX5kLnBKUkoAHFimIyahJ/u13XZwXHwvx260NoIHD46TApwK5VRpS5IQI5Eg+69/Qfpll8Hw++uFDieppMqCyc5d3dUCRzI6Dd0ceH8qJqRK4PjuO9hWrgKjomEt4RLiwaz53XdRdcSR6Hz432G9L1OVid8U/YYStoSESS5lg21kGB9O/+TXOPatY2HxDF51yMhkgiRsgWALlsyrr0L60qUADhRNiK3S1hPwoMUe7DEfj0lb25o1aLnhxgEJWwDwd3Sg5YYbYV29WqDIBqdmsgEA29vjf9Wofc0XcG7eDNtn8fVnGAtPbH8Cj2x9BI3WxiG3MVx7LSo2bUT6xRfFMDJChkeVtiRs6gwnihb2ALN/G5H9hS4Wet29sHgsSFGkRGS/iSA/TQWZhIHbx6HV4kJeWvz2FQrdcHMeA9RyKXJSKHlBkpNq+nSopk8XOoykk6MpRI/zOzSMYpl8PKje37+7NCsV+c88A29dHaTp6QJHlXjCaY8AALzXi+6nn4FzyxbkP/0U2DEkyt079/ejC7OF05zsOZiTPSfs4xFCgIosHao67ZBABZZxodnWHPfX9KGkbanIkrb1lnrw4KGT65ChzBA6nAH4QAAdy+8f/POb5wGGQcfy+6E77jgwkvhoXWRQ5KPBtw/7euO/AEm3v0pZmp0tcCSx927Vu+hwdmBB/gIU6AcfRMZqxDWQkCQGqrQl4euuCv7XUBmR3allapjUJgDia5EglbAoygieHOK9RULo7ybgNaDcqAXLhrfslJBE5W0c+ok8Gb3K9OCqii5Ps8CRDM8b8OKKz67AHv+LAONDWV46tPOPpqqLMQqtpmmyNsHH+UZ+g0wG87vvwrl5M5zbto3pmKa77kTR228h9awzx/R+Qkj4Kva3zTpcehe2XLAFkzKHboPjWL8e9RdciNa77opVeLCtWQPn99/390V1+wJo6nMCEF+lbeghWklKSdhtZKLN+f3WQypsB+B5+Nvb4fx+a+yCGkGhvggA0OpoEDaQUZBlZyN96dK4ajERCzzP47LJl+GcynNQkloidDiEhIWStiR83fuXfmSWR2yXYu5rW9Y/jMwhcCTDO7jStoz62RIR4DwetCxbhpolJ8FTE//Ds+Ld4VkTAAAuvg0czwkczdAarY3Y1L4JnHorGEj7e4+TsTGpTVBJVfDzfjTZmkbcnmEYZF71O2T99S9QVo7t4TArl0M1ZQrkBYNX0gwmwAXQZGtCgIu/QTeEJILKrOBnZXO3AnKJfNhteZ6Ha+tWODdtjkVo4HkeHX9fjoYLL4J93ToAQG2XAzwP6JVSGLSKmMQRLzodnWDAxGVrBH9XV0S3i4XZWYfDa54Fv32C0KGQITAMg/Mnno+75t0FvXz4QYi2L75A/YUXovORR2ITHCEjoKQtCQvvcqDqeRvq12QgoIzcMBYxJ21DCYGarviutA31HOa8BupnS0SBkcvBOV1AIADnlu+FDifhzc0vA+fNgN9ZhB6HVehwhpSpysQl5bfC07UYBekaeNd8Aee2beA8HqFDS0gMw4R9jk8791yknX02pJmDDzKKhlZHK05890TMf3N+XD9UICRelZuCD/SrOkae06CaNg3Z99+P/KefjkVo4J1OqGfPgtRkguaIIwDgwBAyozbuqk2j7eJJF2PTBZuwbOYyoUM5hNRgiOh2sbCwdDY8bWeio3UK/IH4PX/0vf467N9+B97rFTqUuMY5nXB9vxXODRuFDoUQANTTloTJu3MD/C4JOB8LNqs0YvsVddLWGGyPUBPH7RE4nkO9tT74/z1GVFDSlogAwzDIuvvP8Pf1QTVp6GWeZHRMeg3kbXegz+lDp5VBvBawpipTYWCOhq8vDeUT1Gi94ybwTidKPvkYitLInffEpDilGD/3/ByTc7xtzRq4d++BdsECqCaP/t9tKLYsTRZYhmoaCAlXYboacikLN9+Dm9beAYb14l8L/jXothKtFqmnnxaz2FiNBjn/+Ad4jusfgibWIWQhKqkKKmn8zadQz5oJaVYW/B0dQ/Yll2ZlQT1rZowjG1pOigpKGQu3j0NTnys4lC/OcE4nOh74B3iPB8UfvD/mlSyJam/vXvh5P4r1xVDLhp8hozniCGT//e9Qz5wRo+gIGR5dlZKwyCbNRdFTy5F7x1URnfwa6nk32kElySQRKm3bHG3wBDzgOQl4XxrKqT0CEQlZTg4lbCMo9HkX7z28q/YPITtML4F69izICgogLywUOKrEVawP/8FswGKB9fPP4d69O6xjWT/5FN2PPQbHt9+G9b5QbKFYCSHhkUrY/Z/xLD5v+hhfNH4BbyC+KvoOvnepEXnSNl4xEglMd9y+/4vBK6DTzj03boaQAQDLMijOVIBVtGNrc73Q4QyKczqRcuqpUB1+OBQVFUKHE3NP//Q0zv34XPyv6n8jbis1GJB6xm8hLyqKfmCEjAIlbUlYWLUOqgWnQ3v+HyO631ClbbOtOe4u8KItlMTotnthdsbnz97fz9abCZVMjtzU+HsyT0i0+bu70fvaa0KHkdBCn3f7Os3CBjKMVfWr8EPXFoDxoqgoCwVPP42y1Z+BkdLipLEay2qarhWPouX3f4D5nZFvsA6mPXYB9KecDPWcOWG9LxQbDSghZOwqTVrwfh3kjBocz6HROvQgz4DNBuuqVVE/rwYsFvi7uw95XayVtu2OdixduRTLNy0XOpQh6RctQu6KRyA1mQa8zigUkGRkQL/kRIEiG5o3/XVoSh7B6saVQocyKGlmJrL/+hcUvf6a6NqBAAc9mI3DPs6EjITuQEhcMKgM0Mg0cPgcaLI1oTRVPEtQNQopslOUaLO4UdPlwMzC4Yc3COFA0taAcqMWLCu+kz0Rt4DNhtqTTkbAbIY8vwDa+UcLHVJCkmn3QVP+L3zQno+b8ZbQ4RyC53nc/d3dcMldYGR/olUFETIxYyLOrjgbEzJGP6RFPW8uHBs3HnLTPpKUk09GysknhxsiVdoSEgEVWToADJTIghe1qLPWoSytbNBtfa1taLnxj2DVaqSdc07UKifN/3sXnf/8J9Ivvgim24MVnP4Ah7ru4ADgMoO4PudrzDXY1rkNFo9F6FCGpV+0CLrjjoPz+63wd3VBajBAOekwcE4nZEaj0OEdIk9TiA7zdnTZ43slkRj5OX9/m7/Q6t6RBOx2OL5bj0BvD9LOOy+K0REyMkrakrCYl/8OrE4HzVnXQZIVuWoUhmFQrC/Gzp6dqLXUiippCwSrz9osbtR02
jGzME3ocA4xIGmbL66KBEIAQKLTIeXUU+Dc8j2kGelCh5OwSjMNYDvssAVahQ5lUB3ODrj8LvA8C96bjhLD8H3PyOjk6/Lx5yP+HNZ7dMcfD/2iRVGK6FBUhUPI+FXsf9DlcxsARe2w1fWKslKoZs6EsrICnMsFiTY615feujqA5yHLy+9/ranPBW+Ag0LKIjdNXKvHKtMr8cD8B8Ag/gswGIkEmrkDV00c/Hvi2rULkpRUyPMiNxx7rH5bejHWvT4dTEH83cf5WloAALJc4f+chNBib4Gf80MpUSJLkzWq9/iam9Fyww1g1WqknnUWrbYigqLfPjJqPMeh4/WvwfkYlMz9TUSTtgBwyeRL4A14MSVzSkT3mwhKDRp8W90dt31t+5O2HiNVnhHRMvzxj2CkUrpwG4f5BVNx38fXQxowIsDxkMRZ1X6tpRYAwHkzkJ+uRfupp4BVq5Hzr4egKKZkXiyNZfmmt7kZEp0OkpSUsN7X5+5Dn6cPAFCop97FhIxVZVbwGtFmTYPUcOAzdTCMRIKiV/8v6jFl3/dXZF57DVj1gYdwodYIJQZt3J2Hoi1TlYklJUuEDmPcXNu3o/GKKyFJT0fRq/8HqcEgaDwVxlQADGq6HOB5Pq5aEPT897/oe+11ZF5/PQzXXyd0ODEXuo8tSika9aBRRXk5VIcfDuXECcGHSjq6/yXCoTtPMmp8dwN0uS54bFLIJs+L+P4XFy2O+D4TRaifVrwmbQ8kMgyoMFGlLREnVqkUOoSEV5yZCqm/AF4/h5Y+Fwoy4quS9eBVBZP1LLz19QAAaUaGgFElB6fPiQZrA9QyddiJ0YDZDElq6ojbdfx9OexffYXsv/8NqWecMer9h/7eczQ5I06VJoQMLTdVBZVMAp/bACnC62MdTbLs7AFfi7WfbTKRZmdDkpoKmdEIRiV8tXRxpgYMA1hcPnTbvTDoFEKH1C9gNgMsC+VkcQ7WHUv7I0YiQdHrNMeCxAcaREZGjbU3IGeeGcXnpoJV00VOJIWG89R0OQSOZHD/O/k9eJuuBecxUaUtET2e42B+5x00/u534AMBocNJKBKWQUmmBgBQ3WUTOJpDHbyqoCDPgNLVnyHvqSch0esFjizxPfnjkzj747Px+p7XR/0e9759qD5uIep+ewZ4nh9x+4DZDACQl4S3EohaIxASGSzLoMKkBecNVj3WWepG/LfL8zx8HZ1RiWeoY/cnbQ3iu595a+9b+K7lu4Qf/CwzmVD48kvIf/aZqLXWCIdSJkF6wUfQlDyEldXrhQ5ngNyHH0b5N19De+SRQociiFDxEZ3jSaKipC0Zve6q4H8zK6Kyex/nw8a2jXhjzxujujlLJqX7n/Q39Djg8cdfEsjmVMBjL4BSqkCeyHp/EfJLgd5edNz/ABxffwPrJ58IHU7CychshsL0Pj6oeU/oUA5Rb6kHsL9/d3YK5AUF0C1YIGhMyaI4pRhpirRRL00EAHluLnwdHfB1dsLf2TXi9kWvv4byDeuhmjw5rNgoaUtI5JSbdOC8GWAggcvvQoezY8htOacT1b86BtXHHIOA1RrROAJ2B6qPOw5tf/4zOLd7wPequ8RZaWvxWHDfxvtw9RdXw8f5hA5n3GQ5OWAPqrJ1bNhwyN91LKlUVrCKbvzUsVewGIYizcgAI4+/Ydex0H+OTw3/HM/zPHwtLaLLTZD4Qu0RyKjxHXuCLeszy6N0AODqz69GgA9gQf6CUTcKTwZGnQI6hRQ2jx8NPU5UmOKrmnVfR7AirsyoBSuy3l+E/JI0MxPG224FZ3dAf+KJQoeTcFTaTsgDG7GrzwfgaqHDGaC/FYzHiHKR3cxH22llp+G35b8N6z2sRoPCV16GsqICrEYzqvdI08IfAkNVOIRETqVJB0ACJYxwoQ21ltohr+lZtTq4tF0qhae6BuoZh0csDse338Lf2gbn5i1gFAeWqvM8jxqRtkcIJa9MahM0stF9piYK83vvo+3OO6GdPx95j/0HjEwW8xhMqgJY3DtQa46PtiAAwPt8gvxZxAue58fUHgEA+EAANb85Eb7GRpSu/gzygoJohEjIiChpS0at6fGv4OkwIbtQimhc4sgkMszLmQcZK0v4JTvhYhgGJUYtfmwyo6bTHldJ2/eq3sM7e7dAospFufEoocMhJC6knXWW0CEkrIkZpdhkAfp8zUKHMoDNa0OXK1jNyXkNMKx8B2ZDJnS/PjbswVbkUOFU2B5MfXjkkjhDoUpbQiKnfP/sA85rAORtqLPU4cicoZdl5z/9FGRZWQMqJiNBd9yvUfD8f8E5nQOGQnVYPbB7/GAZoChTXD2sk/mzTp6XC0Ymg9RoBFhhFhOXppZgXzvQ4W4U5Pi/5KmrQ/3Z50C3cCGyl/89roajxUqvuxdWrxUMmLD76TMSCaQZGfC1tsJTXU1JWyIYStqSUfN0ueB3SSDJiVKlLYCnFj4VtX3Hu1KDBj82mfv7bMWLLxu/xE+2tWCVp/ZfiBNCDuB5Hv62NshycoQOJSHMzp2AF2sBL9MFX8AHmSQ+KkD6WyP4dMjX6mF97K+weL1Qr/6MkrYJoPXOOwG/HxlXXglFWdmo3+cNeNHqaAWQnIkMQmKtMitYeGC3pUOWMfIwMkVxdP7dMTIZNIP08AxdZxdmaKCQSqJy7HhVZ03epK169mwUv/s/yEtKBEtOTjNVYGU7YAu0CnL8X7KvWwfOZoO/s1OUCVvgwEqaHG0OlNLwBwrn/PNBSDMyIv5QiZBwUE9bMmolK1ejaMXdUMxbJHQoSenAMLL4StqeVHoStO5fI+AspCFkhPyCr6MTjRcvRf35FyBgj89BgvFmVm4x+IAcYDjs6KgROpx+oZtZzmvAxHQ5Us85B5qjj4YsL0/gyJLHw1sfxgn/OwEr61aG9b6+t95C03XXw1NdPej3+UAAtlWfwfLBh+ADXFj7lkvk+O7c7/D6kteRocwI672EkENl6ZXQKaXwe4LDyEIPxOJFdWew5VepCIeQJXOlLQAoSkv7k5M8z8P+zbcx7UU6L38iAICT9KHPKfz9XPpFF6HwtdeQed11QocimNDvfElKeANKQ+R5eZSwJYKjpC0ZNYkxH6rF54FNie5NDc/zsHojO4wgEYT6atV0xVfiZ2HBIvQ2nQDOk4MKqrQlZACJXgdfWxsCZjPcO3cIHU5CUCukkHImAMDmlj0CR3NA6MKe8xhRmG9A1p13oOC5Z8EItMwyGVk9VrTYW1BjDi9Zb/tsNexr1sCxfoiJ3DyP3If/hczrr4eiNPwbM61ci8mZk0VbiURIJDEMgwqTDpwrH0dknopTy04d8T29L7+Mpuuuh7ehISIx9L70EnpeeBG+js5DvifWIWTAgQR6siZtD9b5wD/QdOWV6H7iiZgdszTDBATUYBge65uEv75hJBKoZxwe0V7RicbqtULGykTxO0+SF7VHIHGlqq8KS1cthUqqwpqz1ggdTkwdXGnL83zc3Dw29Trh8XNQSFnkpYmr9xchI2FVKuQ+/C9I0jMgz8sVOpyEkSLJRS+asKtr8MpJIdSa9w8h8xpoVUGU
hG6aQssVRyv1rLOgnjcXmqOPHvT7jFQK7THHQHvMMeOOkRAyfhUmHbY2GFEhPRInl1aOuL111WdwbdsG3fELIS8Mr+/kL/Ech57n/gt/VxcUZaWQmYwDvl8t0iFkvoAPTbYmAOEPZEpEstxgyypJSmpMj6tCNlyowQ9t+7CkclZMj00OdcWUK3DJpEvGNS/H8sEHsK7+HGnnngvt/MGvQwiJJkraklGxPvNn+PbtgubEs6D89XlRO45RbYTNa4PNa4PD50i6yabDKcxQQ8oycHoDaLO4kZMq/FKMZlszPqveA7BOlBqyIGHjI5FMSDxRTZ0qdAgJJ0dTiF7Xxv6WBPFAKVUCnBqc14hyuTeuHp4li9DyxJF6XP6S/oTF0QgHAPDcjufQ6ezEaWWn4bCMw6J2HELEJLQya2+HbVTbp517DnTHHw/19OnjPjbv9yPjqqvg+OYbqOfOPeT71Z3BFW1iS9o22hoR4APQyDQwqo0jvyHBpV98MdRz50JZOfJDg0jKUOSj2V+DvT3Ctn9qX74c0rQ0pJ55JqQGg6CxCE3KSiFlx572cm77AfY1ayDPz6ekLREErfkjo2Jd+QU6P94N5zdfRfU4KYqU/p5y4d7UJTqZhEVhRrCSNV762n5a9yke2/tHKE0fU2sEQkbBU1cH62erhQ4j7pWnlQIAutxNAkdywB2z/wbb3j8j4CiD+rpLsG/OXHiqqoQOK6mEKm0brA0IcIGI7de6ahVcO3eB9/nCfu/q+tV4fc/r6HB0RCweQsSu0hRcrbC3sxM/dv2IRmvjsNunnHIKMi69BPKionEfm5XLkX7hBch/+imwcvmA71mcPnTbPQCCA4DFpL+frb5YNA8kD07Ycl4vnFu3Rv2YhfoiAECLIzKtPsYiYLOh7/U30LXiUQRs8XFPmcj0S06E8eabkXL6aUKHQkSKkrZkVDRZbugLnFDOmB31Y5Wkjq0SJxn0t0jojI8TbH+PR68B5SZaLkzIcNy7d6Pu1NPQettt8LW0CB1OXJueVQ4AcPKtMR0SMpzgcBoGpUoenMUCzm6HLJdaXkRStiYbCokCPs6HVnt407U5rxfOLVvg/P77Q15vufkW1J95Jnwd4Sdel05aiksnX4qJGRPDfi8hZHCha8YO6Xu48NML8V71ewJHFFTdFaz8DQ5LkwkcTWwl+xCy4XBuN5qvuRYNl1wK+7p1UT3WYZllAACzrzmqxxkOI5Eg+567kXrOOVCUiO/vO2Rf3z6c/dHZ+NvGv41rP5o5c5Bx+WUxr9omJISStmRkAT/SspuQe6QZ6vnRW6IYEuqzJMqk7f6lWtVxUmkb6jsY7PFIlbaEDEdRWQnllClQz5gBiKSKZazmFVSC51mA9aDJ0i50OACAqo7g525OvhETtm1FyScfg1VTH+9IkrASFOqD/SrD7WtrfuttNFx0Mboef3zA64E+MzRz50JeVDSmJPuSkiVYNnMZsjRZYb+XEDK4TK0c6Ro5OI8BaYrMUS1N5hwOODZugrd57A89fW1tsK9bB847eP9KsfazBcSdtGVkMkhSUsBIpWBk0U3Wz8mdAADwsh3wBSK3oiQcrFqN1DPPRPZf7hXk+PGixlyD3b27sa9vn9ChEDIulLQlIzM3AJwPkKoAfV7UDxe6mBBl0ra/0tYhcCQAz/MDpqlTpS0hw2NYFvlPPYn8556FLCdH6HDiWrZeC8afDgDY2Cz8hOUXd76IFXsvhyz9a1SYdGBkMihKSoQOKymNta+tZt5cSDIyIMse+G9LZjKi4LlnUbpqpWiW/BIS7xiGQblRC1/fkbih/BVcN/26Ed/TesedaLzkElhXfjrm41o+/AhNV12NlmXLBv0+JW1FmrSVSJDzjwdQ9MYb0Bx5ZFSPNSOnFDwnBcP6sb1NfPey8WRO1hysOHYFrpxy5bj3xblccGzaDOeWLRGIjJDwUNKWjCjQ+BM4PwNklgFs9H9lxjpdOhmELiLjoadth7MDLr8LPM9CyhlQkE4VZ4SMRKLTUeJoFBiGgYYJJt+2dwhfAVFlroKdawfD+GhVQZT1P5gNcwidvLQU5d9+g5zlf49YLPv69mFL+xaY3eaI7ZMQElSZpQPAYF/n6IaRqaZNgzQnGwwrGfMxGZkMUoMB2vm/GvT7oaRtqQg/5zNUGchQZogyaQsEfzeUlRX9X/t7euCpjfy9plwqRarrDLiaz0eXeey/y2Pl/vlnWFd9Bs4hfAGQ0DJUGfh1wa8xP2/+uPdlfu89NC5diu4nn4xAZISEh5K2ZETm/32Ave9ko33D2KcuhiNUhdNoa4SPC3+oSCIr2T8UodPmgdUt7M9+oJ9tBkoNKZCwlIgiZLR4rxfdTz2FjvsfEDqUuFWsngVv3xz4XZlCh4JlM5dB2X0dfNbpOOydZ9D5yCPwdXYKHVZSGutqGoZhBn0gwo9j+embe97EZZ9dhpd+fmnM+yCEDK5i/wqtfe3BpO1I/cvTl16M8i+/RMbll435mBmXXYqydWuROsTAoFD7sTKD+JK2jx33GNaesxalqaVChyI4X3s7Gi68CI1LL4G3KfIDUaem/AZ+21S09sW+Z3/f66+j5cYb0fnwv2N+7GSmnjkTUqMR0uxsoUMhIkRJWzIiX1uw36DEGJsPKZPGBJVUBT/nR4tNXMN89EoZjDoFAOGHkR3cz7bCJL6LW0LGw7VzJ7oeWYHel1+Ge6/wlaTx6NjcU+Bp/y0cVuGrfuSMHl1d+ZC6UyD79AP0PPU04PcLHVZSikQLpFAFUcBmw97Zc1B/3vng3O6w9xOq9g09LCaERE4oafuj6wUc9/Zx+LLpy2G3ZySRqUpkWBaMXH7I625fAM19LgDibI9ADmCVSjBSKSCVgo/Cub6/3Z0AKydlefmQFRRAd9yvY37seBLgAnhux3P4ouGLiBSBKSoqULZuLXL+HrnVPoSMVmxKJ0lCy/rvKmQ218TseCzDokhfhN29u1FnqUNRSlHMjh0PyoxadNo8qOly4PCCNMHiGNDPli5uCQmLesYMpF92GZQTJ0BRUS50OHHpQA9v4dvBVO2PIUsng+m22+CtraVqiigp1BeCAQOzx4xedy/Slemjfm/A7kDj0qXw7NuH8g0b4N61C7zTCX9nJ1ilMuxYxNzjkZBoCz3wd/rt8Dk7w3pQw/N82K2G/N3dkGYOvXKjpssOngdSVDJkag9N6iazsfx5JjNJaioKnv8veJ8vKjMI8jIYSDR7sK2nGsDUiO9/OJlX/Q4Zvxt/D9dE1+Zow4ptKyBn5dh8weZx74/+/RAhUaUtGRVpXimkebFbThNK1Iqxr62QT2cPVm+pBxCstC0z0hAyQsJluuVmpJx8Ml3oDaHMqAUYH+qtVXD5PILFsbd3L57+6T+QanehMCcD6RdegKy7/0x/b1GikqqQow3eJIdbbSvRahCwWsH7fHDv3AH1nDko+fRT5Dxwf9hxWL1WdLu6AQBF+qKw308IGV6qWg6jTgHOYwAwun/vlo8/Qe0pp6LzHw+GdayAxYKqBcei9tTTELAPfv188BAysX2+P7D5ASx+ZzHerXp
X6FDihtRgGJCw9VRVIWAbXf/lkajVZqgLXkSr5I2I7C9cQ7UTEpPQ501hSiEk4+iTPRjO643o/ggZCVXakrg01unSyaB0f1/basHbI4Qqbak9AiHjxft8CNhskKaPvqow2eWlqaEtexCM1Ib1jdNxXOlMQeLY3rkd63vehix1AsqMSwSJQWxumHED5KwcpSnhPwzOfeifkGZlQ2YyAgAUJcVASfiVsqHrC6PKCK2cznGERENllg4b2oP/VkPFAMPiOXj27QOjCq9y3rVjJ8DzABeARDv4v+fQqg4x9rOttdSi1dEKlqF6rcG4fvwRjVf+DorychQ89yxYlWpc+zuiYCICX2eB8xrRaXPAqNNEKNKh8TwPb10d5MXFok/YAgetpNFHbiWNt7kZzdddj0BvL8q+Xkd/ziRmKGlLhuVe+y76nnkIqmlTkXrrUzE77linSyeD0ERbISttbV4bulzBITzSgAkF6WrBYiEk0bn37EHrzbdAajAg/7/P0UXefhKWgYLPgscfwJ6uNhwn0GyUA/27jZjmbIe/JwvSjAxhghGJ3xT/ZszvVU2bFpEY+m/oUqk1AiHRUm7U4bvGA5W2Iy3T1xx5JHL/82jY/861Rx+F8m+/gb+tbcht+oeQibDl14O/ehB1ljoU6AuEDiUuMXL5/qQ/B97nA8aZtM3U6JDaextaLW409nhikrT17KtC3amnQlFZieL33gXDijtBH8ohRLL9kdRohLeuDrzXC19LC+R5eRHbNyHDoaQtGZZr01qYt/XBZ92K1Ftjd9w5WXPw/OLnRdlnLnQx2djjhC/AQSaJ/Um3vzWCT4eSjExIBYiBkGTBKpXwNjTA390Nf2srZLm5QocUN2Yrb8aqnb1QVEwULIZQ8i7gzkTlw3ei6l4Lit97F8qJwsVEhscHArB9+SXM7/wPqsmTkXntNWEPMYpGFQ4hZKDKLC04bybAM7D5bOh2dcOgNgy5vTQjA/rjjx/TsaRpaZCmDT0L4uD2CGKTpkxDmlK4ORnxTjlxIgpfeRnyggKw6sgUqpQatWi1uFHTZcesouivsvLs2wdGJoMsJ0f0CVsAqDUHH8hHMpfAyuXIf+ZpKEpLITUM/TlGSKTRv2gyLGW6D5mTbNDPrYzpcdOUaZidNRuZqqEHCiSrLL0SarkEfo5HQ49TkBh63D2QMkpwXgPKTdTPlpDxkBcVIffRFSj59BNK2P5ChTF4IyNkO5hQKxiVIxXS1BRAJoO8pESweMTA5Xfhy8Yv8cae8Pv9WVevRtXR89Hy+z/AsW4duh9/HNXHLYR19eqw9kNDyAiJvnKTDuClYPzB1QtCtT3zBzjUdTsAiDNpS0amnDBhQMLWtWMneI4b8/6CM0o47O7oikB0I0s5+SSUb9gA0513xuR48a7eWg8g8ud4zbx5lLAlMUdJWzIslbIDhik2pJ44tqfeJHwMw/QPIxMqkbEgfwEWKJ6Gq/liVNDFLSHjpluwYNgKILESuh2M0+dEh7MdAKDQlqBi9Weo3LIZrEIhSDxi4fQ5ccNXN2D5puVw+92jfp919Wq03HAjAn19A173d3Sg5YYbw0rcUtKWkOgr3/8Z73MHizBGk7T1d3XB/M476HvzrVEdo335crQsWwbXzl1DbtPY64QvwEMpY5GbOr6l74lmc9tmPLTlIXzd/LXQoSQMy4cfov7cc9Gx/H7wPD+mfXiV26CtvAdruv8d4eiGJtFqIM+j4gCz24xedy8AGjRKkgMlbcnwuquC/82siPmhN7RuwMNbH8a3Ld/G/NhCK4uDvrY1XU6AU6KchpARElGu7dvh2r5d6DDiQnYaoMp7EXukf4af88f8+KFKDM6vQUWmCUCwnQWJrnRlOmZnzcaJJSfC6R/dihI+EEDH8vuDfQcP+WbwtY7l94MPBEbcly/gQ5OtCQAlbQmJJp1ShtxUFThvcBjZaGZVeGpq0HbXn9Hz9NMjbssHArB+9DGsn64EZx/6mjlUBFGSqQXLiquv/Ma2jXjp55ewrmmd0KEklkAg+Ds1xmrb8owsMKwP1kBrhAM71FgTy8kqdG2XrcmGWhb5uSzm999H6513wt/dHfF9EzIY6mlLhsQ7LPA2tkKuBRgBkrbftXyHl35+Cb6AD0fnHh3z4wup1BBsWC9U0pbjeFR1BI9N7REIiRzLJ5+g9aabIS8qQvEH74OVy4UOSVCHZRkg0dQArA8/d9ZjalZZTI8fqvrivAaU59ADqlhhGAbPL34+rPc4v98Kf3v70BvwPPzt7XB+vxWauXOG3VeTrQkBPgC1VA2T2hRWHISQ8FSYtOhsPzCMbCTKyVOgnjMHqmnTwPv9YKTD3K6yLPKffgr2tWuhnjVzyM3EPISMVhWEL+WUUyDLyYFqxowx94edlz8R2AkEJN2wuJ1IUUZvqHPTVVeBkUhhXPZHKMrLo3acRBEaMBut3/ne51+AZ98+aI85BvpFi6JyDEIORklbMiTPtnWoW2mERMmj4r7YT9Kemz0XXs6LudlzY35soYXaI9QI0B7Bx/lw5gfngDcqIOs4G4Xp0bvIIERstEcfDUlmRvBm1OsFRJ601SrkkASM4NgWbGreHfOkbejCnvMYccJr/0TLugwYlv2JlhfGIX/X6PoCjma7g5MYw02yJ4SMX0WWDl83jj5pK9FqUPjyS6PaN8MwUE2dCtXUqcNuJ+YhZJS0HRv1rFn9/5/nebh//hmqSZNG/f7KzBwgoAQjcWNz0z4cXz49ClECAbMZjm+/AziO+tnuF+3f+ZTfno5ATy/kRUVR2T8hv0RJWzIkf2sDWBkPeYYwS0Xn583H/Lz5ghxbaAf6PDrA83xMbyqbbc2otVZBqpWjwJ8GqYS6qBASKZKUFJR+9BEkqalChxI39JJcmNGCnV3VMT926MJe5khDyk+fwvojD9Ptt8c8DrHyc35YPBZkqEZ+MDzawR+j2c7ms0En01ESg5AYqDDqEPAG/122Odrg9DmjsmR5ODUiTdr6OT8abA0AgJIUGrA5FjzPo/Ohh9D7/AvI+ec/kXLSklG9j2VZKJAND+qwtW1v1JK2bEoKit97D86t39MD5/36k7b66JzjMy65JCr7JWQolI0hQ9Ke83tU/Pgz8t9eJXQoolOYoYaEZWD3+NFp88T02Ca1CScb74a77XRUmPQxPTYhYkAJ24FyNAUAhJkqXmsOHtPnMyHt0cdhuv02mgocI5vaNmHOq3Nw9RdXj2p79ayZkGZlAUM9xGQYSLOyhl0iHXJa2Wn47rzvcM8R94QTMiFkDCqzdEBAAwSCCdNQv8mR8IEAvM3NQ37fU1ODrv88BvfefcPvh+dR0+UAIL6kbYu9BX7OD5VUBZOGWsGMFe9yBVvwdI9uxUdIhjwPALC3pyYaYQEIVpsrKyuQfv75UTtGoqHqcpJsKGlLhsWwLCTpWYId3+61Y0fXDpjdZsFiEIJCKkHB/rYEsW6RoJap4bVXwG89HOVG6mdLSLQE7Ha0//U+mN99T+hQBFWeVgoA6HQ3xfS4AS6ABmuwAkkjz0f28ccifenSmMYgZlmaLPg4H+ot9e
D4kQe9MBIJTHfsr4L+ZeJ2/9emO24HI5GM6vgMw0AppaFzhERbqUELhgFc7Sdh+RGPIF+XP+J7fK2t2DdnLmpPOhm8f/AhldZPV6L78cfR9eijw+6r3eqG3eOHhGVQlKEZ08+QqGrNwRZARfoisAzd9o8FwzAw3XUXCl58MewKy3xdIQCgyV4f+cDIoHieR0lKCfK0eVFN2vIcB/fevfC1tUXtGISE0Kc3iWvXrbkO5396Pta3rhc6lJgLDSOrFmAYWVWHDUBweAQhJDos772PvtdeQ+c//oGA3SF0OIKZnhUcdOnkW2M6AbnV3go/7wPPSVGeOXISgURWrjYXUlYKd8CNdscwA8YOol+0CLkrHoHUNLBiTGoyIXfFIzQQhJA4pJJLUJiuht86HRnsNOjkIxcESLOyAJYFWBa+IaptlZMnQXvccdD/5jfD7ivUz7YwXQ25VFy3vnXWYMVhUUqRsIEkOIZloZl3YMYK7/PBU1s74vsmZgSHgvX5hq4YHw/Hhg3ofOghuHbtisr+ExHDMPjPcf/ByjNWwqCO3sqptj//GXWnngbz/96N2jEICRHXmYuMGu/zonHxVLRfNB+B7uicaEYj9IQsdNEhJv19bWNcafv23rdR7fwGYJ0op6QtIVGTdt650C1ahNx/PwyJVlzVPwc7oqASPM+AZ51os3fH7LgcOBQqjoLffhh+3bkHzm0/gPPEth2NmElZKQr3VyGF0xpDv2gRytZ8gYKXXkLOQw+h4KWXULbmi1EnbDudnTj5vZNx41c3xvQhASFiVm4KJmr37i8KGAnDsih+711Ubtk85LAf3bHHIv/xx0bsMRpK2paKrDUCQMvEo4Fzu9H8+z+g/tzz4N67d9htZ+dWAgA8TDv8gUDEY7G8/z56nvsvrB9+FPF9k+GppkwBo1aDd7uEDoWIACVtyaB8+7bB0eCDeWsX2BSjYHH0J20F6HUotFLDgWFkscLzPP71/cNgTa9BrrCiUGTLyAiJJUYqRd6jK6A58kihQxFUjl4Pxp8GAFjfuDtmxy3UFyLLczk8zefiqHceR8P558Pb0BCz45MD5/hay8gVSwdjJBJo5s5ByklLoJk7Z9QtEYDg9US9tR7V5uqYDvkkRMwqTTqAdWNt8xd4Y88bo3qPPC8vrH/bQ6kW6RAy4MD9Ew0hiyCOQ8BiAe/xwN85fI/bWXll4HkWDOvDzs7It4DSnXAC9EuWQHfC4ojvO1H5Ar6YHCfltNNQuXkTjDfdFJPjEXGTCh0AiU8Sbwey5/QhIM8GI5MLFgclbQ9cbMZCj7sHDr8dPM+gUF8ImYSe6xASKwG7HYxUClYprj6bDMNAzeTAiV782L4PZ076VcyOXdVph8rvATd5GpTtzVAUUzVSLAlxjp+UMQnPLnoW3oA3ZsckROwqsnRgJE5scz+KHVtkOKviLEjYsSdk7d99B9W0aZBoR07E9idtDeJK2vI8T5W2UcCq1ch/+il4a2uhmj592G3VMgWkAQMC0g5savoZ07OLIhqL7thjoTv22IjuM9H9ef2fsb5lPW6efTNOLj05ascR27U6ERZlZMigJO5mpJa4kLFoqqBxhC4y6i31CHCRX1YSz0IXl6EBCrEQurjjfemoMKXH5JiEEMD+zTeoXXISup98SuhQBGFUBnvKVvVFb8LyLzVb29HYa4dTpkLOU0+h7IvPwchkMTs+ESZpq5VrMS97Hn6VF7uHA4SIXYVJC96XCriKsaR4CVz+kZcU8xyHzn8/goall8Df19f/ur+nB01X/g5VRxw54PWh1HSJs9K2190Lq9cKBsFCDBI5Er1+QMLW39cHf/fg7Z1SpLkAgJ1d1bEITfTqLHXo8/RBI6PVoiR5UNKWDK5rf4+ezApBw8jR5EDOyuHlvGh1tAoaS6ylqGXI1CoAALUxGkYWunHmPAaUi+zilhAh8R4P/B0dsH3+OXiv+CoAQ8m7NmdjzI551sdnQFNxN1L1vcjQCLeiRMxCS3bFuJqGEDEpydRCykpgq78K1065A1r5yNeYDMvC9tlncG7aBPeOHf2v+1rbIC8ogKK8HNK0tGH3YXZ60W0PnlPF1tM21HYmV5sLhUQhcDTJy9fRiYaLLkLj5VcgYLEc8v1sdTBhXh/B+Sw8z8Py0UejemghNi8sfgFvnfQWZmXNivqxnFu3omHpJWi55ZaoH4uIG7VHIINybt8BiVUCeVophOz4JmElKEwpRFVfFeosdcjXiWvCd6lBg267B9WddkzNS4368UIXeJzXgHLjyNN9CSGRoVu4EDn/eAC6xYvByMWXQJxsLMdXPYDF3xKT41m9Vrj9LoAJoCwtj3qbCiQ00bzH3QOLx4IURUrUj/nsT8/CpDFhYcFCqGXqqB+PEALIpSyKMjWo7rRjb7sN2SmqUb0v44rLwfM8FJUT+l9TTZmM0lUrEbBaR3x/qDVCdooSWoW4bnu1Mi1OLjkZqcpUoUNJarzbhYDFAoZh4e/thSRl4Hns17knYfPqbKhyKiN2TPfOXWi9+RawOh0q1n9Hq4QOopapMTFjYmwOxjBwbtoESUYGeJ6na0kSNVRpSwbV/G4baj81wW0R/slssV7EfW2NoWFkMaq0NYcqbY2oMImrIoEQoaWceqpoe2QdmT8Jns5FcHecCI7jo348vVyPc42vwFFzM/70+nLUnXEmPLXiO8cITSPTwKQ2AYjNOd7hc+DRHx7Fnd/eCR8Xm2ElhJCgSlOwGGBPex+6XYMvJf+l1DPPRNpZZ0FmOnQoskSvH/H9Yh5CNjFjIpbPX45bZlMVYDTJCwtR8N//ovC1Vwftiz8nvwKcuwD1XVzEjsk57FBMnAjNkUdSwlZAysmTkXXvvSh86UWhQyFJjpK25BCcrQ9SnRSMlIdi+nyhw0FJqniXT4b62tZ0OmJyvGpzsNKW8RtRmEG9gAgRiu3Lr+Dr7BQ6jJiZYDQB5oVwWyahxTxyr8NIqO50QeVUI7WzGe5duyBNH36ZLYmOWPa1rbfWAwDSlekxqeolhBxQbtJCov0ZT9SfjWVrl41pH5zTCZ4f/YO9UNK2VGRDyEhsKSsqIM/L6//a29QE3hd8MFhiCN5Pddo8sLoj87BQM28eSt57Fzn/fDAi+0sWXzR8gb9u+CvWNa2LyfFYuRxp554DRVkZVdmSqKKkLTkEq0tDyTc7Ubl9J9jUTKHDoUpbxKbS1ulzotPVDgDI1xZCLqWPB0KE0LliBZqvvRYdy+8XOpSYkUpYFGcGb2yqY7SyoLrTDpdUDvMzryPviScgSU2NyXHJQP1J2wj2+xsKTVInRDiVJh14Xyp4cGFd0/u7u2Fbswa+jk603XMvahYthu3Lr0b13mqRDiEDgCZbE/xcbAYZkwNcO3ai/qyz0XrHneA5DnqlDOnGnVCYPsTXdTsjeixWhO20hrOxbSPe3vc2tndtFzoUQiKKsjJkSIw0Pno/CTFdOl6U7n86W9/jgD8QuWU1g2mwNgAAOL8GlcasqB6LEDI0/aJFYBQKyPPzwXPR/XcfT/INPki0P+Obxi1RP9bfNixHu/w5MKpWlE6fA
N2vj436Mcng+s/x5uif42v3ryYJDUAjhMROuUkHzpsJnmdg9pjR6+4d1fta/rgMzdddD/vX6+DcuBG+piZIUkdXKS/W9gguvwsnvnsiZr86GxbPocOxSPT4e7oRsNvhbWgA53QCAORp30Oevh4bW7aPf//d3aK6NgyHEA9mOY8Hti+/RPfTz8TsmER84iMrR8gwCvXBqZt9nj70ufuQphTPEtacFBVUMglcvgAae50oieLyrgFDyApoCBkhQlFOnIiyL9dAmpEhdCgxFVBvhTr/FXzXOQ/Akqge66umdZCmtELmPAoGnfC928VsYcFCTDVM7V9VE02h9ghUaUtI7BVlqCFnleB9qWDkfaiz1CFdmT7i+5TTpsHX2gr3rl3I/vvfwLndUE2bNuL7XN5Af7sdsSVt2xxtUElVUEgU1AomxnQLFiD/6aegmjYdEm2w+KZMfSS2tBjhSTeMe//N1/8e3pZm5D70L2jmzhn3/pJJf9I2BtcTIbzHg+brrgd4Hqm/PR1Sw/j/jgn5pbiotH388cdRVFQEpVKJuXPnYvPmzUNu++yzz2L+/PlIS0tDWloaFi5cOOz2JHxt589H46IpcH4YH0+M1DI1sjXZkLNytDnahA4npliW6e+FVNMV3b62oRMd5zGinIaQESIosSVsAWCSYQIC7mx4XKlRPY7b70aXK3guuai2EZYPPkDAQpVIQjGoDZiUMQlqmTrqx6L2CIQIRyphUWrUgvMGkxqjWUFnXb0a1o8/gq+lBeY33kTT765Cx/L7YVuzZsT31nTZwfNAqlqGDI24lpGXpJRg4/kb8d6p7wkdiihpjzqqP2ELAEsUc+HpPBGWvvGtZAzYHfDU1CDQ1Q15UeF4w0wqNq8NXa4uAEBRSlHMjivR66FbuBCpZ53V38eYkEgTPGn75ptvYtmyZbjnnnuwbds2TJs2DYsXL0bnEANY1q5di/POOw9fffUVNmzYgPz8fCxatAgtLS0xjjx5Oaq74Wj0g+fip6H2a0tew+YLNuOwjMOEDiXmQsMTQku8oqU/aes1oMJElbaExANfRyea//hHuHbtEjqUqDuh5Bg4626AtW1RVI/TYG0ADx6MT4kTNnyKtttuh79ndMt0SeLyc/7+NkCUtCVEGBUmLTjP6JK21tWr0XLDjfC3dwx43d/RgZYbboR19eph3x+aB1Fm0IpySBDLsMhUCT+bROysK1di2l1X4eTab8fds1+i1aD8u29R+NqrkJlMEYowOYQ+TwwqA3Ty2N7H5v3nUWTf91fIcnJielwiHoInbR9++GFceeWVuPTSS3HYYYfhqaeeglqtxvPPPz/o9q+++iquvfZaTJ8+HRMmTMBzzz0HjuOwZhRPXMko+NzImdODrNlmKGfHT4+/TFUmJKxE6DAEEUraRnsYmVaagYDHCHhNKMrQjPwGQkjUdT3yCGwrV6Hjr/eFNTE7EYVWFfQ6vOh1eKN2nNDAK6kjE13zT4DmyCMgLyyI2vHIyD6r/wx/2/g3bO3YGrVjtNhb4ON8UEqUyNZkR+04hJChVZh04LxGAMMnbflAIDiMc7Dz3v7XOpbfDz4QGHIfYu1nS+KLp7oGjN+PCeZaNDt2wunzjGt/rFwO9YwZEYoueYQ+T6hnPUlGgiZtvV4vtm7dioULF/a/xrIsFi5ciA0bNoxqH06nEz6fD+npg/dE8ng8sFqtA/5HhtFbC3WmB2mHySDJKRU6GoIDF5vRTtoeZ7wSztplKFAdDrlU8Oc5hBAAxmV/hOaoo5D1l3uTvlJILZciN1UFgMOejp6oHSc08MqBLMj+eBMKnn8ejEScDwXjxbqmdXhz75vY1rEtascI3dAVpRSBZegcR4gQKky6UVXaOr/fCn97+9A74nn429vh/H7oBz1iTtouW7sMy9YuQ72lXuhQRC/z+uuQ8/DDeOrcKigKn8bmpiqhQ0pKB5/jheLr6ADnGV9SnpDBCHrV2t3djUAgANMvyvtNJhPahztRH+TWW29FTk7OgMTvwe6//36kpKT0/y8/P3/ccSe17n3B/2aWA3GUIGizt+GWdbfg+jXXCx1KzJUa9/e07bRHtdJuX4cNAKg1AiFxRGowoOC/z0E5YYLQocSEwvQxtJV34+29b0XtGNXm/UMXPQaUG+nzLh4cW3AsLp98OWaaZkbtGEIMKCGEDFR5UKVti70FnsDgCQ5/V9eo9jfcdqGkbanIkrYBLoCvm7/G5w2fQ8LQA0mhMQyDlBN/AzmCKzy2tu6FbwxtHa2rVqHh4qWwfPxJpENMCkL3rG+46GJUH7MAzu+/F+T4JLkldKnBAw88gDfeeAPvvfcelErloNvcfvvtsFgs/f9ramqKcZSJxbXlO9hbFfAr4qu5uUwiw8r6lfi6+eshL/CSVVGGBgwDWN1+dNmj87P7OB+q9idty0V2cUtIIgmYzUndJiFDrQXD+vtbGETDvt4aAIDBlQqjTlzDaeLV8YXH48aZN2KGKXpLPoW+oSOEAHlpKihZPfiACjz4IStBRzuBfajt/AEO9T3BAb5lBnFd17Y6WuEJeCBn5cjRUo/NeJEuywN4HmmvvYWak06Gc2t47YBsq1fDuXkzPHv3RinCxBa6bhTqHC/NygJYFt76ekGOT5KboEnbzMxMSCQSdHQMbDDf0dGBrKzhpys+9NBDeOCBB7B69WpMnTp1yO0UCgX0ev2A/5Gh9a7chKavM2De4xc6lAEylBm4adZNePTXj4JB/FQAx4JSJkF+WnCqdk2nIyrHeG33a1hlvwJywyqUU6UtIXGp7+23UX38Itg+/1zoUKKmLC3Yi6zDFZ0HrBzPocXeCAD410cfomreEXDv3ReVY5H4UmsJVlhT0pYQ4bAsg/KDWyQM8YBOPWtmMAky1Ko/hoE0KwvqWYNX5zf0OuEL8FDJJPvb7ohH6AFVYUqhaOeBxKM8bSFYHkhpaAbvcsG1Y0dY7zcsWwbT7bdBf/JJUYowcfk4H5qswetGoXraGm/6Eyo2b0L6BRcIcnyS3ARN2srlcsycOXPAELHQULEjjjhiyPc9+OCDuO+++7Bq1SrMmjUrFqGKhsyQAUUGC8Wk6UKHMgDDMFg6aSkW5C+AXCK+yqho97WtNdeCY9wAL0G5SVwVCYQkCl9zCzibDZYPPhQ6lKiZZqoEADj41qjsv93RDh/vgcrFQueyg7NaIculSqR40OXswqa2TbB7o3OeK0stQ0VaBUpTqV8/IUKqMOkQGGEYGSORwHTH7fu/+EXidv/XpjtuH7Ifeag1QolBA5YVV7EHtYKJT5UZpeBYBv86JRW5K1Yg45JLwnq/PC8P6UuXQllREZ0AE1iTrQl+3g+VVAWT2jTyG6JAZjJBoqV7aBIdUqEDWLZsGZYuXYpZs2Zhzpw5eOSRR+BwOHDppZcCAC6++GLk5ubi/vvvBwD84x//wN13343XXnsNRUVF/b1vtVottPQPZdyMj70Ho9BBkEOUGjT4cs+Bi9BIu3zin/DKZ4VgoURxpiYqxyCEjE/mNVdDnp+HlNNPFzqUqDmyYAKwHeBZO9rtPcjSZkR0/6Gb
WTtjwKbHHseFeSxdZMeJyz67DPXWejy76FnMy54X8f3fe+S9Ed8nISR8FSYtuLr9lbbmoVvh6BctAlY8go7l9w8YSiY1mWC64/bg94cg5iFk1AomPs3KmYBX6wCbvBPa4w/M4uH9fvh7eyEz0h34WB38O5/sQ3uJOAmetD3nnHPQ1dWFu+++G+3t7Zg+fTpWrVrVP5yssbERLHugIPjJJ5+E1+vFmWeeOWA/99xzD+69995Yhk5irMfVg22d28CCxXGFxwkdTkyVGqJbadvQ4wHnNaHIoIFCSkupCIlHrFKJ1F+c+5JNfmoq4E8FpGZsbNqD0yYeFdH9h5bIc14DSnPSoCinm6R4UZxSjHprPeosdVFJ2hJC4kOFSQe/9XBkKKbjvqPPGHZb/aJF0B13HJzfb4W/qwtSgwHqWTOHrLANqQklbUXWzxagpG28mptfAZ5nwLBu7O1uxURjHjivF61/+hPce/eh8P9eGTRxy3McOv/5EDRHHQXNvLlgpIKnb+KOSW3C2RVnI0szfHvNaLN99RUs738A7fyjk/56ncRWXPyrv/7663H99dcP+r21a9cO+LqemjtHj98LSGRD948S2E9dP2HZ2mWYmD5RdEnbUKVAbVd0etru6whe3FbQJHVCEgLPcbB+uhL6xYvAyGRChxMxDMNAzWTDCTO2t++LeNK2pu9A0raC+nfHleKUYnzV9BVqzbUR37fL74JCogDLJPT8XUKSQoVJB96vR3MHA4Yf+fzFSCTQzJ0T1jGqu6jSVqjenmRwOoUKkkAmOGkXNjbuDiZtLRa4d++Bv7MTnqqqQZO2ru0/oveFF2B+5x1UfPetAJHHv8mZkzE5c7LQYcBbUwPbZ5+BD/gpaUsiiq5eSb/eB29EzZxKdN8Unx8yoSfG9dZ6cDwncDSxFaq0bTG74PRGdkjc3t69eKfhIchSN1E/W0ISRPPv/4DWm25C70svCR1KxBkU+QCAfb3VEd/3xJQj4ek+Bld+2wnpS8/C19EZ8WOQsQmd44caTDQe//nhP5j76ly8uPPFiO+bEBKe7BQldAop/ByPuu7IFyPwPH+g0lZkSds+dx/6PH0AgEJ9ocDRkF/SS4I99Hd0BQegSg0GFLz4AvKfeQbaowZ/SC3R65B69tlIOf00MHLxzXVJJNoFC2D44x+ReeWVQodCkgwlbUk/T1UNvDYJOG98JkRzdbmQslK4/C50ODqEDiem0jRypGuCJ+pIV9vu7N6JZv/XkOp2ie7ilpBEpTvuODBqNViNBnwgAMemzbB8/AkcmzaDDwSEDm9civYPT2l1NEZ83zp+MriO43HC3l3oefJJ8D5vxI9BxqY/aTvEYKLxaLA2wB1wQyuncxwhQmMYBuUmLaQpW3H/lr/ip66fIrr/NosbDm8AEpZBYYa45jTUW+sBANmabKhlamGDIYfIUhcAAGot9f2vyfPyoJk3t//rgNUKzuXqv7Zz79kL/ZIlMN16a6zDTQg8z2N3z244fU6hQ4GirAyZV/0OqmnThA6FJJm4aI9A4oNhFgO9shvSE44VOpRByVgZCnQFqLXUos5Sh2xtttAhxVSpQYNehxc1XXZMzk2J2H4P7vFIy4UJSQwpp58GzdFHwbV9O6qPWzhwSEtW1ohDWuLZJEM51vUCFn9LxPdd1WGHhOPw/QkX4AS9G7Lc3Igfg4xNKGnb6eyEw+eARha5ZMuKY1egxd6CFHnkzp2EkLGrzNLh58BubO3diR+7pmKqYWrE9h0aQlaYoYZcKq76pFB7GepnG5/KU0uxxwV0ugZ/KO3v7kbjFVcCLINATy/8HQeKlBL92i5aul3dOPvjsyFlpNh8wWbIJMnTMoyQEHGdyciwpK46aExeKKYdKXQoQwpdhIQSjWISqoINLfmKlL09wT9L3mtAcaa4KhIISVQMw8C1fTtabrhxQMIWAPwdHWi54UZYV68WKLrxmZM3AQDgY7vh9nkitt8uZxc2t2+GV+GF77SzkH3vvTRlOI7o5XpkqjIBAPUHVSFFgpSVolBfiFRlakT3SwgZm3KjDn7rFOThFBxuPDyi+66mIWTUzzZOTc0qBwDYudZBv+9taoKnpgaen3cPSNgCiX9tFy1dri6kKdKQo82Ji4Qt5/XCuW0bbL+Yy0TIeFDSlgS5LYB9/41/RrmwsQwjdBESjeWT8S7U1zY0XCFSavY/lTcqC6CUDT+NlxASH/hAAB3L7wd4fpBvBl/rWH5/QrZKmJqVBz6gBMPw2NKyL2L7/bblW/wUeADKnDdQTkMX45KYH8wSIiaVWTr4bdPg6lwY8QFCoh5Ctr8nOFXaxqd5+x9Kc4wbvc5D292ppk6FRDvE722CX9tFy2EZh+Hrc7/G2ye/LXQoAADnxo1oOP+C4DU6IRFCSVsCAPD+9B1696nhdGQDSr3Q4QwpmoNK4l0oaVvTGbmetp6AB92eNgBAWSo9lSckUTi/33pIhe0APA9/ezuc32+NXVARIpNKIOezAADft+6J2H4DHAfOm47CVjVK5b6I7ZdETrE+8n1t17eux23f3IYPaz6M2D4JIeMTasfV0OuE2xfZBFS1SIeQAcCZ5WfiyilXYrpxutChkEEUpRsha7kH9n33oKX30MHSzu+3ItDXN/QOEvjaLtripYez6vDDITUYoKysBOeluQkkMihpSwAAzg3r0LEtFV0/xccH3lCiOagk3oUuPuu6HQhwg1TXjUGDtQEADz6gxCQT9XYkJFH4u7oiul28mSi/AI6666D0TYrYPg9PXwxH9c249+N9cJy4EK5duyK2bxIZ0TjHb+/cjk9qP8HWDrrJJSReZGrlSFPLAGkv3vn5i4gOEaoRcdL22IJj8YcZf0BFWoXQoZAhlGXkAmBQM8jKyWS/thMDiU6Hsq/XIe8/j4KVy4UOhyQJStoSAIDUlAttZQo00yYKHcqwivRFAIJNx61eq7DBxFhOqgoKKQtvgENzX2QubkM3xpzHiIosWi5MSKKQGgwR3S7ezDAdDs6dj8aeyFVgVXXYoPZ74FOqAKkUitLSiO2bREY0krahfYWqeAkhwmMYBhUmHdSFT+Ef22/Cvr7ItMLpc3jR4whWt5WKsKctiX/9KycHSdom+7VdNCxduRTXfHENWu2D9wkWAs1LIJFGSVsCANCe+wfkf7ARmQ++KnQow9LKtTCqjAAiP6gk3klYpn9QWHWEhpGFpsxyXgP1eCQkgahnzYQ0KwsY5sJQmpUF9ayZMYwqcg60g4nMZx3P86jqsMMpU+K9Pz2Kyi2bwSqVEdk3iZxQ3/oGWwP83KFLR8ci1B+3hFoAERJXKkw6cJ7gNX2kHtSE+tnmpCihUUgjss9E0WBtwPrW9eh2dQsdChmGStcIZd7L+Lz92UO+N+K1HcMk9LVdpDl9Tmzr3IZvW76FWhp/q4U5t1voEEiSoKQtSThiHlRSahz66exY7OmpAQDwXiNKDJqI7JMQEn2MRALTHbfv/2Lwi/uU354ORpKYwwXz02WQpW3AXt8r4AcbthamWkstXmi+GKr8F1Bh0oFVqSIQJYk0k8YElVQFtVQdkcR
DgAvsbwNElbaExJuKLB04b7BiMFKzKkJFDaUibI2wsm4lrvr8Kvx767+FDoUMI0PHQab7GR2+nw753rDXdvu/Nt1xe8Je20Va6HMjXZmOVGWqsMEchHM6UXfmWdg3ew4C9sgOECfiRElbAt7vA9/XMvgU8jhUlFIEQKR9bSM8jKyqL5j4zpDnQSmjCwBCEol+0SLkrngEUpNpwOuMWg3VjBkwXHONQJGNX5lRB4XpI3D6b7C3u2nc+6uz1MEHOxiJA+UivJlPFCzD4vMzP8e3536LLE3WuPfX5miDJ+CBjJUhR5sTgQgJIZFSYdSC80a20lbM/WzVUjWK9EXUzzbO/arwcLjbT4az7WT4A9wh3x/q2k5qMiF3xSPQL1oUq1DjXn/7o5T4eijLqtUI9PWB9/ng3rlT6HBIEhDXuhEyKO/2b1B78bVQZjAo/vrnYZfbxoPy1HLk6/LjchlEtIUqB6ojUGnL8RzanI0AaNkoIYlKv2gRdMcdB+f3W+Hv6oLUYIBqxuFgWDahKzFSVCrInUfA7pKiqdeNCeNs31bdF1xVcPMHZuTvuh/eW/4EeX5+BCIlkZaiSInYvkI3dIX6QkjYxP33QEgyCrZHCH6415gjs3oudH0sxqTtxZMuxsWTLhY6DDKCSaZcsLb58Pg5NPe5UJR56ErHwa7t1LNmJvR1XTTEa9IWAHIe+idkJhNkOfTAmIwfJW0JvDs3AxwDMNK4T9gCwDkTzsE5E84ROgxBlBoO9LTleX5cjc7bHe3w8x7wvASTTfF3siOEjA4jkUAzd86Q3zQD4NwAAQAASURBVO99+RUoJ0+CesaMGEY1fpMUl+Kbxm70Wsffe3ZXdzVkfh6z6y0I1H0G5q7bIxAhiXfxfENHiNilaeRIk+XBA6DF3gJvwAu5ZHzT1kPtEcpoCBmJUyzLoMSgxe42K2q67IMmbYGRr+1IfA8aVR9+uNAhkCRC7REItMUylJ3cgexzpggdChlBSaYWDANYXD707p+OO1YswyLNfxx85hmYkJUamQAJIXHF8tFH6Fi+HE1XXQ1fR4fQ4YTlwITl8beDqemrBccAT598Iow33wyp0TjufZLo2Nu7F9etuQ5/Wvunce8r1O+OkraExKdKQy74gAIcH0CTbXytcFzeAFrMLgDiq7TleC4i/d9JbORk2iHV/4BvmjYLHUpCowezRCwoaUvA9FZDpglAOTmxngjxPA+OP7QXUDJTySXITQ0O0BlvIsOkNsHaciI87WeI7uKWELHQLVwI1cyZyLj88oRLVJYYNGAkNvzU+fO49sPzPNpdjQhIGHgOPxYZl182rlUKJLoYhsHXzV9jQ9uGcSchavcvuS5JoRZAhMSjSpP+wDCycfa1remyg+eBNLUMGVpFJMJLGBtbN+LI14/ELetuEToUMgo+1Vaoct/Epq7PhA4lYR08aDRe2/xZP/8cbffcC/eePUKHQhIcJW0J0L03+N/McmHjCMO96+/F/Dfn46vGr4QOJeZC1WehJWBj1W33wuz0gWHEV5FAiFiwKhUKX3wBmVdflXCJSqmqGdqKv+Nn7uFx7afD2QEf7wbPs5hsis8Le3JAob4Qd829CyuOXQEe40va1lvrAVAVDiHxqsKk7e9rW2sZX1/bGhH3s62z1sHus8PLjW8VHomNivTgtUi3p1ngSBJXi70FPs4HhUSBbE220OEMyvL+BzC/+SYc360XOhSS4ChpK3I8x6FjVQP6qtXgdEVChzNqnoAHFo+lf+mjmBxYMjy+pO26+h0A60RBuhpKGTW2JyRZMTJZ///n/X50Pvxv+Do7BYxodOYVTAAA8BIruh3mMe8nVL01Y7cG0yxt4DyeSIRHokQhUeCcCedgdtZssMzYL1PNbjN63b0AgCJ9UYSiI4REUkWWDpw3uApkvJW2/f1sxZi0pWXiCWVGdiUAwI02amsxRqHf+SJ90biuFaJJ/5vfIP3SS6GemVgzJUj8ic/fcBIz/obd6N0lR/vWFCCzVOhwRu2KKVfgnZPfwYUTLxQ6lJgLXYyON2n70I/LoKv8K3KMPZEIixCSADoefBA9zzyDpt9dBd7vFzqcYRWnZQB+PQBgQ9PYl5bVmusAnsfvV1lhvP06eGsjM6WcxLdQlW2WJgtqmVrYYAghgyo3avvbI1T3je+zOZS0LRXhEDJK2iaWI/IrwfMMIHGitjex5g3Ei0T4nU85aQlMt94C1fTpQodCEhwlbQnSF5QhZWYOWLVO6FBGrTS1FJXplVBKxz9VPNGUGoJTRsfTHsHldyHABZdKH2ag5cKEiEX6RRdBlpcHw/XXgZFKhQ5nWAzDQMUEl7z90LZvzPvZ0VkFpReoMqRCmpUFeWniPKAUqxZ7C96vfh9rm9aOeR+pilRcMukSnFJ6SsTiIoRElk4pQ6YiH0Bwif94qg7FXGkbai1RrI/fBBY5IE2tBRtIAwCsb9wtcDSJKbTalnrWEzGI7zs2EnWy4kkwPfWR0GGQMJTuvxhtMbvg8gagkoff2kAlVaHE9QA21bdh0pTEGk5ECBk7eX4+Sj/9BIxcLnQoo5KpyEOTfy/29daMeR/7emvgVjB46oxTcP41NKQlEWxs3Yh7N9yLo3KOwoL8BWPaR1FKEf4060+RDYwQEnEVGUXYxrOQQgmzx4w0ZVrY+/AHONT3BAf0ii1pa/Va0e3qBhDfVYdkIJ0kF1b0YkdnFYBjhQ4n4SRCpS0QHIbrrQvGqiihBDMZG6q0JQnr1d2v4t719/ZfqIhFhkaOFJUMPA/UdTvGvJ+qTjvAK1BhSpwKa0LI+B2csA1Yreh8+N/gvfE5vKRwf9VQi6NhzPtode6fLkzVGAkjdBM23h6XhJD4N9GUBvu+u7FY98SYErYA0NDrhC/AQyWTICdFFeEI41u9pR4AYFQZoZWLK2GdyEzKAgBAjZlaNo3FlVOuxB8O/wOmGKYIHcqwup98ErUnLkHP088IHQpJYJS0FTn/ng3gbV1ChzEmr+95Hf+r+h9qzGOvwEpEDMOMu69tj92DXocXDCPO3l+EkODT/6arr0HPM8+g/b6/CR3OoCZllgMAzP6xTVi2e+1wBHoBnscUY1kkQyNRFEratjpa4fQ5x7SPHV070OvupSEvhMS5cpMO4JTY22Eb8z76+9kaNWBZJlKhJYREqTgkA5WmBf++2l2NAkeSmObnzceVU69ErjZX6FCGpZoyFYxCAZ7nhA6FJDBK2opc3QWXYe+8+XB/9Y7QoYQt1Lcp1MdJTMbb1/aub++GquBZZBlbxtRegRCS+BiGQeY1V0OalYW0C84XOpxBzc4NTlj2sV3w+MOvBpZL5DA5bsADT+tw3CN/gadGXA/5ElWaMg1pimDFXYM1/Cprb8CLC1deiGPePEZ0q3EISTSV+1d87esY+6yG/n62IixECN0HFaUUCRsICctUY/ChtD3QKnAkJJo08+aicstm5D74oNChkARGSVsR4yxdCLh48AEGsgmzhA4nbMWp4l0+GaqOHWul7a7eHyHV1CA3TRHJsAghCUY7fz5KP1sF5YQJQocyqMNzCsFzcjAMhx
/awk+4ShkZepoMKOqzQFG9B5LU1MgHSaJiPC0Selw9yNZkQyfXIVOVGenQCCERVGbUQqJqhCv9Sfzxy7H1Ha8R8RAyqrRNTPPyJwIAApJemF1jb3cnRnt69+DLxi/RZm8TOpQRMTJZwsyRIPGLkrYixjqaUXlmG0rP8kGSXSR0OGELVdqKMWl7oD1C+Cd5X8AHsy94kjssk5YLEyJ2rOLAwxtPXR16X35FwGgGkkulkHEmAMCm5vAnLLeYXTBzLK5bdCuyV6yANCMj0iGSKOlP2lrDP8dna7Ox6oxVWHf2OjCMuJZKE5JoVHIJsvQKSLVV+L5jy5j2Ud1FSVvq255YStNNQEAFhuGxoXGv0OEklA9rPsQNX92AV3bHz/UqIdFESVsx664CwwLykgqhIxkTMQ8qCVXa1nbZEeDC69fXaGsEDw58QIGpWQXRCI8QkoD8fX1ouOhidCxfDvP/3hU6nH5p0jwAwO7u8CttX/v5PUhSf4CsNB2pixdFOjQSRZE4x8skskiFQwiJosr0crhbz8AJhpvCfi/P86KttPVxPjTbgj3fqdI2sbAsCyWyAQDb2vYIHE1iMagMmJg+EZVplUKHMiqemho0Xn4FGi68SOhQSIKipK2Yde8L/nf/oJdEE7o46XB2wOET17KSvDQV5BIWHj+HVrMrrPeGboA5rwGVWfpohEcISUDStDSknX8eFBMnQrvgGKHD6ZerLQQANNjqw37vJ43/B1XO2zBk9EY4KhJtYn4wS4jYTMwywGeZDbs1/GKCNosbDm8AUpZBYYYmCtHFryZbE/y8HyqpCia1SehwSJiMimIE3CZ0WD1Ch5JQLp18Kd46+S2cWnaq0KGMCqvVwfHdd3Bu24aAbewDF4l4UdJWxLrfXYuO7Xq43Ym5XDRFkYIMZTD2eku9sMHEmFTCoihTDeDAkrDR2tVVBQDgPAaUGsV1cUsIGV7mNdeg6LVX46qNQGV6cMlnr7sz7Pfq+MOwaH0aflPfg4DFEunQSBSFWiDVW+oR4AJhvfeqz6/C0pVLsaeXqpcISQQV+4eRVXWEn9AIDSErzFBDJhHXrW22JhsvLH4Bfzvqb9QKJgGdkvd7OOv+CN4xXehQSBTJTEZk//1vKP7fO2DVaqHDIQlIXGc2MoB1Wxt692jh96cIHcqYhSpxQpNTxaS/r21nuEnbagCAVpIDtVwa8bgIIYmLYRiwKlX/147162H/+msBIwIWFy+Afd+d8LdeGfZ7Zb2n4oJvLZj1f8/C39UV+eBI1ORocyBn5fByXrQ6Rj9dm+M5/ND5A7Z1boNCQsM2CUkEFSYdGHkX9jo/x9dN4Z1zqkXaGgEAVFIVZmXNwqIiav+TiELt7qrDvJcTM1/AF/aD3HiQesYZUE6cCEYiEToUkoAoaStiaacfj7Sji6GY82uhQxkzMS+fDJ3oa8KstA0NdcnTFkU6JEJIEnH99BOarroazb//A1y7dgkWx2HZBvABHXrsPpid3lG/j+d5NLb24fOCWWAOnwl5UVH0giQRJ2ElKEwJtsYI5xzf6eyEy++ClJEiT5cXrfAIIRFUYtBArt0HZL6D13e/E9Z7xTyEjCS2UmNoRokNgQAncDSJYU3jGsx+dTZu/+Z2oUMhJGYoaStiaTc/gqznPoWsdIrQoYwZJW2Bms7R9/PleR7d7uDAggnppVGJixCSHJQTJ0Jz1FHQ/upXUJQL1/tco5AiO0UJILyHVD93tqIXHJ6bcQZK/+9lMFJaWZBoQi0SwjnHh1be5OvzIWNpEBkhiUAhlcCgzAcAVJvDWz0n5krbV3e/iveq3kOfu0/oUMgY5KUqoSl6ErLSP+OHNvHdy45FnaUOPs6XcOd3PhCA/euv0fnII+C9oy9AIAQA6A6GJLSSlGCvQzEmbfvbI4SRxOh0dsIPF3iexeHZZdEKjRCSBBiZDLkrHgEjkQie8Ewz/QizbiM+qOrDzMILR/Wef2/9N3QTPoPOeTpkkhOjHCGJhokZE9Foa4RGNvr+66HrgVDClxCSGCrSS7AlAHS6WuDn/JCyozvvhNqElRl00Qwv7vA8j8d+eAx2nx3vnfIe0pRpQodEwiSTSiCXeeFnvdjcshuz8qigZiT95/iUBDvHsyxab7kVAbMZugULoJo+XeiISAKhpK1I+XZ8DfjdkE6cB0apFzqcMQt9YDfYGsK6wEsGxZnBm9gehxd9Di/SNPIR3xNqjcB70zEhiy7uCCHDYxUDe4Ka33kH6jlzIC8If8L3eCjVXZBKdmNXb9Go39NgrYfGxSNPkx29wEhUXTHlClwx5Yqw3hO6oStJLYlGSISQKJliKsTmZhk41odmWzOKUopGfE+fw4seR7BqTWzDdb2cF6eUnoI6Sx0K9LE9J5PImSS/Et9VOSAprhA6lIQQupdNtKQtwzDQn3giOJcLzEGzIwgZDfFkuMgAXQ/+BZYtrcg8eToM/3xd6HDGLEuThaNyjkKeLg9uvxtauXiWRmkUUuSkKNFqcaOmy45ZmvQR37OrMziELOA1iHIZGSFk7Prefhvtf74bstxcFL/7P0hSYjfEco7xGOzYIoHMNH1U2/M8j25PE1Y8H4De9xzcc46FcsKE6AZJ4kLCVuEQInKVWSngag2QKFtRZ6kbVdI21M82N1UluuG6CokCt8+lvp6JbrpxKr7dVY36bo/QocQ9judQb6kHkJjn+Ky7/yx0CCRBUU9bkeLtZoDhIS8Vrk9hJLAMi6eOfwp3zbtLVAnbkNIwWyT81LkPAKBhcqBRiOvilhAyProFCyArLEDKqaeC1cd2hcavCmfB13cE2rsyRrV9j7sHUo8DaXZA4XJClpMT5QhJNAW4wKinRVN7BEISU4VJB85jAHCgN/VIQv1sS6kQgSSoUIV4uIOlxajN0QZ3wA0ZK0OuNlfocAiJGUraihHPI3deDyac2QbdiacJHQ0Zh/5hZF2jG0aWKz0a7o4lKFbPjmZYhJAkJDUYUPy/d2H4w+/BMExMj122/7OusdcJt2/k5F2tuRZuBYMLf28A+/xrkMQ4yUwi55ovrsHc1+bip+6fRtzW5rWhy9UFAKOq0iOExI+iDDUYnxEAsKurelTv6R9CZhBf0rbN3gab1yZ0GGScctIYyDO+wl7/i0KHEvdCD2UL9YUJ3RLR39ODgI3+7ZLRo6StGDl7AFcfGAkDNis5los6fU602duEDiPmQpUFoYvWkThtufD1zscM04xohkUISVIS7YGegXwggJ7nXwDnckX9uAadAjp9B1j9VmxtHnnw5I8dwVUFPs6E4jnToh0eiSKe5+EJeFA7ionyoWWTRpUROrm4hhIRkuikEhYGZT4AYF9vzaje05+0FWGl7T3r78GRrx+Jj2s/FjoUMg4lmToojJ+B065Ho7lL6HDiWjK0P2q99VZUHXU0rJ+uFDoUkkAoaStG3cGbWaTmA3K1sLFEwJeNX2Lua3Nx89c3Cx1KzJUawltSU9UZfKonxotbQkhktf/1PnQ++CCab7wRPM9H9VgMw0CZ9TFUOW9jbcOmEbff0VEFANCyOZBL6VInkd08+2Z8e
vqnOK3stBG3TdQBJYSQoLLUUgBAm6txVOcVMSdtQ593edo8gSMh42HQ6sEEUgEAGxp3CxtMnAu1TSnSFwkbyDjIcnMBhoGvuUnoUEgCoTsZEbJ++hGav02DudUkdCgREeppY/FYBI4k9kIXqU2jWDLc7mjHbus3YOUdqDBRBRIhZHxCvW1TTz89Ju0SMuXBG9O9o6jAqrXU4aI1AVy8tRu+js5oh0aiqDS1FPn6fEhYyYjbhqpwqDUCIYlpWnYZeJ6Bl3Ogx90z7LZOrx8t5uBKj1ARg1g4fU60O9oB0EOqZKBhsgEAP7ZXCRxJfAud40tSSwSOZOzSLrwQFRs3wPinPwkdCkkgidsMhIyZ68edsDWrICtVCh1KRJSmluLbc79FiiJ2k8zjhUGrgE4phc3tR32PAxOyhu7buK5xI3wZL0OhLEaZ8eIYRkkISUbqGYej7IvPY9YvtlBfjOa+r9DiaBhx225nIxZv5SEPbAPvjn77BhIfQi0UKIlBSGKaaEoHX5MGRt6LOksdMlWZQ25bu3+eQ5pahgytIlYhxoVQlW26Ml2U9z/JxqQsgN27G9WjaAMkZsnQHkGani50CCQBUaWtCOnPvQSm838F3clnCh1KREhZqWgvWBiGOTCMrHP4YWR9diDgLISaL4ZWQc9rCCHjd3DCNmC3o/f/Xo1aq4SJmcFls32+5mG3c/qc8Pq78doCFj0LFkKWnx+VeEhs+DgfnvzxSdzy9S3wBDzDbnvp5Etx25zbMC97XoyiI4REUmWWDpzXAACoGSGBJerWCEmQvCIHlKQG/x7bXI0CRxK/LB4Let29AIBiPf3eE3GhzI0IqRacAdWCM4QOg0RIqUGL7U3mEfva6rkZcDZcg9kVhhhFRggRC97vR+Nll8P9008IWC0wXHttxI8xO3cCnqsCvGwHfH4/ZNLBL2HqLfXwyRh8PEOLS5f8HQxLz6cTmZSR4pWfX4HNa8MVU65ARVrFkNtON07HdOP02AVHCImo/DQ1GG8+Ai4nnO7hb1MpaQuUpCTuMnFywGRDOT7vBKz+FqFDiVuh33mT2gS1LLFn8ji3bkXfa69DXlwMw/XXCR0OSQB0J0OSwprGNbj686vx7E/PCh1KzIUuVkdK2u7rCA4hqzCJ7+KWEBJdjFSKlNNOhSQtDdpfHROVY8zIKQbPScEwAWzvqBtyux/3DyHjvQYUZ4qrz2EyYhimv5osdNNGCElOLMugVHoGnPXXIUd65LDbhpK2oRVnYkKVtsllbv5EAEBA0gObh1o6DWZixkS8cdIbuO+o+4QOZdz8XV2wfvIJbF98IXQoJEFQ0lZk/A274XjjQfj3rBc6lIjqc/fhu9bvsLVzq9ChxFxo+ELo4nUwHM9hX2cfAKDcSEPICCGRl37++Shd+SlUkydFZf9KmQwyLjhAc3PzniG3296+DwWdPIyeTCikIw+vIvEvtBRyuKRtjbkGH9V8hBrzyIPqCCHxKzQsd1/H8MUI1V1UaUtJ2+Qw0ZALnlOAYThsbqZhZINRSBSYlDEJR+QcIXQo46aePRuZ118P0223Ch0KSRCUtBUZ56o30XjvC2i+9hqhQ4mo0EVLvaVe2EAEULr/YrW2ywGOG7yXZKO1ETuk10Jd/AjKjFR5RgiJDklqav//9zY1wbp6dUT3nyrNBQDs6hr6pqZCcTpuf1WFJ5/aCNeOHRE9PhFG6Bxfaxm6x+VXTV/hjm/vwLM7xLfihpBkEloRtru9DwEuMOg2vgCH+u7gLAexJW39nB8N1uBATkraJgeWZaHkswAAW1uGfihNkoM0IwOG66+DZh713yejQ0lbkeHNrZBp/FDkZggdSkSFLlpa7a1w+cW1rKQgXQ2ZhIHLF0Cb1T3oNru6qgGGB8Ci3ESVtoSQ6PJ1dKDh/AvQ8sdlsH/zTcT2m6MpBAA0WIeuuGxo7oOXVYBnWChKqN9fMhjNg1mDyoBZplmYkjklRlERQqKhIksHVeGT+NZ3JarMgz+ga+hxws/xUMkkyElRxThCYbXaW+HjfFBIFMjWZAsdDomQdHlwaOqeXlotMpjHtz+OV3e/CovHInQohMQcJW1FJqXUj7KTO5F11SlChxJRaYo0pChSwINHo1VckzdlEhaFGcO3SNjWFnxqq+CyoFPKYhYbIUScpAYDNEceAUVJCRSVlRHbb0V6MAnb5WkacpvdZh8uW3Q7ql/4AKyGVhYkg9CwnXprPTieG3SbU8tOxQsnvIALJl4Qy9AIIREWao8AhkPVEAms/n62Rg1YlolVaHEh1BqhSF8ElqFb+WSRry0CADTZGoQNJA55A148+9OzeGDzA/AEPEKHExG8zwfXTz/B8sknQodCEgB90otN1z4AAGOI3E10PGAYpv+mbrjlk8kq1Ne2Zoik7Z6e4J9JpjI/ZjERQsSLYVlk//3vKPy/VyAzGiO238Ozg+cuF98+6Pfb7G3YE3gasvSvUVpgiNhxibBydbmQslK4/C50ODqEDocQEkU5KUpIes6Gvep2lGvmD7pNaPhumQiHkIXuc0L3PSQ5TMgoBQD0+poFjiT+eANeXDLpEiwuWgyDKjmu7bxNzag/+xy03X4HOK9X6HBInKOkrZhwHNCzf5lRZoWwsUSBmKdLhybnhi5if6nFsb/3lZ56XxFCYoORSiFJSen/2rF5M9x7945rn0cWTADPM4DEgfq+zkO+/33bz4B2G+Qp20Q5UTxZyVgZCnQFAAY/x/sCPtG1RiIkWTEMg4r0UvD+FFQNUYwQqrQVWz9bANTPNknNy5sEv70SbksZeH7wGSVipZVrcePMG/HQMQ+BYZKjsl5eXAR5cTE0Rx2FgNksdDgkzlHSVkR8e75H7cc6tG5KB1ILhQ4n4kYzXTpZhS5aB2uPwPM8zPuf2k42lsU0LkIIAQDnli1ouvJ3aLz8Cnibx15FkqnRgfUbEXBnYUdb6yHf97szcOPrmVj2iQRsW8t4QiZxZrhhZD90/oA5r87BpasujXVYhJAoCLVI2NdhG/T7Yk7a3n3E3Vh1xiqcWXGm0KGQCJpXUAlfy2VwdByP9iFmlJDkwTAMSj79BPlPPhHRFWkkOVHSVkQ8P22ExyyD26IGJFKhw4k4qrQFaroch3yvx92DAOMEzzOYnZtcbTEIIYlBUVkJeUkJVFOmQGoY39K2KfgbnHU3wuU4dKCmrVeNOQ2dOHJfAxhp8p3nxCy0FHiwc3zoNY2MehgTkgyKDXLIDZ/ho7Z/IMAFBnyP4/gD7RFEmLRlGRa52lwY1MmxTJwEySQsCjLUAICazkPv58SsxlyDTmdn0lUgJ0vVMIk+uqMREeWvTkbeXT4g4Bc6lKg4eFBJgAtAwkoEjih2Svb3tO22e2Bx+pCiPjBsbGdnNQCA96VhUs6hSQ5CCIk2iV6PwheeB6vRgJGNbxhimUGL76p7Bm0HU93lwEdzL8G5mT5MyKap2smk/8Gs9dCkbaj6lpYLE5IcJmalQZ7+DXrgR5ujDXm6vP7vtVndcHoDkLJM/yBeQpJBqUGL2t5O/NjaiKPLM4UOJ27c+e2d2NWzC48c+wiO
KzhO6HAijnO7wcjlYFiqpySDo98MEZHmFEN34Z+gW3qr0KFERY42BzJWBk/AgzZHm9DhxJROKUOWXgkAqOkemMj4vnUPAEDGZUGvHF+yhBBCxkqSmjogYWtduRIBiyXs/fS3g+k6dNns932bsLUgEzj/QqpgSDLDraYJvUaDeQhJDhOyU8B5g0mrvT3VA74Xao1QmKGGTCKuW9mfe37GTetuwqu7XxU6FBIFDvVK6Cruw2et9PcbwvN8/zk+2Waz8DyPxssuw97Zc+CtqRE6HBLHxHWmI0lNwkpQqA/26hVliwRjsNrgl31td3cHL3bT5bkxj4kQQgbT9/rraPnjMjT97ipw7vB6t2k0vVAXr8BW/10D9+nuQ5P8cWhKH0ZRpiKS4ZI4UKQvwoL8BTi59ORDlkuHqm+p0paQ5GDQKiANmAAA37cOHGAp5n62u3p24bP6z/Bty7dCh0KioCQ1WFHe6+oTOJL40eHsgNPvhISRIF+XL3Q4EcUwDHiOB3w+uH76SehwSByj9ggiYnnoOshKKqA64VIwar3Q4URFcUoxqs3VqLPUYX7efKHDianSIZYMN9qCU2YLtHQzSwiJD6oZM8GmpEA9Zw4YRXgJ1sOysyBRtsHPM7C4HUhRBh9Y/di+DzOqOFgUOkxMT85znJhp5Vr859f/OeR1p8+Jdkc7gGBilxCS+BiGQYYiD934EbuHqLQVY9L2cMPh+NPMPyFLkyV0KCQKTio9Af+3JgVqHV3DhIQKsfJ1+ZBJkm/FqOm2W8FqNJDl5Y28MREtStqKRKCtHq3PfQngS1Qee17SJm2XFC/BpIxJmJ01W+hQYq5/GNkvmtf3epsBBphoKBUiLEIIOYSysgIlH304pom5FRlZQNtlcDjS0Wb2I2X/veuWlj249hMOepcFzOm1wORJEY6axKNQlW26Mh2pylRhgyGEREyhrgjdrgPFByE1Ik7alqWVoSytTOgwSJRMzMoEeAU6rB7Y3D7oqK3dgdYISbqSRjlhgtAhkARA7RFEItC8CxqTG0oDwKaFf5OcKI4rPA6XT7kcEzMmCh1KzIUuXg+utHX5XfCiBwAwJ5dOCoSQ+HFwwpbnOJjffx88x434PpZlUaKbA96XidouZ//r1a17UJvFwKxVQFFON7XJiOd5dLu60Wxr7n8tdENHVbaEJJfJhnIAQJ+3acDr1fuvc8sMupjHREg0pahkMOiCq49quhwjbC0OyZ60JWQ0KGkrEnJJLwqO7UXxlZVCh0KiJFRp29jrhMcf7Pfn8TJw1N0AV/OFODyXll0QQuJT+z33oO2229F+332j2r7UEGyJUHNQD+8abwuWnyvBs3dcCjbMlgskMby19y0c+9axeHDLg/2v9Q8hS6UhZIQkkzl5wXsWP2NHnzvY47PX4UWvwwvgwCwHsfAEPPis/jPs69sHnueFDodESWrWRqgKnsFHVZ8JHUpcEEPPesf69Wi/72+wf0O9qsngKGkrFt37gv/NrBA2jhioNddiTcMaOHziekJp0iugVUgR4Hg09gSrz2q7XeA8WchgZiJVLRc4QkIIGZzmiCPAyGRQz5gxqu0z0iyQZ6zB2vb3+l/r8wWrLw/LLI9KjER4BfoCMGDg8rv6X0vWqdKEiN2UHCM4XyoAYHd3cLJ6qJ9tbqoKarm4uvzVW+px07qbcNlnlwkdCokipaoHUk0tfu7ZJXQocaHOnPxJW9vateh79VXYv/pS6FBInBLX2U7MuquC/xXBzexVX1yFdkc7XvnNK5hunC50ODHDMAxKDRr82GxBTZcd5SYdqjuCF7flRlpCRgiJX/oTT4Tq8MMhy84e1fYqdS8Uxs9R58kDcAs8AQ+8fBfAAHOpFUzSmmWahS0XboFCcqCSmpZOEpKc0jRySAMmcDIzNrfswZF5s/qTtqUi7Gd78AMqhmEEjoZES1FKMeq7gVZnw8gbJzm7145OVyeA5D7H6379a4DjoT12gdChkDhFSVuRqHvyJyCQiZwFWiT7otHJGZORqcyEj/MJHUrMlRq0+LHZ0n9R+2nDu5CltyA7c7HAkRFCyPAOTthyDgfs69dDf/zxg247O3cCXqgGvEwH/IEAdnRU4/6X/AgwLCbP9MQqZBJjv5wc7ef8aLAGb2yT+YaOELFKk+WhB3uxqzNYfBK6vi0ziC9pW2upBUCfdclusqEMa7sBi79V6FAEV2+tBwBkqjKhlyfnEHUA0MybB828eUKHQeIYJW1FgHPa4O7mAF4OtmCK0OFE3b+P/bfQIQimtH8YWbA1xM/2VVCamqHTzREyLEIIGTXO5ULjZZfD9eOP4P/xAFJOPfWQbWblloLnJWBYH37qaMCWmp/wqw6A5TnoTJkCRE2E0GpvhY/zQSFRIEebI3Q4hJAIK9AVoccJNNjqARw0hEzElbYlKdS/O5nNyZsI7Ab8ki44fR6oZclebjU0WklDSBAlbUWAkatQ8vJj8O76HtKiw4QOh0RR/3Ce/Re1nG06fHw65s6dJGRYhBAyaoxSCdX0afDU10NeVDToNiqZHNKAAQFpOzY17cYOcz3eu1KCSZ2V+LfBENuASUy9W/Uu3q9+H4uLFuOcynPw4WkfosPZAZahMQ2EJJvDMsvwQyPQ420CcGD4pJiTtpTASm5TTAXgORkY1ofvm2vwq2Lx3rv3V5eLoGc9z/PwNTUhYLNBNYnu28lAdIUrAoxUCsXshdBdchsYVjx/5QEuIHQIMRe6iK3ptMPm9qG75Wi4W8/H3IJSgSMjhJDRYRgGxttuQ8l770I1bdqQ26VI8wAAP3dXod7egLYMBl2zZsUqTCKQblc3fuj8AT/3/AwpK0VxSjHmZdOyQkKS0dy8w+CzTgFvmwG724cWc3AIodiSthzP9S8Vp6RtcpNKJJDzWQCA71v3CByNsNocbQCAktTkry63fvQRahYtRsfy+4UOhcQh8WTwiGjYvDac+v6pmPvaXHgDXqHDiamCdA0kLAOHN4DvqrsBAAadAqlqucCREULI6DEMA1nOgeXuvtZWuLZvH7BNjqYAAFBnqUeXJ1iFVZlOD6iSXShhEao6I4Qkr5n5+XC3XIDelmPxY7MFAJCukSNdI67r2jZHGzwBD2SsjFrBiEC6LPhQend3jcCRCOv+o+/H2rPX4qSSk4QOJepUU6cCMhkYmQw8zwsdDokzlLQVAevjt8Ly4DXw7d4odCgxoZVp0enshCfgQaO1UehwYkouZVGYrgYAvLtjBxhZL8pNaoGjIoSQsfM2t6D+ggvReMWVcP/8c//r5WnBBG2XpxHHbOnFUTt5zNblCRUmiZHQMslaSy2e/PFJPPvTs2h3tAscFSEkGvRKGXJSlACAT3cEq+7EOIQs9JCqUF8IKUvdDZNdnrYQANBoE/fDSYZhkKHKQIoiRehQok5WWIjKLZtR+OILYBhG6HBInKGkrQj0vPUpWp9fC9fGr4QOJSYYhumvxAn1whGT0DCyDd1vQVv2IPy61QJHRAghYyfNSIcsNwdSoxGS9PT+1w/PqgAAOAJtOHedGzd8FMBsZfJf2Itdgb4AEkYCh8+BJ7Y
/hcPpAkbCYTFcuW9Zi4vfLgK7l84uWkh7lQ8WJsgg/OgvoiiMyAM94Rhi4KUf/991ibmwmcPZu8BqFnK+iKxWrrYjqWGhVIdnkjM9MiCdJrsFi7TqQTDj2MWy94nvJ4Pattto4xVmd0AfJOpt3fysUScRM98aUIemFYVNruWy+/jprR/lZeXR6tllZiApwzq0qPCUIlQW2ziYrGVmKC/YkPiqe4sZi8ujwi9AdobupD4ZyPIX6SS3qcAgew2TpkKOyyFIKhy5rnwFADFbtdTsALvX7XELMzATEBMRjMBooai3rXGfUENQXyir+k6tDb6oGGlSvZf/8DNPzi3MpsT6R1Sto687VHXXYZIYsWodJ1mrRmLISY8fL2nz9fdzk2X0bSaAg67DBCjur0QN/6HhSsAW0AHPdMt4dDRXMFf+z/g0pDJdll8gA3MTwAf51IMCqFys9PJGx9FJVKIkSdAMA/Zb2bPLZTnQu1hW6OStAb9orLpOBkcivknSCZsR27CiqWLaP+q6+JXLKEuEcewX/SpB7NyQAOijqIg6MP7tmkxRGsFvj8cijZIm8dPPdjMfFVkOr/vs3+u++heeNGYdIh6MaNnRK2AGPjQgDYub+e647I4MajMrtcPyoyldyRGuokA+XN5b03bJdIEMaTXsfnk7Y2W0fSNmkWAPXGeioNlYDzUjB6rZqkSPl5ZpdISA6V2+h1R0n6EeK55QmMjXIlf3gyJLkmYehxbDbY+wu8dzo0VXk7Gu9TnQf/fCYfz7nR5eaEXr9riKStgKSQJL456Rt+OeOXnlfkPcWe79oCmt3n9oSm9Ruo/egjmjdtcrnLpMgAVBI0tJipaGh1rTFJ6vhQ++MVWd9W4BjN1fDTPfLx/DshPKnbJWtL1nLpT5dyx+o72h1jhQmZ+7CZzd4OQaAwIwJkPcTc+n62yf98P7wwCf541QNRCXrCPvH0Jw6jxYpeqyIhzB/oajoWecnF7L/3XgxbthBxycW9Jm5d4rd/y4kdtR+c9T5EiCoJJQmcPZuAadPwnzixI2kbIiY1gp4ZEycv3uwu7dmESavWkhicCHQs/vRI5iKQ1FCxBxr7SO4K3EpVY6vv7xyrzoWmCvkZEj8JgPy6fABi/GMI0jn/ddt3ntjNyOzVe7l1uQoELBgwfsFw0jK4bito9f1ePuhY8QBk/wR/vubtSLzPuhfBZpWT8PGHuNzcfTPu451j3uGoJFGBPRBE0laARqUhOTQZrUrr3UBsVtl8yr763wvBRywgcskVBM2d63KXfho1oyJk3ThnzMgArAYDtV9+SfnTT3e8Of5keXWxuQq2/M/l+HwNm8VC4RVLqH7vPawtnYTN/cNh4SOQMg9m9O58Pip4FBlhGe2VtulCz1ZxWvZkkX/W2eSffY63QxEoTHqYPKkpN/RTQZtwqPy6W5jUeINGY2N7hVyrIQpo2w6qaltUtVjbTcfUISGEnXoKkUuuIOKii4i67lqwWLu0Z7VZ+Wj3Rzzx5xMYzAcYPzrCpHMhZpw8Eeu0tVWgDNHXXUvSO2+jS0ri9MzTuWLiFUyKneTtsASDFHul7a799b1ec5BlBCevtdL86lu9NxQQAed9BrfthSDntqcLlMNeZTsy3B+91kd3OhWsk18TJoPGD8DlXQX2nSf2Ig57O31qt29+B95cJAzJPMEAi8AqXnyp14XnimXLqHjRjQaokgRzbpKP/3gVWp3LC/gUjeWw5V352P49cZEwfRiTYiYxInCEIu0NN4SmrWDwMPNqmL4ELKY+LwucOZPAmTMV6zY9Joj8qmb2VjQxKy3K4fvMFRXsv+NOkCTCzzkHbXw8qDUw6zpYfpO8QjXlElB7ORk+iGhat47G337DsHUrYaef3nFCkmDSeXDIub0+6E9KP4mT0k/CZrNx1muyoYGotFUeTUQ4hq1bATDX1KAJF9rMvsLBIzL4oQqarKWYrKbeF+rSjwCNHmryoXwnxI73aJzDHfsWsij/KIrbvIQ6Gy5GX3sNrdnZ5Bx5FAGHHkr8k090nOtB01YlqXhp60vUttZyYtqJjI0c61xAYaPgit/Fs8wDzEucx7zEed4OQzCIGdeWtM2taKTFZOkx0ZdBLDN/t2IO+hPbA73o2oJs0CTwKnajyXRfrbIFiMqEQy+SF//asFfE2mUNnCVzhPxM3NNWcW7fndBn0nb/37JMw54xskmWQFla6uSq6hEHD9wfQa2i8oUXga7jmc47jNzK2MUQmQ5VObDpvzDrGvf2N1jZsAwsrTByKiQPMZkLH0VU2goA+GP/H9y5+k7e3vG2dwNRqT2+ncK+HWlvuXMrarpRowg9+WSir78Oyd+/48Qh50JgDNQVQsFaJUMd8ugPOojYu+4icskSWQ/YZICWTtUiDqzMSpLUXpmQISptFUcTHU3C0udIX7lCJGx9jMkJydisOpCsFNb3UW2rC5S3QwHsEnqHnqZzBZJ962fGAQtULbt3YyoqwljomO6wfetonxPazpTtgN3fdfxfJGzdgrmyEpvV2v+FAkEbMcF+RATqsNo6toYfyEWL7yXk+OOJv/p6MPVdCNGO2FXhFXxezxZg1HRY/DxMv6L9LVdNiexFG9llsieJvdK2pLGEFnNLLze1JWqzfxK/7+4g60d4bT7878QBNxF91VVEXXctlS+8SPnzz2MzGrskbHtamFYUlRpmXy8fr38JzC5KJw5FTC2yqTrIso8KSGf+XfE3/974b1YWrHS5reGKSNoKAPkh923ut6wpXuOdACqzwYGJi6WujtacHGxGo2JdtydtnZRHAIh//DGilizpmtzS6uHEl+CqDZA6X6EofQNNeDgRF5xP5MUXyW/8/hS8PL3DabQXbDZbu1FcVWMrVU1GJMnHB7leJGTRIrQJCd4OQ6AwqVHBWI3RAGzrz4xMmNR4jc66pnYpmAMXqIIOP5xR//0v0dddB4DNaqVl507M1dU9ttm+dbQ/PWOAhlJ47wz48BzY8eUAvwqBI+y7+BKypk6jedMmCuoLWFu8tm/zKMGwR5IkxrRVGe7e33PSVuXnR8LTTxF58UVInQ1ze2LrB/Da4R1bYQUexT73GG7jWVflEVKiAlGrJBpazZTWtxChjyDULxQbNgrqC3q+KXkOaPyhvljeRSRQluyf5Fe7xNYAib7qKiIuvZSqV15l98GHeC5ha2fimRAcBw374e+PPNPnYEKrh8t+gcNuhcxjFGlyU9km/rfzf/yYL6RJBopI2goAB7WA3IWxCV6dA8+OlSeLfdD4++/kHr+Yff+6VLHu02IGVmnbJ5lHQ4yTW1CHG2U7Ye3z0FDS70pmbl0ucz6cwxU/X9G+lWxkuD/+Oh/V/xII3IBeq8bfFgfA1tI9fV+cuQgkFZT+DTW9TIAEbuGigy7if8f8jzNGn9U+oc84YEKvDgoicMZ0AmdMB6Do6mvIO+VUGn76qcc2HX7GG5vg/TOhvkjeIphymItfjaA3rEYjppISrE1NaBMT+SHvB5asWMLzm5/3dmiCQY5d13ZnH7q2dmz9VRTWFUHJZrFA5yU6Km19dOdYZQ4U/dVF
+s5kNVHUUAQMPGnrp1GTEhUIQFZZI5IkdUgk9LY4qfXveKYJXVtlsVogZ4V8nOG69IQuSTbOxWZD0mo9l7AFWXd5Zpsswpql8tc23IhMgwX3DFzm4gAOijqI88aex2GJYkw5UETSVgB0PDTLmstoMjV5tvOclWBukVd2gmL7vNTa2IgqMBBdinLOymnR8kO/pK6Fplaz0/fbbDaat2yh9rPPer6gvkRswwHKn36axjVrsVksclX1tzeA1Qxjjoexx/d5b15dHvXGeupa68i2bxf21QHuIKHhl1Xsf+ABjAUiYedLROtlV/Gs6n4clgOjYNQs+Xj3cjdHJehMiC6EQ2IOwc8aT6vZip9GRWKbYWZv+E+cgCowEEt9z5V39md8n87aVgt8dhns3woBkXDux7JZkcAtqHQ6Mv/YQOo3X6ONiSFAG0BqaCqZ4ZneDk0wyHHEjOzGVTey8L9z2Lm+n2SsfVfF3lXQ2vPnh8A9NLaa2V8nb+X32Urbv96A14+AH+9uf6uwoRCzzYy/xp/YgL7nfX1hl0jIKj3AjKy2j8XJjDbn+n52+AmcpGgjGGpAHybroLqI3VsDtRqbydSrOZnbOPQiSJwBh90yvObwpgGY1TrA1BFTuX3a7Ryf2vd8X9A7ImkrACDUL5QIvTw5s5ugeAz76v6Y4/vVTQk/+2wy/9pI7F13KtZ9WICOqCB5+1huhfMJ65YdOyk4+xxKH34ES+MB1brf3ghLJ0D+aiVCHbK07NxJ1etvUHTllVgbGmDTW1D4B+iC4Zh/93t/Z+0re6XtgRqPAmWpfucdaj/8iMbVXpJMEbiF5JBkAEqaHEjGz70RznwXDr3QvUEJesT+WZcWHYRa1fFstNTWUv322zT9+Wf7exEXXEDmhvVEXX5Zj23ZJ7MFdQVYeqsa+fk+2LMc1H5w1gcQMTCtQYHjSBoNfhkZAJw/7ny+OukrLhwv/t4EfdMuj1Da0GslraqwlOefqIKr7+lbNzlmrPy3bmmViygEHsO+wy862I9Qfx/VDS9YJ78mTmt/q7M0Qq8meQ5gL96wazs7tKPErmtb+IecZBQog10aIf0I2ZTbBSqWLaPus8+Juu5axu74p13j1qOJW78g+NePcMg5Ln89QwaLGZbNhE8ugoYyb0cjOACRtBW0016JU9tPBZaSWEyQ9YN8PMax1RdJklDplTUrS3VB11Y/fhz6gw4iZNEirE0HJH0ltVxNuvpZJcIcsqiCgwk/5xxCTz0FtboFVjwonzjiXgjtXzu18wCvN41HgbKELl5M+Hnn4T9xgrdDESjIuKh0AGrNxf1vm00/UnbS1QV6IDIBQGlTKU/++SRf7/2a7PKeTcgM/+yg7PEnKL3v/vb3VIGBSNreJ/3xgfHoVDqMViMlTSXdL9j4umy6AXDSMtk4RiAQDEoyYoPQqCTqDKb2Ss0DuXjhHaD3QxseibmisvfGJKmThrnYVeFJ2qURon20CKG1QZZYAhg1s/1tV/Vs7YxuW7zIavs+thtu9qXdHjYKEqbIElAiaascWW1JWxelETqbjoUsOobK/7yGJiLSO4nb4caOz6EmD/JWg59yc+xmUzNby7dS11qnWJvDkWGydCBwhNTQVDaVbfKsrm3BWmipg4CoLquwniY9Jog/86oHlLSVJInkjz9C6kn3Zda18NebkLsKSrZA/CQFoh166BITGXHfvfJ/Pr4QWusgfjJMdUybuEvSti2RkSkqbd1K2KmneDsEgRuYOjKD13IlrJKBqpYqovyjvB2SoBO7qnbx7q53GRsxlpHNdwHd9WxVAf4EHXkE2vj4Htuw2WzdqpfUKjVJoUlk12STV5dHYnBi15uq9sqvC+6BCacp88UI+qT41tvQxsURcfFFqMJCkZBcqjoTDB/8NGrSooPYU9bArv31xIf5d7vmoBEHY1m9GnWwA5PvMYth3YuyzqfZCJp+zMsEipDdrmfro+PZor/AZoXQUV0KNDrvnnMF+zwgu6wBq9VGalgq4yPHMyZiTN83Xrqi352dAieoL4Gy7YAkV9q6gsVKxKWXEnHOOTRt+IOK555DP348KZ992n7eo5gMsklj8WY4+RXP9u1JrFZY85x8PGMJ6PqW5HKGnVU7ufjHi0kISuCHU39QrN3hhqi0FbRjX/H0qDzCLrs0wrGg6ttUylhUzL5LLqHsqacUDyOtbZU7Z4BmZD0mbAHCkzomwPYPw+GM2QgqjVyBvPj5fn/mICcg7KvmEbqRVDYagY6fmUAgcJwxsZHYTBFYTcEU1vVt/AhAXTGsegx+uMv9wQmID4rn/HHnsyhlEVntlbZdky4BkyeT+NJLjLir68/EsHUrBedfQPF11/XYdrtJS08Ls4seh/M+h7m3KPBVCPrDXF1N/TffUPXaa0gaDRv2b2DG+zO4+debvR2aYIgwNk7+XOhL19ahhC3IGpSBMfKCeoGQRPIUOb6etN23QX4dNaPL23dOu5MPjvvAZX3LpMhAtGqJZqOF4loDicGJfHj8hzww64G+bxQJW2UJjIaLlsOiJ2Q/BBeIvvYaJAmyZsykcdUvhBx7LKEnLJbPXXUV0ddeo0TEjtNcBT/cCdvelxchfJXsn6B8pyxbOLVnma2BYp/Du1pZP9wRSVtBOw67SyuFzdaxFcsBaYTW7Cya1q2nae06xUOxm5ENpNK2M6bycpr++LPrm7NvkF93fi27qA4zaj//AmNhofwfjQ5OewOu2QhxEx26v7y5nCZTE2pJjaEpDICR4f4E+omNAu7GZrNhzM/HsP0fb4ciUIjwQB1+pbfRlHM3Wkti/zcYauC3J2UzEaOHTSqHIaMjRnPb1Nu4aNzF7RP6Aytte0Py86N540aa1q7DZjJ1O58a1rZ11P6Mb6rq4uhN+hFiMushJK2WEQ8+SOQVV6AODiavLo9mczNmq/NmqILhyRi7GVlpz+ZhJouJd3e+yyMbHsFk7f550AWVCiaeAeNOAr9QhSMV9IZ9zuG7Sdu2+VrSzC5vB+mCOCjqIOKDet4t4ihataq9gMO+C88pqvOgfr9LMQgAtRaS58gVmgpg2i8XFATMmEnCs88QcaEXdd5DR8LEM+VjX5U6tNlgTdvXNuVi8A9TtHml5FCGOyJpK2inc6WtxyYOp78Fs6+HlHn9XqofO5a4Rx8l8l+XKB6GfcCUX9mMeYBbLwxbt5Jz+AKKb76564Q5dhxkHgPYYO1S14MdQpj272f/3Xezd+HRmMrKO05Epjnchn2FLjE4kbzKVsDxJIbANeq//pq9i46h7PHHvR2KQEHSo8MBBxepYsdDWBKYW4RJjQcprjXQYrKiU6sYFdGxTc1msWA1Gnu8x2/0aEY89CApn38Gmu6LWl0qbY1N8O4p8O6pYKh1y9cg6B11cDDhZ55BzI03AGJSI3CesfakbS+VtmqVmtc2PE/4s++z99RTe1zI6cLRj8IZb8PIQ5UOVdADrWYLBVXyQqhPJm0tpo7KxFEz+77WBew7UfaUdoxnzFYzDcZ+krjf3w4vHCIvSAsGFQlPP0XGurUEH3WUt0ORmX09IMlGreW7vR2
N8hSsk4351H4w82rFmxfjG2UQSVtBO3GBcejVesxWM8WNxe7vUJLkLTNHPQTa/o3FtCNGEHbqKYQuXqx4KPGh/ui1KowWK0U1hgG1oR8/HnV4OLqRIzFXHmD6MPcm+XX3cjA2uxjt0MHa2EjgrFkEHDIO7W+3yrpHTmL/sE8OTSa7zK5nK0zIPIH/oVNAq0Wl9+vbfVowpEiLadtZ4IgcjCTJZmQgTGrcjM1mY2v5VmpbatvdsFOjA9GoO4ZqLbt3s2fyoRScf0G3+yWVivAzzkCXnNyjNmqX3TSfXQb7t0LZP8KMZRAgJjUCZ7HLI+RXNmEwWrqdV0kq4iKTmbHHhm1XNq1793o6REEf5Fc2Y7VBsF5DTLCft8NRHkklS+4c9RBEjW5/e1fVLh774zG+y/1OkW4yYzp0bQH+t/N/TH1vKs9u6qcqMu5g+TX7J0XiGLbs2wDf3Qr5ysqqaCIiUAfJY1VLYxOmYg/kJXojOhPGtu0I9sXiqz//I78ecg4Ej1C8+fbxTYgY37iCSNoK2lFJKpJDkwEPSiQMElQqidQoF3VttVrSvv2G5A8/QBsX1/Vk4jQ4cRlct0VRce/Bjl9GBqP+7z+MOqwCdn4JKx92uo3c2lzAbkLm41vJBhm6kQmM/mMDo958s3fdZsGQIzqsCf/E1/l0/22O3WB3Fs/6vut2eoGiVLVUcf735zPv43nsKq0GuuvZtmZng9ksb2d2kqSQJCZGT2SeFIRpz3K5quKsDyBCDKQ9ic1mo+HXXzFXVLS/p5Qxj2D4EB3kR2SgDquN9kWeA0kKT+HtI1Tsvv1ktCNHOtZwxR7ZPVzgVjrr2fqkAaFKLcsizL6+y/NqW8U2Ptj9Ad/nfa9IN/ZnpF0DPlwfjtlqprChsO8b09uqOPdvgwYH9P0FPbPjS/jzNdj2oVuar//uO7KmTqXk7nvc0r7DzLlRft3+CdTu824sSnPiy7DwEZjdsx+CKxjMBkoa5YItu0SXYGB4fRb+8ssvk5ycjF6vZ/r06fz55599Xr906VJGjx6Nv78/iYmJ3HjjjbS0tHgoWt+nT6MSJanaC9/eCHtXOXS5zWaj8bffMBYUuK3iL60tEeiKrq06LKz3k5POVVwnZkjw52tIZVtlnbQjH3D6drs8QmpoKlllbRqPotLWY6gChs8iw3AhMyYKTVAODeTSbHKg8j9xOgREQUsdFKx1f4DDFPtzNyEogbwKWQLhQCmY0BNPJH3VL4y4t+cJjM1qpWHlSkofewyroeuukQBtAO9FzefhXWvRguyEPGq64l+HoG9MxSUULbmS7AVHYDUaaTA2UGGQE7j2hXOBoD8kSepXIiE1NJVVB6vYlKFCHeTAYvfu5fDyNFh+k5KhCnqgPWk7zEx1x0aO5eKDLubIpCMVaW/0CHk+kF3WiMVqY97Iefx46o+8dtRrfd8YFA3xk+VjUW07cLJ/lF8zFirS3P5776Pk9jtoycoCQJuUBDYblupqRdofMAmHylKOVjOse8m7sSiNXzDMuhYilE+q7qvfhw0boX6hhPuFK97+cMKrTj4fffQRN910E6+++irTp09n6dKlHH300ezZs4eYmJhu17///vvccccdvPnmm8yaNYusrCwuuugiJEni2Wd9VBzaw8xPnE9MQAwToia4t6NdX8Nfb0JNPqQd3u/llpoaCq9YApLE6C2bkfT9yyk4i33g5KoZGYDNZMK0fz+6UaN6OGmD+mJZ3NyHad68GX1CCKpfHpHfOOoBCI51uh17IiNKl0hlo7waLiptPY/NbEbqQSdTMPQ4OD4Bw9dnoLZEI6Hu/waVGkYfA1v+J0/qU+e7PcbhSOct8jm75KqhA5O2kiR138nR9QJKH34Ec2kpwfPnEzhrVse5rJ/g+7bq6gX3wkGnKhq/wDEsNdX4ZaQj+Qeg0unIr9gDQLR/NME6sSApcJyxccGsyansNWnbLolS72AhRvIcUGmhMgsqsuRtwQK3kOPLJmQ2G/x8H8QfIhtNazrkHw6OPpiDow9WrKtREQH4aVS0mq0UVjeTHBXs+Odo5tFQshmyfoTJ3SWHBP1QtReqc+XPDAfm8v1hs1io/+EHrA0NhF9wPgD60aPJWLcWTUSEy+27zNybQRcomzb6AsZm0Pq71YC2szSCT+4o8CBerbR99tlnueyyy7j44osZN24cr776KgEBAbz55ps9Xr9u3Tpmz57NOeecQ3JyMgsXLuTss8/uszq3tbWV+vr6Lv8EvXNs6rHcMvUWpoyY4t6O7NqI9m23/WCprcVvzBh0aamo3JCwhQ6dx4HKI9gxbNtG9vzDKbzyKmw2W9eT9fvh9SPh1Tk+7cRuaWxk3yX/InvhiRirDXKl3uSLnG6nydREebNsYGZujQIgIcyfID+RPPQUNquVomuvJWv6DEz7hcuuL5AQ5o+6eQqtTYmU1TloOjnmeNCHdZl8CZSlXb87JLldCsbZXQWSJBF64omEnXUm6sjIjhOmFvjmerBZMR58NtVTL1YsboFz+E+YQOo335D8/nsA5NZ1SAAJBM7QXmlb2rM8QkpoCthsmPdkU/PxJ1hbW/tuUB8KqW3GwLu/VTJUwQHYNVh9MmlblQPrXoAvr3J7V2qVRFpb0U1vMiG9Yq8Ozf0VzD0bfAr6IKutyjZpllytqQAJzz5L1FVXoR8t6yBLGs3gSNiC/Nl49gcw0s05Ek/x453wn7mK6xF3RoxvlMNrSVuj0cimTZs48siO7REqlYojjzyS9evX93jPrFmz2LRpU3uSNjc3l++++45jjz22134ef/xxQkND2/8lJiYq+4UInKd+PxRtlI9HO5a09UtNJfXLL0j95hu3hZXWXmnb1D3Z6gS6tDSsTU1Y6uswl5d3PRmkNYvTAAEAAElEQVQUA4Zq2fhl09uuhDuoMe3bhyY0AI1fK9pgCY5fOiANRp1KxxsL3+DBWQ9S3LYzJiPWBwe4gxhJpcJcXoG1qYmmDX94OxyBAqhUEqnO7ixIPxJuzZF1rwRuwZ60DdeOpNloQauWSIrskCcxlZdT+tDD1H7+RZ/txNx4A3EPPNA+6QFks8/zPuPbsUcwrWED9667zy1fg8Bx7DsXhAmZYKCMGdEhj9DTuDUpJAkJiZveaaD0vvto3bPHgUbbxuXCeNJtWKw2civlwg2fTNrua5vHJxzaZaG31dLKxtKNVBoqe7lxYNglEuxJ26/3fs3Nv97ML/t+6fvGuEMgMAaMjbBvnaIxDQsUlkaQ1GqC5s4h+rprxc4+d9NQClvfh9LtsmmgmxDjG+XwWtK2srISi8VCbGzX7dKxsbGUlvYsCH7OOefw0EMPMWfOHLRaLWlpacyfP5+77rqr137uvPNO6urq2v8VFvYjTC6gpqWGzWWbHdM6HAh72hxDE6ZASB/bPHvAnaX1KVGBSBLUGUxUNQ18xVUdFETy+++RsWoV2gN+v1GpZVF+gPUv+ezKrn7sWNLOD2DU/CqkOddD7LgBtaNVa5kWN41TMk4h265n64sD3EFOzK23kPzZp4SeeIK3QxEoxMjoVrRhf/DN3q8cu0GtAb
XWvUENc+wVCZhkeaiUqEC06o5hWsuOHdS8/z7Vb701sA5ixxE7704sNgulTcJ4ZbAgJjWCgZIeE4RWLdHQYqa41tDtvJ/aj4TgkWxPljBPHofNYum/0dHHARIU/wX1JcoHLaCophmj2YpOo2JkuA/6BuzbIL+OmtHl7ZzaHC758RJO/VpZaR57MYfd9+Kfyn/4qeAntpZv7ftGlQqOvB/OfFeekwocp7UB8ts8DjKPdmtXLbt2UXLPPZQ99ZRb+3GY2kL4/nb44z/ejmTgrH8ZLEZInCFXSrsJYbKqHF43InOGX3/9lccee4xly5axefNmPv/8c5YvX87DD/fuSO/n50dISEiXf4K+OXv52Vz4w4XsrNrpng7sW67GHu+e9geIXqsmsW3wtNdFiQT9uHG9rxIefDYEjZB1bbd/7FI/gxZJQjr/C7TH3QWH3apIk9ltzrDChMzzBEyZgv/48UgDqJYWDE5CQirRx33BH9V9V212w2aD8l3uCWoY02xqZn+TLD/S3CjLGmTEdP2s08YnEHHJJQ4vnpgKCzG9dUnHxAqYGD2Rlaev5NPFnyoUucAZWrKyyDniSEru6TCSs+uNiqStwFl0GlX7LrFd+3uXSHj+JDW77j+TgEmT+m80OBZGTpWP7UUWAkWxy7ClRQehVvmgzqO90nbUzC5vu2uBKjOma6WtPUHkkKn2pPNg7GLwEwUhTlFTAMFxEJ4CkekuN2ez2aj56GNadu7strhkqaun7tPPaPjhR5f7UYS83+GPV2H1M7L01FDDUCP7CgHMudFt3VhtVvLr8wExvlECr83Ao6KiUKvVlJWVdXm/rKyMESNG9HjPvffey/nnn8+ll17KhAkTOPnkk3nsscd4/PHHsVqtngh7WJAamkpCUALNZjdU2hpq5Q87kDUSHaTg/AvYd/nlGN1cKZ0W3aZrq4AZmR1r8wHfR40fzLxaPl6zFKwOVD4MIUxlZfI2Pb8gmHerLHI+QL7Z+w1f5nxJeXO5qLQVCBRkYoxsMNNkLcXi6GeQxQwvHgrLZkBljhujG34U1BcAEO4XTmGlPIk/UApGPzqT2NtuJfLSS/ttr3zpUnKOWkjV5z/DB2fJg3TkyruYgBhhCOElWrZvx1RcjGmfPJYxWU0U1svHohJFMBDsura7ezEjcyqB1d5o2/g8+2eXYhP0jD1p65PSCA1lsjkVUkfyvw13JW3t8gi5FU2YLVbnDfgEzjPiILjhb7h0hSJGVqbiEkrvv5+8M87EZuy6C1U/biyRVy4h5vbbXJIvVIwJp0NIAjSWwbYPvB2N82x8XZYEiRnn1irpSkMlKkmFVqUlPijebf0MF7yWtNXpdBx66KGsXLmy/T2r1crKlSuZOXNmj/c0NzejOqDSS62Wna8HxR+xj/Dighf54dQfOGzkYco3Xlsgf9BFjYaoDIdusba00PzXXzT9vhpVgHu3EbXr2pa7bhJm+GcHeWecSeGSK7ufnHKxbPhQle1TZg+2lmbyTjie3OMXK5Jgf2P7G9y79l62lu6ivEE20BCVtt6hZc8eypcupe4b3/l9Hc5MGZmCzarBJpkpbix27Ca1BsKT5GMf+twaDHSezLabkMUM/LNOry0GyYbVpILFz4N/uCJxClwj+OhFjPrvW0RdJRv0FDYUYraZ8df4ExsQ28/dAkF3xsbJnxO7SntO2rYnsOrysFksjkkkTDgdzvkYTvdd7wVv0p60jfbBpK29yjb2IPAP63LKXVulE8L88deqMVqs5Fc1t//OFzUUYbQ4IENXvgt+eRT++VzRuHweSYLAKEWasjY3EXjYXAKnT0fl37XYRx0SQsz11xOycOHgWHDW6GDWtfLx2uflgoahgrEZNrwiH8+5UZGEe2/EBMSw4ZwN/Hjqj2hUQqPYVby61/Wmm27i//7v/3j77bfZtWsXV155JU1NTVx8sexqfMEFF3DnnXe2X7948WJeeeUVPvzwQ/Ly8vj555+59957Wbx4cXvyVuA6apUbv5dxB8P12+Bix7dcSWo1Se+8TdwjD6N2s4OkfdXbYXOePtBEhNOyfTvNW7ZgrqrqetIvGKZdLh/70CCh5bNHsTbWYyktQNtLxbwzzEqYxfQR07EZ5clsfKieID/xwe8Nmv/4g6pX/0PdF77z+zqcSYsOwWqMBmBbWZbjNwqTGrdgrwhKDklun9B3rrS1Go205uU5lnDJ+omgstfJPKWU+Luvg4NO6XL6p/yfuP6X6/l4j4/K8wxi1EGBBM6YQeCM6QDEB8bz9qK3eWzOY4NjMioYctgrbfuSRwA47D8byZo6DcO2bf03GhIvV2Bp9YrFKejAvpvPJytty9uk9Q7QswX3VdqqVFL78zK7rIFo/2gCtYFYbBb21e/rv4Hsn+H3f8OWdxWNy2cxNimeqNRnZjLqtdcY9cbrirbrNiZfAP4RUJMHuxz0hhgM7PwSmqsgLAnGn9Lv5a6iklREB0S7vZ/hgFezH2eeeSYVFRXcd999lJaWcsghh/DDDz+0m5Pt27evS2XtPffcgyRJ3HPPPRQXFxMdHc3ixYt59NFHvfUlCAaCkytzklZLwNSpBEyd2v/FLpLWNoDKcVHTFkAbH0/Cc88SMHUqmsjI7hdMXwKx42Gsj5g71eTjn/cGGSe2YJx0F5LWddOi26beBsD7f8iDrnRRZes1AufMJeT47QTNm+ftUAQKoNeq0TMCE/vZvH8PizOOdOzG0cfC8puh6E/ZfTbY9cUZQcdkNtIvkcZWMxqVRHJkYPv51l27yD/zLLSJiaT//FPvDZVuh08vRqWywqHnwZybul1S2FDIL4W/oNfoOWP0GYp/LQLH0Wv0TI6d7O0wBEMYe9I2v6qJZqOZAF3XqV1KaApTYqcQqynE2lxMyz//EDBZ/M55C5vNRk6ZDydtD78LJl8Itq4LjGaruV0GyB36lpmxwfxdVMeesgaOmRBHamgq2yu3k1efR3p4P5qrmUfDz/dC/ho5IakL7Pv64c6GZbDuRdmzxF5x6mZsJhOtublYm5oGx+eXLlCex//6GKx+Tk6ADoWF14lnyTuvbFZ595xgyOD1n9Y111zDNddc0+O5X3/9tcv/NRoN999/P/fff78HIhu+tJhbWLJiCfl1+Xx/6vf4awauSdqF5mrQBcnbCgYpdnmE4loDBqMFf51rVcchixb1fjIwCsaf7FL7gwabTU7kmA2oM+fif5oy5mN27CZkmb44wB0i+KWmkPD0IHFuFShClF8i+9lCVnWu4zeFxMsuy8V/ySY1Uy5xX4DDiNw6+WegtsiL1slRgeg0HYvWptIyJD8/dKNG9d5IQym8d4asVZZyGBy/tMdJROft0gLP0ZqXR9Pvv+M/+VD8Jxzk7XAEPkJUkB9RQX5UNrayp7SBSaO6SqGE68N5a9FbtKTsQVKp0KU6uDXdZIDfnoS9q+CSH1zyJxB0UN7QSkOrGZUEyVHulXzzGqEJ3d4qaSzBZDWhV+uJC4xTvMvM9kpbOSGeEpoiJ20dec5FZULYKKjdJ/uujD5G8fh8iqyfoKVOseS2zWjEBqh0vecHGn//n
aKrr8FvzBhSv3TSQNddTLtMlkco2w45K8HR4gdvolJ57Pf73rX30mRq4oqJVzA6YrRH+vRlhBW4oBt6jZ69tXupaqlybFuJo6y4H55Khy3vOXVb42+/0bh6NZbaWuVi6YWIQB0RgfJDI7dSOTMy6Ed32dQii/cPVf75DFvWClDrek0UOEuloZImk6wt3NN2YYFA4BrJwckAFDcVOHejXSJhl9C1VQKbzUZlcyUAhiZZAuhAw8WQoxcyevMm4vtaONGHyVtSo0bDGf/DVF5J0bXXkXfqaV0usydt8+vzsdqEiaunaFq9hrLHn6Dy5Zfb3/vfzv/xadanVLdUezEywVCnXde2F4kEAP3o0fhlZCA5Kien0cPfH8P+rZD7mwJRCqBjPJsUGYifZvhI+9mTp8mhyagk5dMPdr+LrDL5b8CpxUlJgow2Q6asHxWPzadoqoKijfJxxkJFmmxcs4asqdMouePOXq/RjxmDKjgYTUT44PExCoiAWdfI1cYjBvlCrNUq69l6kN+Lfufngp8x24aQ5u8gRiRtBT2ieCWO1QJ7vofWOrlSywnKlz5P4WWX07x5izKx9ENatLxyuLfCdTMygNa9eym+9Tb233V3zxdk/wzPT4Tl3bexDgkMNdi+v4O8n6Ip/GcSxhZlqjGe3/w8M96fwX//+W/7IEyYkHkfc00NjatXezsMgQKMj5bNIGtNDhqR2RnT5iye97tcbSFwCUmS+O3M3/j5tJ+pqJafPwcmbUHWd9eEh8Oqx+G3f3dvSKuH6DHyVk//MFTBITSsWkXLjh0Yi4raLxsZPBKNpMFgNlDWNIQXC4cY2oR4ghYsIHD2bEBO1i/buowH1z9IlaGqn7sFgt4Z165r27MZGYDBbKDSUOl4o5LUScP8G1fCE3Si3YTMF3eOrX1B3u2x54dup9r1bEOUl0YAGN02P8irbMJotrb3Y9/F0i/2BGT2T/LuQUHP7F0J2GSjudCRijRp2L4dW2trn7J6mvh4Mv/8g1Fvvjm49N8PvwsWPjL4pcKyfoClB8H6l/u/VgFsNhuPzH6EW6fc6ra/+eGGSNoKesSetHX4YdcfhX9CUwXoQyF5jlO36jMz8MtIxy9NWbfR3rBLJCihawtgbWmh/ptvqP/2Wyx1PSQ4QhOhsUx2Y6/Yo0ifHmXFA7SW1NBaq6VpTwXq0BBFmrUP8EJ00ZTVtwI+OsgdQlhqa8mePYfCyy7HXC0qw4Y6hyZkYrNJWKRGalpqHL8xOhPm3QHnfQpaof2mBJIkMSJwBNnl8mJhnwtUKjWserQjcbvnB7mK4rd/y/pqfvK96qBA4h56iOSPPuxiDKlVaRkVIsssCIkEzxF8xBEkLnuZiPPPA8BkNXFS+knMTphNUkiSl6MTDGXGtFXa7i7tOWn78Z6PmfbeNP731s2UP/MMhh07HGy4LWm753u5+ELgMj6dtM3+CbJ/hIb93U7Z55Pu0LMFiAvVE+ynwWy1kVfZ1KX4yKHKzJS5oPGH+uIOMzVBd7LbNPUzjlKsyejrriP1u++I/FfvcluSJA2uZO1QwmaDNc/KBmRNFR7pUpIk5o6cywXjLyBA66MyMB7G65q2gsGJfVVEsQnd7rZttJmLQO2cQVX8k08qE4OD2JO2eyuUSdrqx40j6tprCDrsMFQhPSQ0Y8bA6ONgz3JZG+ekZYr06zEmX4i+ZAupp1xHS70/6mDXq2FtNlv7757KFAOUExeqJ0TvurmZYOCow8Lwy8gAqxVzWRmaiAhvhyRwgXEjorCZwpB0Neyu2svMhCmO33x479vYBAPDZrOR3YMUjLGomNIHHsB/4kSir7sW5skGjax6FIr+kifJ0WOhYhccfnfHeSDslJ5101NCU8ityyWvPo9ZCbPc90UJekWn1nH7tNu9HYbAB7Cbke3e34DNZuuW3BgRKC/aJP+WQ9W2P1GHhuI/fnz/DSfNlmVXmqtg3wZInq106MOO9qRttI8lbS0m+XkEMGpmt9PtlbZuStpKkkR6bBBb9tWSVdbAogmJHTtKmsva/wZ6Resva8EX/gE1+bJRtKArVgvkrJCP7XISCiBJEn6pQ7waM38NrHkOjnpo8P3u5K+RJS00ephxlbejEQwQkbQV9EhqmFzVqkjS1maD3cvlY/uq/SDGvvq9V6FKW0mSiL766r4vmnuTnLT9+yN5q4VCW048QsJkuPw3/CQJP4WarGqpot5Yj4REQ0MYUO6bVQlDkOSPPkSl13s7DIECRATqUFtisVHDn8W7nEvaChTjla2vkF2bzVEjT6KhxYxaJZES1VHB3LJzB01r1mCprpaTtiAnZpsq4M/X5P/3kLDtC2FG5lksjY1IWi0qP6WekgJBB2nRQejUKhpazRTVGEiM6FrZNG3ENH4/83dUYb/TPHoz+nHjHGtYrZWLLf7+UB7Hi6Sty+RU+Gil7f5tYDaAfwREdzUdstlsbq+0BVkiwZ60XXxwPCODR5Jfn09eXV7/SVuAE18G/3BQi/RIjxRtBEONvJAzcqrHuzds3075v59CFRRE4iuDrMDpz9fkhLZ/BJz6f96OpitrnpVfJ50HQTEe6XJd8TrqjHVMipnk2N+eoF+EPIKgR+yVtooYlZTvhJo8eYUn3TlnRW+IjdsrbXMrm7BYPdT/yCmQPBesZlj3kmf6dJXOWpYKb1mxJxISghLIqzABkCn0bAcFImHrW4RpZZfnXZU5zt9c9Bf8cKe8ii8YMBv2b+Dngp/ZVV4CQFJkQBeDGv+DDmLEAw8QceEFHTfZbFCZ3fF/ta7XhK1h2zYqXn4ZY0GH4ZziEkiCPql59z32HDqF8meebX+vpLGEulahCS1wHa1a1Z4E7EnXVq/RE64PJ/TEE4l76EECZzlRXT+2TcN89zdC69NF6ppNVDTIcl9pvpa03bdefh01o9ucoNHUKFeAI7lVCuZAM7KT0k/isgmXERsY61gDQdEiYdsXIfGyNNa0yxX7PtV+8SVljz+BYdu2fq+VdDqaN26keePGwWNGZmdOmy/NP5/JldqDhZKtsPcXkNSyYZqHeH/3+9z2+238VihMLJVCJG0FPRIfFI9WpaXV0kpJY4lrjdmrbFMPB51z+odlDz/M3kXHUPf1167F4AQJ4f7oNCqMZivFNQbF2jWVlVHx8stUvtbLCtzctg/8zW/L7pyDmcoceO4gWPUYxTfeSNm/n8JUXq5Y8523UWWXt5mQ+doAd4hjs9mwmUzeDkPgIqMCM7AYEjG1DGBRZNsHsGGZ7DAuGDBXH3I1t029DVWrnEg98LNOGx9P+FlnEnriiR1v7vkOclfJxyotWIw9m5MBFc+/QOWLL9H4e4eBYGqogrtpBP3SuncvmM2oIzskZR5c/yBzPpzD13s9N74R+C52Xdtd+xuUbThtgey9kDIPTJ51H/c1cirkn01cqJ4gPx9LDhZ0StoeQLAumLVnr+XXM39Fr3Hfwn9mm6xQdplczfyvCf/iusnXtT/vHMZmg1Zldlv6FGGjZGmsBb0Yaw+A+uXLqX77bQx/b+/3Wr/UVOIef5yk995VrH/FiD9E/qy0WWDd
i96OpoM1z8mvB50K4cke69bdcijDEZG0FfSIWqVuXw11eVI37kSYfydMvqD/aw+gNSsbY36+4pWcfaFWSaS2bU1VStcWoHXPHipffInqN9/EajR2vyD1cIg7WB4UZ/+oWL+KY7PBtzdAaz3G7Wup//4Hqt96C8xmxbrokrQt667xKPAuVW++Rc7hC6j5SCTrhjrzE46hOf9q9IYFzt/cblLznTCpcYFpcdM4f9z5lNXIW5ozYvpJoJta4Msr5eOkWXBfpSyN0NmcrBPBRx1J8DGL0CUnt7+XHCIfVxoqqTf27jgvUIb4fz9J+soVhJ5wQvt79irnxOBEb4Ul8CHGtena9lRpC/DN3m+4csWVfJH9BabycsxVDhYH6ALhhu1w4ktOF14IuuKzJmQ2W6dK256ruCVJIkLvXh+E0W2VtvlVTbSYBjgm2fUNLJ0Ay29SMDJBb4SdcTphZ59F4KzuOsgHImm1hJ18EvrRowenKdmcG+XXLe9Co3KFTAOmoVT+fQaYc4PHujVajBQ1FgEiaaskImkr6BXFNO+iR8P8O2DMsU7fGv/sM4x6600CZ/b/Ya4k9m1LSiZtA2fPJnjRImLvvoseHzWSBMc+DVeshkPOUaxfxdn2AeSvBo0/2rOWMnLZy0RddRXa+HjFurD/zsUFjKK0vgWA9P4SGQKPYTObMZeW0vzHBm+HInARl4wXk+aAX6isrVq0UeHIhh859l0FnRaorM3NNP72G6ayso4L3z9DlqfRBcE5n8jvzbut18Rt+NlnM/K55wiaO6f9vSBdEDH+srZZfl2+e74gQTuSJKFNSGg3b2w2NVPaVAp0yFEJBK5gNyPbVdpz0nZfwz7WFK9B8+wb5Bw2j5qPPnK88cGYIBmC2JO2ab5mQmaokc2X/MPl4hMvER3sR6i/FqtNHtPYbDbKmsrYWOrE+MQ/HOoKZX1SsRjdQf5a2PkVtCi7yBuycCFx99+PX1qaou16heS5kDAFzC2w4RVvRwPBI+Cq9bDoCY+ao+2r34fVZiVIG0SUf5TH+vV1fGxvhkBJ2pO29d7bPqmNiUEb4xnR7M7YB1Q5CpmRAUhqNSOXPtf3RYnTFOvPLTRVwY9t22Lm344Uk0HwggyCFwygSq8P7ElbjWUE0EhsiDwQEwwOQo87Fv34cQQceqi3QxG4iL3iJ7eyDoOpFX+tE0ZJGh1kHg3bP4bd3/a4LVLQNzuqdlBQV8D4yPFk2XcVdFqgatm5k8IrlqCJiyNj1S/ym6EjITAGjn4M/DpN/u2atg5ONFNCUyg3lJNbl8vE6ImKfD0Cx8ivzwcgQh9BmD7Mq7EIfIMxI+TPjYKqZppazQQesP2+3asi1ES6SoWl0kkZLqsVSrZAWKLHzGx8Dfucwud2jgVEwEXfys8elbrb6QfXP0iloZLLJlzm1meNJElkxgaxMb+G7LJGEqMkjvxU9lJZf/Z6gnQOfN8Tp8uL0c1VULwZEj1vuDUoWf+ybJjthOGpO7DU1tL055/YWlq67FwZFEiSLHX44Tmw8XW5ulUf6t2Yokd3MwZ0N51NBwdlRfQQRVTaCnql3aik1gWjkt+fklfmjENLBystWnl5BKepLwFzq/f674mf7gFDNcQeBDOvcUsXBrOBkiZZR9nQJFclCROywYU2IYGg2bOFKZkPEB/mT8DID9Cl38Onu793vgG7RMKub4VJzQD4Pvd7bl99O29uf5c6gwmVBKnRHVuQrYYWdOlp6Md3cns/aRncvBsmnNa9wXm3yZpzPWCpr+9iRnZc6nFcefCVjIkYo9jXI+hOzYcfUvrwIzRv2dL+nn1h0i5TIRC4SmSQHzHB8qLb7tLuurb2Mf03Y5oY/ddGRtx3r3MdfHoRvL4Atn/qaqjDlpy2OUW6r1Xa2ukhYQuwvmQ9vxb+isnqfh+EzE5mZCG6EGIDYkkOSaaqxcFFCrUW0tsKUQazVJ0nMbdC7q/yccZCxZpt3rQJU4lzvjmtOTkUX3c95c8tVSwORck8BkYfCwsfkQ3YvUWrwtrmTiD0bN2DSNoKesUu3G6vCHGapipY9Rh8fIG8fdZJmrdsofq992jZuXNg/btAers8QpPibVtbWqj7djl1y5f3ftHKh2HpRNj2oeL9D5jc32Db+4AEi5+n4pXXqPnoYyyNyn6PCurlpEKYXxjFVfIA0Of0vwSCQYJaJRGs80eSrGwv2+t8A+lHgtoPavKgfJfyAfo49p0selscAKMiAtBrOya+QXPnkPbtt4x88cWuSXGV2qkty3XfLidrxkxKH3yo/b2TM07mqkOuEklbN1P/44/UvPcerdnZ7e91rkQRCJRibB+6tnafiv1SHXWqARQE2LVKd3874PiGMwajhaI2c2OfG9Maavo8/eCsB7lr+l1khGe4PZTOSVuAH0/9kW9O/qb9998h7InJLJG0BSB/DZiaIGiEYvIXNpuN4htvImfBETRv2uTwfX5jxqAfN47A2bOwKeilohgqFZz9ARx6IWic2LmmJM3V8Nx4+Owyrxjq2ce1YnyjLCJpK+iV5JBk7ph2B4/PeRzbQCqosn4AmxVGTIBwJx6WbTSsWEHZw49Q+/kXzvftIqlR8oCquslIdVMPpmEuUP/9D5TccgsVz7/Q+/fVPxysJlj7/ODRVKorBI0/TL0US3AGVa+9Run992MsyFe0G3tld0poClltW8lEpe3gw1xTQ9V//0vZkz071guGDhMCT6cx+07StCc6f7NfEKQdDsFxUF+sfHA+jr0iwdwi635l9PJZJ9ms8Naxsl6tyeB0P/rRmWC1Yq6pGdjzXDBgIs49l4gLLyRgSsc2W1GJInAHY+Lkz4/dPejaBmgDiA+UvQcGVIxh96XYtx6aKgca4rBF1liF8AAtkUFeSua4g/r98GQyLJsFlp4raafHTefsMWcTogtxezh26Qm73JC6l+rfPkk/CpCg9G/56xvuZP8sv2YcpZi+tbWxEc2IEUj+/ujHO663qg4KIuXzz4h/5BEkjVD57JGNr8u+B+W7vGIeKcY37kEkbQW9EqAN4Nyx5zIrYdbANEl2t1WSjjl+QP37pWcQtGAB/pMOGdD9ruCvU5MQ5g8oL5EQsvAo/DIyCF28GJuxl4TwoReCPgyq98KurxXtf8BMOk8WND/iXlCrib7+eoKPPhr9uHH93+sE9slESmgKOW0r5Rm+VpXgA9iamyl/4kmq33kHS6MXZUQELjM+JhmbOZTcigHK2Jz0Cty4Ux7QCxym1dJKcaOc6K6tl6Vgev2s2/I/2LcO1r0ERud3N+jS00n/dRWpX3ze/jy32WwUNxazumi1R7atDleCjzyS2DvvwC+1YwJjn9TYdzQJBEowrr3StuetsfZJdPnKHyi67nqq33nH8cbDRslVdjYr7BmAlM4wxz6X8Lkq28I2Q1qVSpYW8DKj2xY+C2uaaTYOsBIzKBoSJsvHOT8rFNkQxi4TkXm0Yk2qg4NJ+fgjMv/Y4JtSa2Yj/PUWvHMSWDxYEWxs6jBBm3ODx00kbTabSNq6CbFEIXAPxibYu1I+tmseOknYyScRdvJJysXkJOk
xQRTXGthb3sjU5AjF2lUFBpL6TT+JWL9gmH4F/PYkrH4Wxp00ONx7I+QPYDUQ+a9L3NLFFROvYHHqYpqMZt7+WpbG6GzMIxgcaBMSCD3lFPzS04WW6RAnrW0SmTPQBaoA5T4fhxMF9QVYbVaCdcHsq5TX0Dsb1LTm5lK45EoCJo4nPuZL+c35d0Cg8268kiShHTGi2/snf3UyBrOBr076SiQQPYTFammXARKTGoGS2OURdu+vx2q1oVJ1HTemhKawtmQtdfk5NPy0DqvBQMQFFzjewZjjYf82uShj8vlKhu7z2E3IfC5pW7Befh01s8fTG/ZvoLy5nEnRk0gMSXR7OJFBfkQG6qhqMpJT3oh/YAUPrHsAJHjv2Pccb+iQcyBhCowY5iadlTlQnQsqLaTOV7x5lU434HutBgMqf38Fo1EQqxl+eVg2tNvxBUw83TP9bn5H9p4JT5FzBx6mrLkMg9mARtKQGOz+v/fhhKi0FfRJSWMJ3+Z+y5riNc7duPcXMLdAWJJsWjUESWszCrAPtDzOtCtAGyBvz9n7i3disFrhq6uhYJ3HulSr1CSGJGJoDgMgJtiP0ADvr94LuhP/2KNEXnIx6mCRVB/KpEcHoYtaQZbtJfY3urAV0GqBhjLlAvNxOlcj7C2Xq2c7L1C17NqFad8+jNs3yAP/qNEw7TLF+pckiTERY0gPS6fRKKrl3YHhnx0Y8/O7SFKUNJZgsprwU/sRFxjnxegEvkZqVCA6tYomo4XCmu47J+yLBFuTrcTccjNRVy5xrgN7EcbeX7yilTiUsc8l0nzNhGxf30nbz7I+4+41d7Ni3wqPhdRZIiFIG8TflX+zs2qncztKpl4Kx/4b4g9xT5BDBfvPN2mWXFCkEK7INBm2byd73nzyzzhDsXgURxcA06+Uj9c855niFrNR3o0FMPt6UHu+NtOu1z8yeCRalZi7K4lI2gr6ZHXRau5cfScf7nbSEKuzNMIAKkRtJpPXBcbTYmQdGKXlEezYbDYM/+ygefOWni8IjITJF8rHa55zSwz9suUd2PIuvHcGGGoBqPvmW5r++BOb1erWrrPb9Kg6V54JBALlSY0ORBPyNwRuZ0vp7oE1krMSns6Az/6lbHA+jD1pGx+QRHWTEUnqOqEPOuwwRj33INGp8iCYY55wafup1Whk/733sfeYY9sNJN9e9DZfnPgFE6OHeTWRmyh9+CH2LjqG+u++a3/PPqlJCkkamN6iQNALGrWqfczUk0RCe9LWr4zISy8lYPJk5zqIGSdXcFlavVdMMESxJ2170y0fkrTUQ9k/8nEvSVtvSMHYJRKyyxqIDYzFX+OP2WqmuEHo7jvN5PPh+m2w8BHFmjRXV5M9Zy5F1143oLm+JiYGc1kZrfkFWHuTGRwMTLsUdEFQvgOyf3J/f9s/gfoiCIqFg892f389IKQR3IdI2gr6ZHTEaCbHTGZs5Fjnbqwrkl8HKI3Q8Msq9kyaTPFNNw/ofiWwT573VjivH+gItZ98Qv5pp1H+9NO9XzTrGnlLSvGmju+pp2gog5/vk48Pvwv8w7AajZQ9+ij7LryQpvXrFe+ytKmU236/jTe2v0F2uV3P1ocGuD6ItbWVpvXrMVdXezsUwQDRa9XobfLW+c379wyskcg0uRq0YJ3sXCvoF/vgNkCSzYESwwPw13Uk8dRBQQRWvEdgrAFGHwdpC1zqT6XT0bR+Pca8PAybZbfmAenVCxzCZrOh8tMj6XT4H9Sx40jo2Qrcydh2XdvuZmT2iXRxYzGtllbnG5ckOPoxuOg7GH2sS3EOJ8wWK/lV8lzCp+QRiv6UNY7DkyGk+64Bq83axafCU9gT43vKGlBJKpJDkoGOBTOHsZggb7XQcA5PhjjlFnYNW7ZgqaqiNS93QGZimpgYkt5/n8z1612SV3A7/uEw5WL5ePWz7u/vrzfl15lXg9Y7OsFalZaU0BQywjO80r8vIzRtBX1ySMwhvH3M287feNG3UJMPoQPTMzHm5WEzmZC03iuttw+sCmuaaTFZ0GuVrYgJmj8fKSAAbXw8NqMRqacHT+hIOP2/8gp2YKSi/ffLj3fK7pNxh8j6uoC1qYnghQtp3rSJwBkzFO8yqyaL7/O+J7smm9DqCYCotB3sFF56Gc0bNxL36COEnXqqt8MRDJBIv0RK+ZusaicnNXbCkyF2ApRth6wfZD04QZ/Yk3c2YzTQgwlZZbacBFf7wdGPKtJnzM03Ifn7E3DooV3et9lsIoGrMJIkkfTO29hMJug0MS1oEHq2AvfRV9I2Uh9JsC6YBmMDBWVZjCyRE7cBU6Y43sEYkax1loLqZkwWGwE6NfGhPmS6tK/NhKyXKtv9TftptbSiVWmJD4r3WFiZ7ZW2cnVzSmgKu6p3tT9zHWbPd/DxBRCZAaOPUTrMYUvg3LkkffA+1saBFUVJkkTA5EkKR+UmZlwNf/xHNuwrWCfLTLiL8z+Xzc8Ovdh9ffTDGaPP4IzRg1i2YggjkrYC9xGePOBbIy+/jJDjj5dXcL1EZKCOUH8tdQYTeZVN7QNhpdDGxJC5dk3/Iupjj1e0X4fIXgH/fAaSChY/D21bODXh4cQ99CA2qxVJpXyhfnJIMjdMvoEAbQAvfSkPtjJ9aSuZDxIwdYqs2TiYtygJ+iUpOIXSRihqLBh4I2OOk5O2u5eLpG0/dK5AaqiPAIykd1qgstTVUff9H+hnvUFAdGu7CaSrhBzTdeJZ3lzONSuvocpQxYrTV4jErRs4cPH53hn3cvmEy9EOAqd1ge8xdoQ8Ztpd2l0eQZIkUkJT+Lvibyq++hzL8x8SMH06SW//18NRDi8669n61Gds4nQ45DzIOKrH07m1HVIwGpXnUg6Zbc/S4loDDS2m9gUyp5O2qfNBpYGqbNmMK2KY7Y74/g65AGvODTBKuUIdlU5HwKQhknR1lZA4eTy86b+y1KE7k7b6UPlnJfBJhDyCwCFazC2OGZXYbGB0XU5AUqnQjUxAl+g950FJkkiLdq+urdOul1V73RJHF4xNsPxG+Xj6lT2K8LsjYQswKmQU/5rwLxannE5xrQHoofpMMKiIvOIK0n//jfCzvaOfJFCGcVHpANSYXJBhsS8w5awEY3cTHEEHZU1tDrsqDSVV8nMgs7MJ2c6dlD32GCVPvuxW1+EwvzCyarIoN5RTaah0Wz+CDlSSirigOKL8o7wdisAHsRcY7KtupqGlu/HSyeknc92k64idMhtNbCzaESOc76R8Fyy/BX590tVwhwX2pK1PSSOAnKw96WUYf1KPp72lbxkWoCMm2A+A7PLGjqRtvZNJW31oRxVx9s9Khjj4sdlg55eQ9b0i83qlMZWXU/X661S88KK3Q+mfWddBxkKYfYN72m+p84zRWT/YbDaXDOYEfSOStoJ+eWrjU0x7bxrv7Xqv/4tLtsC/U+FT3zCjsQ+w7M7e7sJcXY1x377eLzA2w5vHwEtTocaFSjhH+PsjqN0nS1scflf7282bN2Pa74KzvBPYB7jRwX6EBQxivSIBKj8/36ocGaYcGj8aALNUT11r3cAaiT0IwkaB2S
BMavrBPplNCk5ib3nbApW90ra+BKk+n6AFCwiaM0fxvo0FBVS/+x7Nm7egU+sYGTwSGIDen6BPCs47n32XXU5rnpOJAoHABcIDdYwIkbfg7+mh2va0zNO4bOJlpE05gozffiX+ySec76S2EDb+n1w95mZTWl/AZ5O2/WBPknpDCiazkxlZ50pbp5NKGQvl16wflQxv8FO6HRr2gzYAkmYr1mzLrl1Uvvoqhm3bXGrHWl9P+dPPUPXf/7rdGNtlItPg3E8gWbnvYxc+vgBePwL2u/Y9dZV/Kv9h1gezuGblNV6Nw1cRSVtBv4Trw7Fhc2yFcvdyMLeAtfvqvqOYKyspe/wJaj/7bMBtKIXdjCzHTZW2ALWff0H2vPmUP/VU7xfpAmTXcJsF1r/ktlgAWQvn5Ndg8VLwk79+m81GyZ13krPgCBpXr3Zb16v2rWJv7V72lNYCosp2qGEzDfzvXuBdDoqLxmoKBWD3QCv6JQnGLJaPdy9XKDLfxGQ1kRqaSlJwGpWNsrSI/XnDz/cRsH4JiedmMuK+exXvu/rd9yh75BHqvvkaYOBbRwW9YmlsonnTJppWr0Yd1PEc21m1k5t+vYl3d77rxegEvs7YODlh1ZOurR2XFltTDpNd0RtKYP+WgbczTOgsj+AzlGyVk0RWS6+XeNNJ3p60zSprJCkkCZWkosHYQFVLlZMNHS2/5q8ZlBWnbiO7LUmdOl9RU6uGVauoWPo81W+/41I7upQUQo4/nqglS4a3PFvxZsj9Vf5b9A/3aih59Xk0mhoxmA1ejcNXEUlbQb/YH7Z2baI+2f2t/Dpm4DqsrVlZVL/9NlWvvzHgNpTCPsDaW+6+pK3+oPFgMmGuqMRmNvd+4dyb5NfN70BjhdviQZLg4DMh/cj2tyy1tWhHxKEKCCBg8mS3dFvbUst1q67jpK9OYnd5DSD0bIcKzZs3k3vSyRRescTboQgGSGSQH2pzDAB/Fe8eeEMTToU5N8L0yxWKzDeZlziPr076irOT7wRgZLg/gX4a2dhl+yeABCnz3NJ30JzZBMyYgX7cOEAkbd2BSu9H8scfEffoI2iio9vf31m1k58LfmZNyRovRifwdcbYzch6qLS12qzk1eXxy75fsLb5RjhdfajVd+iY7vrWpVh9HavV1i6x5lOVtr89Cf85DDYs6/US7yZt5e91VlkDfmo/EoISusTkMFGZ8g4iSyvk/a50mIOXrJ/k1170igeKfswYgo9ZRNC8w1xqR1KrSXj6KaIuvwyVfoiY+zWUwo93y/+UYs1z8uuE0+XfUy+yKHkRX5zwBbdMucWrcfgqwohM0C+pobLwen59PlabFZXUS66/Mgcqdsui7fbtJANAEx1NxIUXogrxfsIurW2AlVvZiNVqQ6VSfhu4PjOT1O+/wy+ln0FNyjyInyRLUPzxKhyhcAVW3moYMQH8w7qd0oSHk/T2f7HU1qIKDFS2X3v3bZXcIwJHkF8uV2z61ADXh1GHhtK6ezdGvR6r0YhKJyQthiKhmgRqyWZHZc7AG0k4VP4ncIicCrlyJyMmSK5Y+u5WWZrs4POQetATV4KgefMImteREE4JEUlbpZE0GvwnTMB/woQu70+KmcQtU24hNiDWS5EJhgN2XdueKm0tNgunfH0KZquZ7zKXYnrqZdRBwSS987ZznYw5HnZ8Ie+qOPJ+JcL2SfbXt9BstKBVSyRFBng7HGWwWuUFRoDEng2qaltqqW6pBjqeMZ4ko73SVl64SAlNobChkLy6PKaOmOp4Q5IEGUfLciD5a2D0Mf3fM9RpqoKijfKxC/P5nghesIDgBQsUbXPIULVX3i2r1sGsayF4AHrinanIgl3fyMezr3c9PhfRqXWkh6d7OwyfRVTaCvplZPBINJIGg9lAeXN57xfuadsOmzy3x8Sfo/hlZBB75x1EX331gNtQisRwf3RqFS0mKyV17iv37zdhC/LAYU5bte2f/wctvW97c5r6EvjgbHh5Wp9mZ+qwMOX6PID2FfmQFLLK5KoEIY8wNNClppKw9DnSV64QCdshTEJgEgD5InnnduyVbfZtsxmxwbDlf1D6N63NYey+7zcKzjvfI7G076YRmrZuJy0sjQvHX8iilEXeDkXgw4xrk0fYU9qA1dq1ilar0jImfAxjI8bS5GejdecuDH//3fdOr57IOApUWqjcA5XZSoXuc9g/45MjA9GqfWTaXZUNhmrQ+EPcwT1ekl+fD8iFGAFazyer7RrxZfWt1DWbXFucnL4ELv8NjnpYyRAHL3tXAjaIGQ+hI70dTZ9Ym5po3esBk24lSJoFidPBYuyzQt1h1j4P2GD0cRAz1vX2BIMaH3l6CNyJVqUlMSQR6GdSZ98iNeY4D0TlGTRqFclR8mAjx40SCXZsZjOW+j6SsWOOh8gMaK2DTW8p1/F3t4KxQd5aEd41gWwsKsLa2qpcX71gH0glBidTXCsnyIU8wtBAkiRCFi1CExnp7VAELpAZIa+QV7YWutaQxSSbdvx496BwtB1s1BvrmfnBTM7+9mz2tEnBjA23wsqHAGgdcSI4m0AZADajEWN+fnvStqy5jCbTMNLscyNVb/2XxtWrsQ5nrT2B10iODESnUdFstLCvurnb+Q+O/4CPF3/M6EMWkPDC86R9txzUauc60YfK2rbQIY0m6IZPmpAVrJNfR04BTc8L9Z0LMbxBiF5LfKi8bT6rvIHUMHnX6IAWJ6PSIf4QUA2TtIlfiGw+NuZYRZs1lZRgaegu2TJQWnbuZM+UqRRcdJFibboVSZLlwwA2vgmG2oG3VVckG4dDR5texGQ1cd/a+3jznzcxWsS4xx0Mk08fgav0u0LZUNqxlcLFpK2ppGRQOUG269pWuHcyW//99+QcvoDyZ5/t/SKVCubcIB8rZfSze7k84FZpYPHz3QYlJbfdTvZh82hcs1aZ/nrB/rsVKMUBEBWkIzxQVG0KBJ5iUlwGAAZbhWuDLqsZPrlY3ga2f6sywfkQeXV5NJmaKDeUk1PWAsCcotehuQqiRhNy/dOkr1xBrBtMyOwYduxgz/QZFFx0MSG6ECL0EUBHdZRg4Jirqih/8kkKL7+ii0FKq6WVH/J+YE/1Huc1RAUCJ9CoVYyOdcCMTKUiZOFCtPHxAzMmG3MchI6SKy4FPZJTLiepfCppa5dGGNWzNAKARqUhMzyTMRFjPBRUdzpLJEyNncq9M+7l6kO8v4tz0DN6EVz8HRyuoPYqUPbvp8iaPoOajz5WpD1dcjIAEpKiyWC3knE0xIyTC6U2vj7wdrZ+IJu+J8+FRCfkPtxEUUMRX+R8wavbXkWjEuqr7kB8VwUOkRKaAoV9JG01fnD0o1CVAyHxA+7H0tBAzoIjkPR6MteucZt+qjPYB1p2IwF3oQ4Lw1xRQdP69disVqTeVnQnnAFafxh7ouudtjbIVbYAM6+B2PFdTlsaGjDt34+1oQG/jAzX++sD+++WpVU2Q8qIEVW2QwmbzUb911/TtG4dMbffjiYiwtshCZxkcnwypl8PQTJH0mI2olMPcNFE6w/pR8Cur+VFofhJygY6xBkXO
Y6vTvyKwroKLtxUCUBw8iTIi4JjnkDS6NAmJKB1Ywx+qalgNmNrbcVSVUVqaCrVLdXk1eUxPnJ8/w0IesXW0kLICYux1NWhDupI1OTX5XPr77cSogthzVnCiEzgXsbGBbO9uI5d++s5ZkJcj9fYbLaBJWvtTL4AplwiV5AJesSVStuKF18CtYroq67qfm7ZMrBYib72GpdjdJp96+XXUTN7vWRx2mIWpy32UEA9kxkbxG9ZFWSXNXLu9PHtu0YHRNVe+P1pOdl25rvKBTmYUfjv2lxeDlYrfqnKVF+rAgLIWLcWTXi4Iu15BJUKZt8AX1wOG16BGVeBbgDyIXNvgujRruviKoR9Dp8ckty795HAJUTSVuAQ/bpL+4fDTNdXL00lJaDVogoOGhQJW+iotHW3PELA9OmMXPYyQXPm9J6wBXkr0kGnKtPpL49AfTGEJcG827udVgcHk77iZ1p27UYbG6NMnz3QammlqLEIgIaGcKCmXY9KMDSQJImqN9+idc8eAg87jNDjfEcmZbgwMiIAa/k5GM1WahtVhPi50NiY4+Wk7a5vYcE9isXoC2hVWlLDUqmuDQMqiQ/Vo596ARx8Kug889xT+fuTuvxbtCNHIqlUpISm8FfZX+TWCl1bV9EmJJDw7393e98+fkoNTXUtUSYQOIDdjGzn/u4VaIX1hVy36jqaTc18d9THNKxYibm6iqjLLnOuE7U7l5Z8A/vcwT6XcAq1isoXXgTokritWLaMyhdeJOq6axWJ0SnqS6C2ACQVjPR+hV9f2CXW9pQqUIWp1sK29+Wv21Ajz3t9kdLtEBwPgcrLnSW//x6m8nI0CvqjDKmErZ2DToVVj0DtPtjyLky/3Pk2VGoYd4LysQ0Qu+yIXYZEoDwiaStwiNRQ+Y/Q3e7S+tGjGbNlM+aqKrf24wz2gVaumyttJZXKeUdNsxEaSiA82fkOizfBH/+Rj49/rteVPkmtxv8g91Ze7avfh9VmJVgbTGGl/LGUIfRshxxhp5+OuawMfWamt0MRDAC1SiI1KpDdpQ3kVDQwyhWn68yFsuRKxS65QiUyTblAfYR2w0X7Z50uEHNlJZWv/gf9uHGEnXKyW/vXjRrVfmxfmBXyCO6jXeMx1Dsaj4LhxZgRctJ2d2l3eYRwfTg5tTkA1FWVUHb33UhaLZEXXog0EDNRiwnKd/ZqSjVcqWpspabZhCQNLGlrT9R2Ttx2Ttj2VIHrdgIi4YKv5Z2V+pAeL7HaZIk7b1fc2ZO22W0SFVk1Weyo3MGYiDGMjXTSuClsFESPlcc0OSthwmlKhzs4+OJKKPsHzvkIMo9WvHltjPsKgIYMag0cdhuU7XBeN9hkACTQ6t0S2kDxtob1cEDULwscIjk0GYAKQwUNxgNWLAvWyStFTcokWiWNBm1srCJtKUFqtFz5VNlopLbZc+La/er6Fv0FLxwCH543MLOfsCSYeKYst5B+RLfT1uZmj+nudZ7Mtrup+5L+1zAh4rxzibn5JrdLaQjcR2p0AJKmhj+KdrjWkH84JM+Rj4VJTRee2/Qcb2x/g50luXyoe5gTNRvaP8Nbdu6k5t13qXrjDY/G1O9uGoFD2Gy2Xs1ERdJW4EnGtVXaFtUYqG8xdTkXpAsixl9OnhQFGQmaN4/w887D2tLifEdNlfBUOvzfAteMdXwQ+3h2ZLg//jonjd7aiL7qKqKuu5bKF15k94SJ3k3YgiyHlzoPpv6r10tya3OZ/t50Lv7hYg8G1h27JEVlo5Gqxlbe3/U+9627j18KfxlYgxlHya/ZPykU4SCjrhjKtsvHCYd6NxYHMe3fz/5776Xouuu9HYpzTD4fjnkCQkc6d99fb8LSCXLeZRCRX5cPiPGNOxFJW4FDBOuCifaPBjr+MNv58//gq6th/YueD8wDBPpp2h1I3W1GBtC4eg3555xL5bJX+r4wIlUeIJdth+yfne8oMApO+Q+c1HM/JbffQd7Jp9C8caPzbTuJfTKbGJxEYbUB6FghFwgEnkMbvIugjCdZvv951xsbc7z8ukskbe2YLCbe3vE2SzcvJaXgA2aodrGo+EUwyQ7vmhEjiLj4YkIXe0YLsPKVV8g77XQyK3U8NOshHpz1oEf69VVMRUVkTZtO7uITui285tWLpK3Ac4QGaNvHrrt7kEiw/x7m1ueR+J9Xib39NtQhPVdO9klgFATFygaUAxmL+jA5bTv00gcijdCJ6KuuQtJqsZlMSFqt9xK2DpJXn0eLpYVWS6tX4wj005AYIZvkZZU1cnD0wcyMm0l84AC9V+yVp9k/g9WiUJSDiJy2v9+RU+S/awUpvGIJJffcg6m0VNF2Ja2W2k8+peHnn7E2Nyva9qDD3ArrXoKmcrANHsN2m80mFqU9gEjaChymvRKnvlMljrm1Y5Bmn6C7QOlDD1H+9NOYyspdbktJ0uxmZG7WtQWw1NZi2LyZuq+/7rvSNSACprStYq95zvEOjM1dK3PV3VVSrM3NNK1dS+vu3ahCQx1ve4DYtXCCVfJAKjJQR0TgAE2QBF7FZrXSsmsXLTt3ejsUwQCYEJuBzaam1aTAgHBMm65xSx2YBlDB5YMUNhRisVkI0Phzdu0XAJROv7tdy1afmUns7bcRteQKj8TTvHUrLf/8g3rzTk7OOJmJ0RM90q+v0pqVBYCk13fRprfarKISReBxxsT1LpGgaHX92Lbxv9hV0QVXTMg6U7FsGTaTCSQJm8kkm5B5g5Y6+OEu2Pl1nzv8Dk88nG9P/pb7Zt7nweB6JjOmQyLh5IyTeW3ha5ycMUDpocTp4BcKhmoo3qxglIOErLYK4gxlZRHM1dU0/vYbdZ9+hkqv7LZ+TVQU0TdcT8Jzz8kmX0ONwo3w/lmw+7v+r/37I1kSMThO3ik7SKhqqaLB1IBKUpEUkuTtcHwWoWkrcJjbpt6GVq0lMbiT+2be77KTZnAcxE92qX2byUTNJ5+CyUT4Oee4GK2ypEUHsTq7kr1u1rUFCD7qSGJuuZmQxSf0b1Yy82pZl3bfOti3AUbN6Pt6mw0+uRAkNRz3DIQm9HiZKiCA9F9W0rhmrUf0SdsnDSZZFsPVAa7Ae1S//Q7lTz5J0IIFJC572dvhCJxk+sjRNH7xENoABQbWIfFw/baBaW77KO0OuxaJAFrZaM1k9EzvPe8izjuPkGOOIWj2bK/F4EsEH3EEGevXYamu7vL+/qb9tFha0Kq0JAT1/NwVCJRmbFwwv+wuZ9f+/pO2NqsVU8l+dCMH8Ps55jhY/QzkrJAX6AaZ3qK3UCJpW/roY9T873+oIyOxVFWhP+igHs3JPELhRtjwMuz5rk8TJI1KM2iSNxmxwazcXU5WmUJmZOkLYMcXkP0jJA5uIzanMLdC7q/yceZCRZtWBQQw8pVlGHPzUCtoQmYnaskSxdv0GLu/hazvobkSRh8Dvc37rRZY27YDbuY1skzJIMH+DBkZNBKdWhRcuYshuCQh8BajI0aTGpqKVtXJLda+qj76WJdXuGwWC7F33E74BeejGTHCpbaUJq1N19YTSVuVXk/kpZeijXVArD0kHg45Wz52
pNp2xxeyFtPelWDsW+pBHRZG6PHHORCx61hsFiQkmhojACGNMJQJmDoVKSAAlb+/t0MRDID06BAkSU1Ns4mqRgW2NoqEbRfsO1VSGyqw2iRe0l1GiL88yLUZjRgLCvrXM1eQoLlzCTvpJDTR0eyt3cvHez7mj/1/eKx/X0QTHo5fWlfjPfukJikkCY1K1EsIPMPYtkrbnX3II+TV5WGuqSFr2nT2Hn00VoPB+Y7iJsmO88ZGyPvNpZh9CVeTthXLllHzv//J/zGbibrmGhJfWdaucevxitt96+XXUTM9268LjB4hf++zSjvmbw3GhoFLN2QshJhxsiSIL5G/BkxNEDQCRii740al1xN8+OFE/usSRdv1CWZcBWo/KNoo/wx6Y9c3beZ/YXDohR4LzxFya+XdsmIXkXsRSVvBwLFaOsr5x7ie3FPp9UScey4j7rqry7bCwYBdHiHHA/IITjPrekCCrB9kJ8reMNTCD3fIx3NuguieK2g9ZT7Wmc9O+IyN522krFr+PmfEikrboYp+3FhGb1hPwjNPezsUwQDw16lJCJMT7opqeJta5EqOYU5u7V4AUowmPrTMx9rJbb1lzx72Hr2IvQuVd2x2hJ/yf+LhDQ+zPHe5V/r3ZYTem8Ab2JO2e0rrsVi7ju3sv4uFDYVYgwNlSQ+NBuO+fc53pFJ1zAOERAIAja1m9tfJskDp0QMsRLBYCT72WAJnzSTmjjuIvuZqNNHR7eZkWDysa2lP2ib1nrS12Wzcs+Ye/rPtPzSbvK8xmtEmj5BV3oDNZuPSny5l1gezBr44efDZcNV6mHaZglEOAuxShxlH9V7tOUixmc207Mmi4ddfvR2K8wTHwqTz5OPeiq9sNljzrHw8/QrwG1yFTUKv3zMMrsyYYFBjsph4ffvr3L3mbkxWExT9JYth+4VC8lxvh+dW7CYC+6qbaTV7Rnze8M8OSu66m9rPPu/7wqh0GHeifJz1Q+/XrXwQGssgMh3m3tTrZeVPPEHR9Tdg2OGie7yT+Kn9yC6TB3j2QZZg6CGpVEg6sT1mKBMes5OA5Jf4vx0vKNPgT/fAU2liMg/klf8NwEiLmqfNZ3b5rDOVliL5+aFNdNJN2EXM1dXUfbucSTkW5ibMZUzEGI/27yu05uZRcvvt1H72Wbdz7bIYIckejkownEmODESvVdFislJQ1XURLjYglgBNABabhaLGIpI//JDRm/5CP3r0wDprT9p+55smTU5i98CICvIjNEDbz9U9E33tNYx89hlGvfkmYSef1PXcVVcRfe01robpOOZWKN4kH/dRaVvWXMZXe7/i1W2volUP7OtWkvSYIFQS1DabqGhsJcwvDHBBy3mIJTQdZs6NcOLLMFnZKk5rczM1H3xAS1aW24qCjIWF5J14IsXX34DNbHZLH25l1rUgqeRdsCVbu5/fv03+pw2AaZ7xO3AGsSjtGUTSVuAwGpWG1/5+ja/3fk1RQ5H8AYIka99oXE/StO7di6ms3CuVnv0RHexHsJ8Gqw0Kqjyzctz810bqPv+cmg8+6P/iBffAZb/A3Jt7Pr/vD/jrTfn4+KW9auFYjUZqP/+Chh9/xFrfXQPNnRiMFgpr2pK2otLWJ7AZjd4OQTAAooNVqP2LyK3fo0yDklreNrt7eFdw2mw28lqrANjsfzbVhHT5rAs56ihGb/qLhGee8WhcDT/+SMkttxD39UaWHbmMc8YOLk35oYJh8ybqvvqauq+/6XZOTGoE3kCtkhjdJje16wCJBEmSukgk6EYmIGlckO5IngPz74LzP5cTEMMc+868DIU9Glp27aLkzrs8X1W4fxuYWyAgUi7+6IV2fcvgkV3l9LyEXqsmKVKWuMsqbVTOgM/YDCVbXA1v8GCv+FRYp9fw93ZKH3yIwsvdl2zUJSWhiYlBP+EgLHV1buvHbUSkwEGnysdrl3Y/H38ILFkrz98DIz0YmGPY/5ZSQ1O9HIlvI4S1BA4jSRLnjT0PP7UfgdpAmH45jD9ZnowrwP677sawbRsJzz9PyNHKiqC7iiRJpMUEsbWwlr3ljR7RXA094QRas7MJO+WU/i+Oyuj9nNkI31wvHx9yHqT0XhWt0ulI+t871P/4IwHTpzsZ8cBYumkpm8s3M3/EadhsaiICdUQFDR6BdYHzWA0GCq+8CsPff5Px26+og0Xl9FBibFQ6G5ugxlisTINjjpcHolk/ydU6g8hAwZNUGCpoMjWhltQsr58PmLtN6CWNBk1EhEfjCpw5E7+xY/GfNMmj/foa+gkTiLr6arQJ3Y2cxKRG4C3GxoWwraiOXfvrOW5iXJdzKaEp7Kja0b691SXUWph/u+vt+Ag5Fa6bkDVv3oJ+/DhUfh3PzPrly6n74gtMpfsJnj/f1TAdp7OebR/Vprl1g0/fMiMmiLzKJrLKGkhNkD+DXUraVu2FV2aBSgO35Q7bMY1DqCQCZs5Am5DQv7n2AJFUKtJ/+9Vt7XuE2TfA9k9g51fy71dkV118Rhwk/xtk2Gw2jhh1BHtr9w6qv3lfRCRtBU5x3eTrur4RFA1EK9K2zWYDtRpdSrIi7SlNWrSctPWUrq0mIoL4Rx91/saGMnnwHNA28a8rBLNBXh1f+HC/t+vHjEE/xnPbY7dXbmdL+RZS/BYA0S4NcAWDA5W/P6b9Jdiam2netMmzEwuByxwal8k7BWCSamgyNcmLdK6QcKhsbtFYCnmrIeNIZQIdStTkk1u1E4D4wAR21slb+AaDFIwuOZnULzpkeGpbatGoNATpxGexM+hHj+5xa3mLuYVgXTB1rXUkhyZ7PjDBsGbMCPkzZndp991T9kl2bm0uNouFihdfpOWfHSQsfQ51kPj7dwVXTcjMNTUUnH8+kp8f6St+bl/MCz/7bEz7Swk/71zFYnWI8l3yaz8mZINxgSozNpifdpaRXd7ArHEKVNqGp4BfiCwRuG89pM5XJlBv8fP9EBQDE8+EwChFmw6cNo3AadMUbbMnhnTCFuSE7JR/Qew42WTcTnN1x3x+ECJJErdPE4t1nkDsXxEMDIvymjEpH3/EmC2bu7kuDxbSYuTExd6KQWhGZmf9Mlg6Ada92PFeZBpcuR7O+3xQfvDfOe1Onpz7JDaDvN0qU0gj+ARxDz9M6nffETRvnrdDETjJwQlxWM3y3+HuyhzXG1SpYMyx8vHu7lvHfR6bDb65nrxvrwYgSp8IQExwh9ahaf9+9l12ORUvvNhrM57glt9uYe5Hc/ku7zuvxuFL6DV6vjn5G/4870/XF0AEAiexm5EdKI8AHYm1vLo8JLWauq++pmnNGlp27Bx4h3t+gC+WQJkLbfgAe11M2poKC9FER6MbObLL7gttQgIJzzxNgKd3Rpz0Cly/TU7s9UF+XT4wuCptM9sWLrLKGkkKSQKgprWGmpaagTWoUkFG247QrJ+UCNF7tDbA+pfhx7vAMMDvh0AZjn8Wpl4KWtkMmPLd8Mxo+OoasHrYdFAw6BBJW4FTWKwWCuoL+Out+fDf4ztWXhVC0umQ1GpF21SKtDYzMkUd1R3AVFZG5X9eo2ndur4vXPW4bBJ
gaYWNr0NLJ12f9S/Bnu/7vL363fcof/Y5jEVFCkTtOOnh6RybeiwllXpgcFSeCVwncNo0/FJThv7q9zAkIlCHyhwLwMZihXRtxxwvvw5Hk5rdyyH3V/I08pBLb5O3KHfWs23ZuZOm1atp+OUXr4QI8m6X5MYAQAG9v2GGqawcwz87+tTxHgz6joLhx5i2pG1xrYG6ZlOXc4fEHMJjcx7j/ln3AxB56b8Ycf996JKTB97h5rdh2wewaxgu0LXRaraQ32b8NtCkrf/EiaSv+oVRb76hZGgDR5IgPLlth2XvDEb9bnsxSFZpA3q1nvhAuZLRpedcxlHya/aProbnXXJ/A6tJrh7uQ6t4IFgNBmwmU/8XKoCprIyCCy5k76JjBqU3jkOsehx++3fH/9c+DxajnExf/bR8fpBR1lRGvdGzHjjDFZG0FThFbl0ux39xPNeqa7AVrIUAZbdRDGbsA6+9FY1YrZ57INS8+x4Vzz1H9Tv/6/tClRr++VT+mbTWw0fnwR+vwa9PwKpH5fO9YLNaqX7zTapeew3Dlq3KfgEOklMuV4EobdogEAicQ5IkQtWyLuc/ldnKNJo8F/xC5e2ERX8p0+ZQwNQiV7AAeVHJAFiN8qS38wKVfuxYRjxwPxEXXODxEAEsDQ3kHDaPBbd+SkCLTRmNy2FEww/fk3/aaRTddJO3QxEIuhDqryUhTK7c2nWAREKUfxSL0xYzJkKWxIo491zCzz4bbWzMwDscc5z8uvvbgbcxxMmvbMZqg2A/DTHBA9c7lSQJTVTP8yxzTQ2V/3mN2s8+7/G8N2g0NlJuKAcGV9I2JSoQtUqiodVMaX2LMmZkaYfLmrZVObIG6VDFnnTOPLpPreKBUPvJJ+yZNp3yZ59TtN2eUIeG0rxpE8b8fMzl5W7vzy2o1PJ8/cNz4bX5sP1j+f3A6H7n8d7iyY1PMvuD2Xy4+0Nvh+LzCE1bgVMkhSShQqJRpaIycSrR/ay4OkrV669j+Hs7YaefRtDc3o2yvMmoiAA0Kolmo4XS+hbi2wbB7ib0lJNp3rKZkGMW9X3hvNvk11VtOrh5v8v/AA6/u+N8T9hsxNx2G/XffUfwUZ7Tm9xavpXtldsZFzGRgupmADI8YPIm8AzNW7bQ8MMPBEybRvARR3g7HIETxAcmUWeEfKUqLjU6mHmVrLcdlqhMm0OB9S9CbQEEx3Foxomoq3dSXdSWtO1UaauNjyf8rLO8FSXq4GBUgYFQV0tipZX8qHyvxTIUsRqNqEJD0Y8b1+3cHavvoLC+kGsnX8uMuBleiE4w3BkbF0xx7f+zd97hUZRdH75nW3rvPYEkEJpUKSpdAbGgqKAoYnstKJYX+/dix4oVFBvYEHsFBBEpgkiVTkghDUjvdet8f0w2JKQn25LMfV25stmdfZ6TNjPP7znnd6pJzC5jVC8rdx+PnwqCAnIOQXEG+ERZdz4HxOxn2zvQvUPVRqJOh6DRtHhM+R9/kP/GG6jDw/GacaV1qxR/exRKT8GYBRDZfJNiswga4BKAh8Zx7uWdVEpi/N1IyasgKbeCGK8YdpzZ0TnR1tlL8vdN/wuSNzZuHNUVEEUpdjibOWxBqo8cQayulu4trIzC2Zmw119HExmBys/K5zhrMe4R6XeyZfHZ57yjYN/K1tfxdqJMK20Ehrk3bsAqY1nkTFuZdqFRaghHujFIixxmsXEr/9lF+e+/Y8jNtdiYlkatVBDlJ5WO2tLX1ikmhugvvsDryitbP3jcIzDusXOee6zVE72gVOI5dQrhb7+Fwtm5E9G2j81Zm3llzyt8efR7RBG8XdX4u7d8oyrTdaj86y+KPv2MsrWyP2ZXI85H8jrMr8my3KDjH4OL/tuwyUJ3pvQ0/PW69Pji57hr2AKWX7yc07nSgsLRrGAilr9H0F8bOBEucKbiDNWGanuH1GXwv+MO4v/Zid9ttzV67UjBEQ4VHEJAtoqRsQ8t+doeLzzOquOr2JsjVUAYCgup2LoVY0UH73Pd/CFyjPT4RM+89ne2CVn2oqc4efkVVGzb1uwxXpddhuuoUfjPny8JPdYkcZ2UOa1v2R7OXKHhSFm2ZupbJNRl2na2oiR+ivS5q1ok5ByG8mxQu0LUhRYfPvSll+j16y94zZhh8bGbwnPKJTgnJCCounBO4vhHod+Ms1+XZDisYAvw0ZSP2Hn9Ts4PsX6zuZ6OLNrKtI+qImKqpJu+NN9wiw3rf+d/CHzsUVyHD7fYmNagziIhz4GbkU14XCrZAVCopa8dFPMut9okeTzGB3rIHqjdCPcJE/C+9lo8r7jc3qHItJMhIfEAVIm56E228STrdmxcBPoqiBgFA68BoFJr4HSJJIaarWBM1dVUbNuGPte+JX2a6Gh8PYPxcvJCRCSzLNOu8XQ1BEFA4dS4FPqtCW+xZNwSEvwS7BCVjEw90Tansffgb2m/8dLul/g9Q2qolH7DDWTdeRfVBw92fMI6i4S1HR+jC5NSm9jREbsv0WSiYvt2tMnJCC0kUShcXIj6ZCXeV82wrkhVegpKM0FQQviIFg91RD9bM+ZN0qTcs6LtyZKTnRu072Vw8XMw9eXOhmcfzGJzr/GgtnzCjqBQ4BQX1zm7lZ7IzI+lagUApcZhBVsz7hp3nJQdt4GRaRuyaCvTPpI2EKOTFvAn9ZYznnYdMQK/efM61/zABpibkaXYMNPWjEmno2z9BmpOJLV84NZXwGSQTvQmfUNT8yYo/3MzJT/8iKnStg3W4OwNnrZayjyLDZL9bLsTLgMHEvLcs3iMH2/vUGTaybCwGESTGgQjWWUWbE5YUwqHv+v+TWpMJvAKA6UTTHuZEm0ppdrSuioNf3cNPm5SVUHN8USy/nMn6ddea8+IAUl4jPG0gN+fTB29vXtzSfQleGo87R2KTA+lb7AkWJ3IKcd4Tk+G8wLPY2LERBJ8pU0Fl4GD0PTujajVdmLCWtE2YwdUFnZ8nC5KZzJtBYWC3mt+JfSVl3EdOtTSobWfzH+kz8EDwanl6hBHFm371P4PJOVV1MV3uuI0WmMn/s59Y+CCBRAQb4kQbU91iXSPYgVrBHtgqq6mbONGCld+Yu9QOsf210E0Set4o67VdbxMz6AL54/L2IXENfSq7QTZExd0ZtE2Nc/2Amfuiy9SsvorvGZeTegLLzR90NZXJE9bcymF+Wtodqeu4L33qDl8GGNZKX7z5lkn+CbQm/ScKpfEoMJib0BPvNyETEbGIYj0dUfUBSA4n2Hv6RP08rbQIuzYz/DLfRAyGBK6cQa2QgEXPyt5ALr589n+t/nw8IeM8rsKGNnAGsFUXYWmd280Efb3+i395RfmfXaKL+NNnDyvk1lIPYTCFSsp/+MPfGbPwuuKK+wdjoxMI6L83HBRK6nWG0krqGwgJk6KnMSkyLOe86GvvIyg6GROj0+UJPKZTFB2Cty6qMdkBzCaRE7md84eQent3eZziWgwUP7nnxhycvGde1OH5muRzJ3S58jRrR56slS6ZjiiaGu2R0jOLcdH44uHxoNyXTkZZRnE+3RR0bWzTHkBJjxhlaELPvwQU2
kpXjNm4BQba5U5zsVUU8Pp+xYA4H3tNSjdu+CasgPreHvxfdL3/Jb+G9NjpnNV3FX2DqfbI4u2Mu0j4XJiDCVgzLRYd2ldRgaGvDw0sbGofHwsMqa16G22R7BDpq3XZZdRsenP5hf2557ooXFzsnNO+KLRiMfkyZiqq2y+2Mwqz8IgGnBRuZCZrQb0chOyboo+JwdtcrLDNhmUaYxSIeCmCKWaMxzIPcF1/VtphNhW4qcBAmQfkMouvSxns+OQuEmdvwtrpGwznVbKtqzfhMz9ggtwX7sG0WSyfXznUHM8kdAjOQxRCj1yY7YjVO3dS/X+/XhOuaTRa9tObSO1JJVRIaNkewQZu6FUCPQJ9uBAVgnHs8taFBM7LdiaueW3VjMzuyOniqvQGkxoVArCfVytPl/V/v2cXnA/gosLXldegdLLy7ITmDNtI1tvojgpchLJxcn09nK8plxRfm6olVIz6TOlNbw76V38XfwJcQvp3MD6ajjyA2Ttgsvfgq5m8aaxTpOw0u9/QJeejsvw4TYTbVU+PrhddBGqgABMVVVdT7TtwDrenhwqOMSu7F0MDXSAioAegCzayrSP82YT3WcqfH0ROZU5VOmrcFV37qak9OefKXj3PbyvvZaQ5561UKDWoXeAdHHLK9dSVqPH01lts7ldhg4l9s9NzXtXmYxNm5WbvzYZG71FUCrxv/M/+N/5HwtH2zpmQSDaM4a9R6qAhkKGTPdAl5FB6pSpCBoN8bt32bTRnUzniHWewJ7TkfiED7HcoO4B0uIvc6fkdzjyTsuN7QhUF8OPd0vNJELP/tyeGfMMj4x4hPmr9gHlTXodWkws6QSel15KhnMFaww/4C6Ltm0i6NFH8JxyCS6DBzd67be031hzcg33D71fFm1l7EpCiGedaHv5eQ2bQYqiSH51Pq4qV9w1FroP64GCLZy1Rujl74ZS0T4BryYxkYL3luN56aVNbgI1heuIEbiOHCmdfyzdkKy6BHKPSo/bkGl7/9D7LTu/BVErFfQOcCcxp5zkvHIm9h1sucHX/hcM1XD+fyB4gOXGtSY1peBsYYG/FlEU8bvzTqr378N1iAXvH9tA5Icf2HQ+i9KBdbw9cWQ7lO6I/VcIMl0Ob2dvfJ19AQt03gQEjRPqsDCcYh1vZ/ZcPJzVBHlKZtu2bkYmCELLzQYmPN78Dty4RxyuIZn5ZO+nCcckgpeLmgB32ci8u6GOjEQdGopTQl8M+fn2DkemHQwPGom+5HwKS7wtO3Dfy6TPiWssO64jsOVlSPpNEm7PyZx1U7uRmmcAcNiqApeBAwi+5XZO+wukl6VjEu2f/evoaKKi8LrySjRRUY1eq1vUeMqLGhn7khAinXMSc8obvXb3pruZ9O0ktpzaAkDea6+RcskUyjdv7vzEuiooz+n8OF2EzvjZlv/+O+UbNlC2pu3XRkEQiPr0EwIffAClt3e752yRynyIGAkBCeARZNmx7YD5unsix4LrN7ULxIyVHif/brlxrYnJCG8NhvculCqeLIwgCHhfNYOQ556zfOZ3d6aLruNl0dY2yKKtTNswGmD3h1CSBZz9B7VE+aT/XXcSu+kPfObO7fRYtqDO1zbf9r62ZmpOnECfm9upMaoPH6Fq/35ES+/MtxHz344zUmlSXKA7QlcrK5JpFUEQ6LX+N2K+/tohPDtl2o7V7GD6Xip9Tt8BVUWWHdue5B2H3bVZHlMXS7629ajSGThVXA2c7SquS08nZcoUzjz6qE1DbYlQ91DUCjVao5bsymx7h9NlEUVRXtTIOAwJIZI1y/Hsxk2EzSXiJ0skT1JDfgH6zExqjhzt3KT7P4dXesHGpzo3TheiM6Ktx5Sp+N15J15XO4g/pH8c3LYB7v671UNzK3MprC6025qiLZj7ZiTnlpNbmcu7B97ljX1vWGDg2qzoriLantoD1UVQmgXuwfaOxioYK2xvY9iTKK4ppkRbAkCUZ+MNaxnLI4u2Mm0j829YtxA+GAcmo0VFWzNdRbCLtaOvLUDO4sWkXTmD4lVfdmqcgmXLyLhhDoUffmShyNqH+W/HUBMAOG7mmUznUWg09g5BpgP0DnBH4ZJBctUflGpLLTewby8I7A+iEZI2WG5ceyKKsP4x6Xvqexn0nlj30j/Z/3D7htt5c8/7iCL4umnwq60qqElMRJ+Rifak41gRKHQGJuX4c8FRU52II9M0FVu3Uvbbb01WEeRV5VFlqEIlqIjwlDesZOxLn2DpHiu7tIaSKl2D13p59QIgvSwdAJ8bbyTi448639jKr7dUNp60Hoz6zo3VRUipXRvUbzbZVpz7xBP44AN4TJjQoblrkpIo/OSTDr23Rdpg3fPOv+8w/pvxfHzkY8vPbyHia/8HkvLKqTZU897B91iduLrzFSVxtaJt1q6usRFtvu+KnQRKyztlVmzfgT4vz+LjtgVDYSHJEyaSPHoMor5nnHPsgbnpYKhbKC4qFztH0zOQRVuZtpG4VvocPw0UyrpSv57YqMScaZtiY3sEM65Dh4FajakTu4iiKKIK8EdwdcVj8mQLRtf2+c1/OyWl3gBNejzKdC9Eo1G+iepC9A5wxyX0a4x+37Iv+5hlB0+otUjIPWLZce1F4lo4uQWUTnDJ8w1eOl54nF05uziSfxxomIHldsEFRK74mIAFC2wZbYtUHzzErR9n8cAOby4IvcDe4Tg0hSs/4fSDD1G+ZUuj18yLmnCPcNQK2/nfy8g0haezmnAfaXF9PLuhRcK5iRguAwfgfsEFnS9tjhgJrv5QUwIZOzo3VhdAFMVOZdp2BkNBAWlXXU3eSy+jTUnp/IBGA9Q0zspujnJdOQICkR6RnZ/bSsTXJock51YQ4hbG1XFXc89592AwGTo3sHekZCEhmiD1TwtEamWSN0qf46ZYfGhTZSVZd95Jythxna4I7QhKX19M5eWIej26zEybz99TqKsi8pariGyF3IhMpnVE8axo23c6cPYGr1zX2BurPVTs2EHea0twv/BCAv/7UKfGshVn7RHsI9p6TJxA3LatqHx8OjyGIAiEPPccQU88gcLF9jtkBdUFVOgrUAgKTuW5Arq6mymZ7knektcp+eYbghb9D6/p0+0djkwbcNEocTL0oVpXSG6phcX24bfB0LngFW7Zce2BvgY2PCE9HnMv+Da8iTXf3AoGyRMwvl7DRaWHB25jxtgmzjbiMmQwmpgYqbFNdTW4Wae7dHfAZUB/TBUVuAw6r9FrsjWCjKOREOLJqeJqjmeXMbq3X93z5r/RjLIMDCYDKoWFlocKJfSZBv9+Lq0jeo23zLgOSn65lvIaAwoBov3b16S5+OtvcBk4AKeEhA5VHqr8/fG4+GJpzWaJppan98LKadLv7KYfWz38rYlvUWOoceiqyUhfV5xUCrQGE9klOp4Z84zlBo+/BPKPSxYJA6+x3LiWpvQ05B4GBIi1fNKOIT8fp/h4TGVlqINs74MsCAJRX3yOOjwcpbucDGQtZL9+2yOLtjKtk31Q8r1Ru0JvqWRnZMhIdt2wC1d1+25KzkV7Ignt8eNooruOH0rvQGkBm
1lYhd5oQq20bcK6oNGgslC5uT0EWzh7sg9zDyPxuFSmFxckX1y7M6LBgLG0lKrde2TRtgsxwOl2tiXlgzbasgN3g6YmdRxYBSUZ4BEKFzbefDSf76oqpQaeHSmbtSUKjYbev62zdxhdgsCFC5t9TRZtZRyNhBBPNh7LbeRrG+IWgrPSmRpjDacrThPlGUVNYiJVu/fgMmQwLgMHdnzSvpedFW2nvQIOLOp1luTaLNsoPzecVMo2v89QUEDO00+DKBK7ZTPq4I75jIa9vgTBEoItQOZOKXNU0/ZNO2eVs2XmthJKhUBsoDtHz5SRlFtOtL8FNyTjpsCOt6G62HJjWgOz7274cHDza/nYDqCJjqbXjz8g6nStH2wlnPv2tdvcPQX5/sb2yKKtTOuYs2xjJ0ldMgGNUoNG2Xnh0POy6Wiio1F6eXZ6LFsR7OmMm0ZJpc5IRmElsXZcgBtLS0GpQune9hsP/ZkzoFKhDgy0YmQtoxAUjAgegZsigGMieDqrCPRwsls8MtbHZ9Z1eFx8MS6DOrH4k7E5sQHubEvKJ9WadjBGPSi7cPn4sHnSZ7cAcGq4+SSKYl2ZfH6hVGpstoIxlpdT+tPPOCf0xXX4cJuF2xZKtaV8evRTcqtyeeHCF+wdTpckrUxe1Mg4Fv1CpPvV4zkNRVuFoCDaK5rEokTSStOI8oyieNWXlHz7LX533NE50bbXeFC7QdlpOPMvhA3txHfg2JitEcwVeW3FVFGBx+TJGEtLOyzYApYTbAEy/5E+R4623JgOQHyQR51oO76vL+ll6RhMBvr59evcwBEjYWEyuAdYJlBrYRZtrWCNUB9B7mXRrTHf18r3N7ZD9rSVaZ06a4TLLT60OjAQj4kTcB02zOJjWwtBEOq6qqfkVdotjvy33yH5orGU/vB9+9737rukTJhI0aefWimy1hkePJwVU1ZwceD9gNSEzJFLqmQ6jyY6GtehQxBU8l5hV8JcWXAi3wpNJSry4IuZ8MYAyT+vq6JQwojboN8VjV4qqimiTFeGgMCZAum6YW66WHPsOLkvvMCZRx+zabhtQa1Q8+HhD9l68GeKKwvtHY5DYqyoaLFTujkTxdzkSUbG3vQNlhIkknIrMBgbNl86t1eF68iRuI8bh1NcbOcmVTtDXG0Ztnk90U3pqJ+tJjqa8HfeJvLTTywSh7G0lOLVqxGNxo4NYDLVE21HtXr4r6m/cvNvN7M6cXXH5rMh5qq+pNwKNqRvYOYvM3l1z6udH1ipcnzBFmDwDTBolmRbYmFEUWzxmmgrTJWVFH68gjP/938OEU93o8ZQw5mKM4B8f2NLZNFWpmUq8qAwGRQqya+nHj8m/8itG27l26Rv7RSc/bC3ry2A0s8XUaej+sCBNr9HFEUMuXlgNOLcv7/1gmsjybmSJ7LchExGxjEJ81HgFvc8BxT3UW2otuzgLr5wej9U5EDm35Yd2xYUp4O+5Z+JWQAJcAnGZFLj7arG313KQBE0atwnTMDtwgutHWm7cVW7svwHfz58x4jxRLK9w3FIMm+9jeSLxlK1Z0+j1yr1leRVSRsd0V7RNo5MRqZpIn1dcdMo0RlMpBU0TDo4txmZ12XTiXh/OV5XNN6MajfD5sHFz8GQGzs/lgPT2SZklkheEI1GTl45g5xnnqVi67aODZKfKDWPU7tBcGO/7nM5VniM/Xn764QcRya+tjoyKbe8TnCyeFNtrf3Whq2ScDlc/QEED7D40DVHjpAydhzZ/1tk8bHbhVpN3htvUPrd9xiys+0bSzckoywDERFPjSe+zr72DqfHIKc8ybSMeyA8nArZB8ClYeOr7Mps9uTsIcIjgmvjr2330Mbycso3/oFT7164nNf6TYEjYb4hs6do63X55bgMHoxzv7aX9AiCQOSHH6BLT0cdZT8f4WpDNS4qF5JzpZ9fnNyErEegz8mh9McfMWm1BD7wgL3DkWkDA0IDASlTIanoJOcFWnCzR6mSsj0OrJIysGLGWm5sa2Mywtc3SQvbaz9ttuTXXCLvrZIarsUFutctzF2HDMH1vXdtEm5HiAzvT0VSLsrULBjaerZVT0I0GNCmpCBWVaFqopzZLAL4u/jjqek69k8y3RuFQqBPsAf7M0s4ll3W4N7LLNqay14tSu+J0kc3JyW//aKt/swZFB4eKD0scx8sKJV4Tr+Uyu07ENQdXOabN1HDh0vX6VboSqXSfYKln/PJ/ErC3YcAUFhTSKm2FC8nr84NbtTDF1dDxk544DB4hnQ23C5F1b59GPLzMeRZoTKrHSg0Gnxvugmll5ds02AF6ls/yVWytkMWbWVax9mzycX0pMhJhHuE09+vY4t4bVIS2U88gTo0lNg/N3U2SpvSO0AqGbaqz2MrKD09celgtqwmOtqywbSDSn0lo78cTbBbMIa8/wJypm1PwVBQSP5bb6NwcyNg/nwEdRf2Me0h+LlpUBgCQZXG7lOJlhVtQWpSYxZtp77UdZrU/Ps55BwCJy/wjmz2sJMl0mJWZZSEPXt6oLeXoEcfIeT551D5+LR+cA9DUKmI/3sHNceOow4Pb/S63KRDxlHpG+LJ/swSEnPKubLe8/UzbUVRrFuMizodppoalJ7y5kNLlFbryS/XAu0TbfOWvE7Z778TvOh/+Fzb/gSYpghYsIDAhQs7LqiYrRGixrTp8K5kBRPm7YKLWkm13kh+mUCgayB5VXmkl6VzXkAnE4iUatBVgkkPKRth6FzLBG0pdr0veRQHD7TKvZbP7NnSutQBbNCCHn3E3iF0W/r69GXh8IWd3+SQaReyPYJM87TiA9PHtw9X9L6C3t69Oza+oMB19ChcR4zo2PvtyFl7hEqH8MsRTSZEQ8uekMaKCrt28zSTWZaJiIjWqCWjUPLbipczbXsEzgl98bzicgIfXthxrzUZmyIIAp6qMACO5FmhTL73BFC7QmkWZB+0/PjWoLoYNj0rPZ7wOLj5N3uoOSOhpkrq0hxf66cnmkyIer114+wkYlgQSSapokamMQpnZ8mnu4nFr6fGk9EhoxkSOMQOkcnINE9CiCS+Hs9u2IwsyjMKAYEyXRlFNUUAFHz4ISeGDafgveWdn1hbAQdWw8anOj+WA2K2RgjxcsbdqW2ilSiK6E+dAr0e57g4i8WicHLqXAZc/FQYeB30mtDqoVX6KrIrpRL0aM/ojs9pIxQKoc7XNjm3vJEtSKcxN/hK2mCZ8SxFQQr89gh8OBF01kk4Ujg74zpiBK5D5OtedybaK5qb+9/MjNgZ9g6lRyGLtjLNs/8zeH8c/LvKKsO7Dh1C1MqVhL78klXGtyZRfm4oFQIVWgN5tTvr9qLo8y9ImTyZsg0t3yAUfvgRyePGU/LddzaKrGkS/BL4a9ZfPDn0LYwmEQ8nFUGeTnaNScY2CEolYa+8gs/s2Sicne0djkwbCXWVMknNAqRFUbucLZvtKk1qtrwMVYUQ0BdG3N7ioeml6QAUlkgZCXG1mbbalBQShw4j/UbH9Xg8mHeQWWtm8fTfT9s7lC7HuIhxfHDJB9w35D57hyIj04B+IdI56FzR1lnlTKh7
KADpZekAqPz8EfV6dCctYJmgLYOf7oIdb0F5TufHczBSO+BnKwgC0V9/Ra9163AeNMjiMYlGI+V/bsZYVtb6wfUZeA3M/BAiR7Z6aEZZBgC+zr54O3t3IErbY04UOZFb3qgBX+cHr+3/cnILGOy7PmxA8u/S56gx4NQzEmVM1dVUHz1q7zBkZCxCu0Xb6upqqqqq6r7OyMjgzTff5Pfff7doYDIOQOIaycu2vHkT7wN5B1iduLruot1T0KgURPm6Amd31+2FsbgIw5lsyn77rcXjKnfswFhcjMLd/lYE3s7eaKsCAamTq+yJIyPjuMR6SyWPedVZ1pkg4XLpc1cQbfOOw+4PpMdTX5TKIZuh2lBd15jlTL60SDJn+GiTkkGvh3M6uDsSMV4x9MswMWNVOnkrPrZ3OA5FznPPU/D+BxiKi+0dioxMu+gTLGXa5pZpKapsWH31zsR32HLdFoYGSh7dHpMn0fuPjYQvf6/zE3uGQtgwQIQT6zo/noORnCc11jVX4rUHp14xCArL51Gduvc+Tt1zDyU//GDxsc2Y/Wy7Qpatmfi6TNsKy3s5B58HboFSNmuGAzVYTa5N7ImfYpXhK//+m8KPPqLmRJJVxm8vxopKTgwbTvrMazCWlto7nG6DSTSxIX0DScVJmETHvX/tjrT7CnHllVfy2WefAVBSUsLIkSNZsmQJV155Je+9Z4GLuoxjUFMm7RKC5DnYDMsPLWfxrsUdKp90BFuBztArwP7NyAC8r7mGsNeXELZkSYvHRX+1mvD33sV9omM0g0iuFbvjupDHo4xlMFZUUP7nnxgrKls/WMbuDA6OB6BSzMZosoKtRdwlEDMOht0MJge+CRRF+O1REI3SdbGVxjrmDrvuak9MBjc8nFUEekhVBZ7TL6X3H38Q/LTjlgr7u/gTVebEhUdNFK3vAoK6jTBWVFL85Zfkv/EGNGHzYjQZKdeV2yEyGZnWcXdSEVmbdJB4TrZtnE8cfi5+dRvpSk9PNOHhlttYN68nusIGXTtJaWemrSiKVl8HuU8Yj8Krnb6TGX9D7rE2X4u7on+3uQFfUm45vWo3pc1VMZ1GoZDuaQCSN1pmzM6iLYf0HdJjc2wWpnTNWvJeW0LZWsf431a6u6EOCUHp54c+u/nkM5n2kVOZw8KtC5m1ZpYs2tqYdou2+/fv56KLLgLgu+++IygoiIyMDD777DPefvttiwcoYydS/gCjDvxiIaBPs4d1tKzEpNWSNHIUaddeh6myawo3vQPt34wMQB0aiuell6JwatliQFCp8JgwAYWdO2k+u/NZXtnzCkdypOxsc+aZTM8hfdZsTt0zn6pd/9g7FJk2MDy8N6JJBYKB0+VnLD+Bqy/c/AuMvFNa8Dgq+irQuIPSCS55vtXDfZ19WTh8IWP8rwEE4oM86sQPQRDQhIfh3Kf566u9EQSB8sG9+X6MQO5Nk+0djuMgmgh8+GG8r5+Nyr+xn3FWeRZjVo9h+g/Tu/zmtEz3JKHWIuFYdjvL5juLWbQ9uRVqulf2W0p++0Tb6gMHSJ00mby33rJaTF5XXkncls34zZvX9jetXQjvjZaqLdtAVxRt+9SKtmkFlYS7RQHSeVtvtJDPvNkiIdlBfG1PbpWao/nESOt6K+B6/gg8Lp6M25jRVhm/I8T89CPxO7bj3LevvUPpNlTqKxkUMIgBfgNQKezfcK4n0e6fdlVVFR4e0snu999/5+qrr0ahUDBq1CgyMnpWiXy3xnyx7ju9xQ6THS0r0aWnYyorQ5eRgeDq2uEw7UlsvWZkjoxoMlml7KojGEwGfkr5Cb1Jj39xLOBUt+Mt03NwPX8EokGPqHUgvy+ZZonydUfU+yM45bD3TCKRXhH2Dsk+aNzg+i+hKA18W1+gBroGcnP/m3n99Akghbh2eB06Cr7R8Xw9LpGASCXj7B2Mg6D08MDv1luafT2rXLIRcVO7ydY/Mg5JQognG47mcjy7YUZ4QXUBnx79lBJtCc9d8BwA1YePUPrLL2jCw/C9+ebOTRwQD/7xUJAkZSEOvKZz4zkINXojp4qrgbaLthV//on+zBn0GZlWi6u1ZI5GVBdD3jHpceSoNr3F7HXfy6tX++ayIyFezng4qSjXGqiodMNV5UqVoYqs8qy6zNtO0WsCxE+DuMlgMoJC2fkxO0N9awQrXZO8Z8zAe8YMq4zdUZQe8vrS0sT5xLHqUuv0OpJpmXaLtrGxsfz0009cddVVbNiwgQcffBCAvLw8PD09LR6gjB0waCGp1qO47+UtHmq+SLc309apVy96rfkVQ0FBl13U9K69MbO3p62ZsvUbKF69Gv977sFt5Pl1zxd/8QWlv67B7z934HnxxXaMEM5UnEFv0uOkdCIrT8r47YpChkznCH78cQQ7Z3zLtB2VUoErIdSQw7+5SVydYKXzSHmu5HXY9zJwD7DOHJagDYJtfZLPKZs1FBdT8O57OPfrh/dVMywdnUWp66xtjSZ03ZSLwi9i5/U7KdbKfrcyjknfWl/bxJyGmbYKQcEnRz9BQOCJkU/gonJBl55O8eef43LeeZ0XbUFKBtmeJFkkdBPRNjW/AlEEH1c1fm5tu7fxv+ceXAYPbjJb3xpoT55E4eyMOjS0+YOydgMi+PYG98BWxzSajGSUSglbXSnTVhAEYoPc+TezhOQ8ydf2aOFR0krTLCPaOnvCDV91fhxLkVlb1RZn3zWgJTAajej1FsqIlpGxAWq1GqWy8xs37RZtFy1axA033MCDDz7IpEmTGD1aSoP//fffGTJkSLsDWLZsGa+++io5OTmcd955vPPOO5x//vnNHl9SUsKTTz7JDz/8QFFREVFRUbz55ptceuml7Z5bphnS/gJdObgH1zYNaB7zRfpMxRlqDDU4q9rWEV5Qq3GKjcUp1jplGragt7+0AM8pq6FCa8Ddyb5lApV//03Vrl2UBAU2EG1Lf/mVmiNHMOTl2TE6CbO4H+IaSYFJwN1JRYhX2/5mZLoPsmDb9Qh0jiRT/JeUIgs162iK1bPhzH4pE2TYPOvN015KT8PWl2DC/4FHUJvftv30dgJcAjiRWwKc7Vhdc/QYxZ9/jiY6ukuItoJJRHvkKCV53+M9c6a9Q7I7Vfv24dSnD8oWmnq6a9xx18gbkjKOSb8QSbRNzq1AbzShVkrVWD5OPtzS/xbC3MPqrD1chw7BZ+5NuHZgjdckfS+H7W9K9giiaLXMP1tS38+2rYkoChcXPCZNsmZYdeQvW0bBO0vxvn42IU+14KOeuVP6HNW2EvczFWfQmXQ4KZ0IcQuxQKS2o0+QhyTa5pafFW276+bkXTsg82+IaFv2dHvRpaejCgxEYcXKWVEUycnJoaSkpF3vMZWWIur1KP39u2ySmCMhiqL8c+wA3t7eBAcHd+pn126V6ZprruHCCy8kOzub8847r+75SZMmcdVVV7VrrK+//pqHHnqI5cuXM3LkSN58802mTJnCiRMnCAxsvMOn0+m4+OKLCQwM5LvvviMsLIyMjAy8vb3b+23ItISrDwyYCZ5hrfoL+jr74qnxpExXRkZZBn18Hde
Wam0mG4rbKtErPdjFalJSU8RelwhEESNW0Ftzk6O7EUF2PetQt1dLTHr//M0c/wzZxvmDlipsev7e9cT/kL/bQZWVFTV3kEi5HcBNGETAhussVCw7vvIlssSocScA5NH0lH+SU0VhyDqdOqdDgH2O+1zhiGrbIy6Oqs+xPTN99QcsFc6l56yavjlLWUYZNthGpCSTT434NTQQgEY7pKJGyvMnU//OgvaSvpdNS/9hot3/0XS0mJx2Pq+O03Sq+4kubPP9/v83tLI4ShUgVnvXKVFLhpgKiTT0abmtr9sd1kUjCavSUSdvpBMzKxulbYl2temxGZgUYl1msGqsB9tRZ8TtLpGPndf0h76UXUsZ6v4xLIk4ehGpngLIlQWOufSdsjUo9A1ZmHw5xIrlhpKwQxWZYpveJKqh98iPrXXlc6nIATqdeSEOFccemPD6lcW+Qd5gQmq1spOPoYCmYdLRK3HvBtybdct+I6Ptzxodvn2Bob6diwgbafVrl1vOnb/9C5bRuyzTag2PatZ6t0fURBCFT5yc5mZNsqW/Y2H+wvaavREH/VlSTeczfqqCiPx2T69j+0/fgjrT/+tN/nd3W9//htaQTBbR2//07hccfT9O/PFIvBdW+0oXIbXxd9TV1HnWKxVD34EIUnnYxp2TLFYhjurFVVmL75hvYNG5QOpV+iXv/wINLtgtsklQrdiBHoRoxQOpRhZ29H9Vav1Q0eiivG/pXFn+QDftxlVxB8QJIkouechXnXLnRp4rVwMEYmhFHbXs/GPbuZlB6jdDj72Vu/28gorQk0GjQJCX73mhyIylvK+aHsB0I1oZwz6hy3zok46igkjYaw6dP7PdZhNlNx881gs5GzYjnaFPe3AYqbGkEYur1JWxNZUc4GSHva9tBh6yBU03sJgvgFC7wWU8y556COiSF0wvj9Pu96aBhsc1qbw8axHx9LSngKS45ZQlSI5xPlvtby3+XYm5po+ugjok47VZGSAKO6Vtr+1vki637czRNHPcGxGcr0QGhf/yuWoiIkrVaR8YNB00cfU/fss0SdeSaGg/272Zuo1z88iKSt4Dc6bB0s+G4BJaYSls1Z1ucEb7jJig9DksDUaaOu1YIxwvu1/wZiV41zu09KlJ4IvZgECMEt8tRTCT/qKNSRkUqHEpDsEcsJz/uAz3cfzaXTnlY6nP10J23NRoynzWLUFWdib2hQOKrh4YjUIzBoDYyLc795hzYlhZhz3Evw2hsbMRwyBVtlFZrk5AHF1p20jRQ3NYIwWKOSnImryuZOJEcY0SHRNJmbKGkuIT8uX5GYdJmZxF95xQGfL6gNzqRtRWsFdR11tFpaidANj3JnxhtvQB0bQ8xf/qJYDVdXeYTOtnjUUbv7XWHuTRlvvUXHxt8wTJmsWAzDXeiE8ejHj0eXkaF0KP0SD6WHB5G0FdzWvHQpDrOZ8Bkz0A7whsgderWewuZCms3N7DbtZnTsaI+P4a/0WjVpMQZKG9oprG31q6RtXUcdmytrAchJHB4TPEEYCkmSRMJ2CLKj09hWDTWdZUqHsh+7w05JcwngXGmbkxCOSqdDleQ/TUUC2ajYUYyKHeW162uTksh47bVB7VYRNzWCMHSRei0jYkIpb+zoLpGwsWYjxc3F/SZtbY2NdP6xhbDp05A03r09lWU5aFfaul7rMqMyh01ZOkmSiLvkkv0+Z29tRR3uu+9tanQooVo1NnM8avovC+JNmpgYIo6epdj4wSD8yCMJP/JIpcNwi5jfDA/D49Va8In6116n6t776Ny23SvXlySJx2c8zoenfBiU3ZtHGv2zru3TG57miZ3noI1dSV6QTW4FoT/tGzeye94l2BoblQ4lYByU5EzctToq/KpW7J62PVgcFmSHBj3xQddR3F852tsx/ec/1L/8slvHDzRhK8ty901NMM49BMGT9i2R0N2MzNR3Akt2OCg89jjKrrgCc0GBx2Kpf/ll2tauPaAxU2VzJ20WOxqVREZcmMfGCwTBsKug+cuvKJx9LB1/bPHZmCqVRF5iOA6LEVA2aSsILrIs848j/sHth9w+rH/ng4FI2gpuC58xg7AjjiAkL9drYxyWehj5cfno1DqvjeGv9q1r608aO53JKNkaI5qQCcI+ZIeDqvsfoH3NGuoWP6t0OAFjWtooZFlCVrVT01avdDjd9tazjeegSBUV111H7aLFfpVYDnQ7G3fyeeHn3Sua3WFvbqbi+r9S88ST2Ft678wtOxyDisnqsHJW7lnMSJ1BemT6oK4hCIJTfleJBGddWzebkalU6MeNQ5eRgb2p2SNxWCsqqPnnvyi9ZD725v2v6ZpnZ8QZ0KqD61Z4uK+6kx0Omj76CHtTE82ffebTsXMTI3CYEwDnv7MSc4faxc/S+P4H2JuafD52sJKtVqVD6JUkSRyeejgXjrkQg9agdDjCEIjyCILbEm6+SekQhjXXFq3C2jaFI9nf4mMWc+gjX9LSaiUnQZRHEAQXSaUi6b77aProI+KvvUbpcAJGZmw02KJB28ia8m2cNnqG0iEB+yZtE5hir6N1+XLMO3divO5ahSMbPpb8toTlpcu5Y+odZEZlunWONjmZ8COPRDtiBHJnJ0Qc+D5kb26mYNbR6MeMIe3VV1Dp3H/wq1PruPWQW90+XhCE3rlW2m6vauHU6c7EYH1H/w/n0l960aONk2SbjajTT8fe1oomNna/r7mStrlBOKcd7klbSaVixLOLafrgQ2LnX+LTsfMSw3Gsj0NCTbutner2apLCfFdeydHZSd0LL4DVStjhh6GOjvbZ2MGo8f0PqF28mMgTTyTprr8pHY4wzImkreBXGjob+KroK9qsbSw4yHvdZP3RSFfS1s9W2jZ3WKlulgCdWGkrCH9iOHgShoMnKR1GQJEkCYOUTAeNbKzc5X9JW7OR2LyRJP7tTlAp09RkuHJ35d2fpb3wfJ9f7/jjDxzt7VirqweUsBUEwbNcSdsd1S0cbDySn877iaiQqH7P83Sne11GBimPPdrj13YFaT1bWZYpai4Chm/SFkAdHk7cZZfu9zlHWxuqMO+WwnA2I1OjssdjV1dT3Fzs06StbLEQ31ViRDtihM/GDVaSPgR7XR3mbduUDqVXP1f8TKO5kUkJk0gJT1E6HGEIRNJWcIujvR1Jr0dSeXcbUaullcfXPU6IOoQrJ1w5bIrku8NVHqGiqYN2iw2Dzj9+PV0rEpIi9UTqPTupFoThxlZbi8ZoVDoMvxcfkkaZfSs7GwuVDqXb3pW2RtJHZxM7ZrrCEQ0/rhqXrsSBp4RNm0b2l19gq28Y8LnlLeWEa8OJ1kd7NCZBCEbpsQYMOjXtFjuVTXZyE/tP2PpasDYhazQ3YrKYkJDIiPT/rveeIMsytU88Scv3K8h46y00MTFeGyuvq1mzpT0OdYQzaTs9xXfzCHVkJMbrr/PZeMEufOZMMt9/j5C8PKVD6dV729/jh/IfuOvQuzhv9HlKhyMMQfBkxIQhqX70MXZMnkLD2+94dZyU8BS0Ki1mu5k9rXu8Opa/iQ3TEWNwJkWL/KREwtdFX/PA
uuvRRq8Rq2wFoQ+y1UrVgw9RMPtYzEWeTUgNR66GCHvadiscyV7XTLwWa83p2Nszum++BM8a7EpbcN58W8rKsLce+P4oqdWE5OQQdujUAV/3wdUPMuODGXxe+PmAzxUEYX8qlcQoV13bqt5rUPek6qG/U3jCiXRs3jykGMzFxdhqa3v9ekFtcCZti5qcc5OU8BT0Gr3C0fiGvamJ5s8/x1JQSOvKlV4dKzlKT0SIBts+dW2F4UsTG0voxImoDP5bK3Z03GgOTjiYUbGjlA5FGCKRtBXcYtm9G7mjA3Wkd29kNSpN99PfYHyz21vX1j9KJGyu20xx22+odLVBWftLENym0WCtqEA2m2n9/gelo/F7Y4w5ADTZyhWOZC+jdgyd9dMxyNHE7NiErbFR6ZCGnczITADqOuowWUwDOrdswQIKjz2O1pU/eDSmdls7AOkRogmZIHiCq0TCtkoTnxd+zoLvFvDvXf/u9zzL7t1YSkqGnLSt+ee/2DXzSBo//PCArzW0WWhoswCQbfTudnl/U2wa3vVse6KJiSH9tVdJfvjvRJ9xhlfHkiSJ3MRwHBbnbivXv7cvyHY7HZv/QLbZfDam4P+umXgNb5z4BpMSRBm3QCeStoJb0l9+iexvvib8yCO9PtZQVuIEOleJBH9pRrZvY548sdJWEHolSRJJ995D+uuvHVBLTTjQ1NTRAFilBjqsHQpH47Sr2rkqbLq6hfJLL6XwuOMV6f48nIXrwkkIda5CKmkuGdC5IVnZoNVi3bP/LhxbXR01//oXLcuXDyqmt096mzUXrGFs/NhBnS8Iwv72TdqWmkpZtWcVm2o39Xte3BVXkPbSi0SdfPKgx5ZlGUdLC8gyoRMmHPB1V8mv1OhQvylD5ivDvQlZb0Kys4meM6f7Y9lqRbZYvDJWXmIEDnNX0taH97HmnTsp+ctfKDj6GDFv8aHOHTuoe/4FTN98o3QowjAXXO9WwqBJWi0hWb55k+9O2vrwCaW/6E7a+kkzsr2NeRJEeQRB6Ic2NRVtaqrSYQSECckjkO2hSOoONlYWcFj6eEXj2VK/ha+Kf0HSqckL06FNS0ObmIgkSYrGNRxlRWdR01FDUXMRE4wHJlV6E3fVlRivv+6ArYgdv/1G/UsvEzJqFBHHHDOomAxa/93eKAiBJt9VHqHSxG2nHU1SWBLj4/t/jR9MeZM/kySJjDffwFpdgybhwPryBUFazxaCN2m7L4fZTMUNNyKFhJD6r38iqT3bbDQ3MQLHeueDyZr2GtqsbYRpvb+i21pZiSoykpC8PDFv8aH2teuofeopwmfNIvLEE5UOZz+tllY0Kk3QlEIZ7sRKW8HvuCYTrtpLwcSfyiN02DrY0+Zc0eSwGMkR5REEwW321laaPvtM6TD8VohWjdbh7Kq8rmK7wtHAN0XfsKL+KXQxvxA2dSo53/2H9NdeVTqsYclVz3igq5A0MTE91o7TJCYSfc45RJ54gkfiEwRhaEZ3rbStNplJ0udwdt7ZPq+pqE1M6DF55Ura5gZz0jYyeJO25m3baP3pJ1q//x7zjh0ev/6oxAhw6FHZY8iOyqa+o97jY/Qk4uijyftlNSn/97hPxhOcQidNIvKUUwifdZTSoRzg3e3vMvWdqTy+TvxMDAdipa3Qr7bVq2n7+WcM06YRfvjhXh/PlbQtMZV4fSx/41ppW1TXht0ho1Yp97R0t8nZIMhhM5AQFktUqFaxWAQhkDja2yk67TRseyrRxBsJP8L7r5uBKEabSi3FbK9X/gFdakQqIbYcOjpSu2/mJa14zfMGT5dACh0/ntDxg1up/caWN/ih7Afm5M3hlOxTPBKPIAS78BAN6bEGShva2V5p4rCceLfP7dy2jfaNGwmbNp2Q7IElF2W7HaDP1ZO7apxlcIJxpe1Dhz9EcXMxebH+2+3e20InTiT1iX+hjoxCP2aMx6/vKiXXsutWPnjwJPRaz67k7YukUqGJifHZeAKEjhtL6j//T+kwelTcXIyMTKw+VulQBA8QK22FfrX+9BP1L73ss+Y6rifADZ0NNHU2+WRMf5EaE4pOo8Jic1De2K5oLK6Vzg6LUTQhE4QBUBkMRBwzG216OqoQndLh+K3D4+bSuutvxNtOUjoUzsk7j5biK7GZDiY3UbzeedNQkrbtGzdStvAaqh580COxbKrdxK/Vv/psNZQgBIv8ZOfr6NZKE1vqtvDJzk8oM5X1e17tosVUP/gQbT/9OOAx29etY9eMmVT/X+9JlMIgLo9wSNIhnDPqHCJ1kUqHoqjIY4/drxSHw2z22LWNESFEhWpxyCq/2DUpBK+iZud9fDCvrB9ORNJW6JfhkEOIueACwg6b7pvxtAaSwpzbZoNtta1aJZEd76x9pPSbvaumsMNiFPVsBWGAEm68gezPl2I45BClQ/FbE5IykG2RftF4sbyxHbPNQYKtFccl57Hn9ttFMw8vcSVty1rKsNqtAzpXNltoXbEC03ffIcsy9tZWrNU1g47FlTjOjsoe9DUEQTjQ6CRnYnB7VQuLNi7i/tX3s7Zqbb/nhU2bRtjMGWgSkwY8ZusPK7E3NGBvaurx621mG3uaO4HgTNoKB7LV1lLyl3Oof+11j1xPkqTu1ba7qn1zH9f81VeUXDCXxvc/8Ml4woGGOhfxNFmW95ZDiRZJ2+FAJG2FfkUcdRRJ995DxNFH+2zMwda8Gw5Guura1iibyNivCZlYaSsIA6IyGFDpRfH/vvhLDW+z3cyWPXUAHC43YCkooGPzH6KZh5ckGhIxaAzYZTtlLf2vvNtX6KSJGG++ibQlzwHQunw5BUceSdnCawYch91h7y4DFMyNeQTBG/K76tpuqzQNaHV97MUXkf7ii0Qef9yAx0y4+SbSX3uV2Isv7vHrrvea+HAd0Ybg2gWzes9qPiv4jFJTqdKh+BXTd99h3rmThtdew97S4pFr5iVGoAqp4qmt1zD3q7keuWZf2n9ZQ8eGDVjKxPdWCQ1vvsXOKYdQ869/Kh1Kt+r2ajpsHWgkDWkRaUqHI3iAqGkr+KWsqCxWV67uXtofTFx1bZVOZHQnbS0J3U+NBUEYuJbly2n+/AtSn3wCSSWelbpkG8PRxX9Hs76SwobRjIwdoUgcP5T9wO0bbkU/Ih+14VpGnPocsnVgK0AF90mSxOjY0bRaW2mzDuzhpCokhPgrruj+2FpZBSoVurSB/+zsad2D1WElRB1CcljygM8XBKF3Y7qStruqW7kwIgPYu4PLWyStlrDpve8KdDUhc82zg8mnuz5lWckybp58M5eMu0TpcPxGzPnnI7e3E3HccagjPLNAJS8xAtmup95WSHO9BqvDilblvRr5cVddSeikSehH+7bZn+Ck7Zp/2GprFY5kL9c9/IiIEV792RN8RyRthT452tqwt7WhMRp9uurItVUxKFfaGp3lEVyTSyXYHXZKXI3IzKKmrSAMlq2xkT233Y6jrY3mf39G9JyzlA7Jb0SFatFHb0HWVrGqdItiSVvn+4wM9lAyM5OIOCpXkTiCyesnvO6ROUX8gquIvfiiQdUkdD0UzojMQK3
yXbMYQQgGI2JCCdOpabPY0cnOhyIDmdPLViuy1YrKYPBYTAVBXM82Py6fRnMjo+NGKx2KX5EkibjLL9/vcw6LBZVu8CuxcxPDkW1RhDVdxrvzTkMteff9RTdiBLoRysyfBAibPp3c1T/7VRO47nq2YhfRsCGW/Ah9av1pFQUzj6R03iU+HXdU7CimJE5hTJznO3v6O3/YMrynbQ8WuxnZoSFen0SUQTylE4TB0MTEkHDrLcRdcQWRJ52odDh+J5nj6Kw6FWuH+93FPc01uXWYE8gRD6h8YigJW9lup23tWmqXLEG221EZDIO6WRL1bAXBe1QqidFdq2072pzdyytaKzDb+3/AUvOvJ9gxeQqN777r9nh7br+d+ldexW4y9XpMMCdtLx13KS8f9zLTkqcpHYpf69iyhcLjT6BtTf/1l3szKjECkKiuyiXJkI5KEumW4Uyl1/tVwhbE/GY4EitthT7Z6mpBpUKb7NutgxMTJvLaCa/5dEx/kR3vnEw2tltpaLMQG+b7ult7SyPEk5cY3F1mBWGoYs47T+kQ/NYh8Sewo6CUuuYwxWJwvd6pOmLIXvMfOuwHoZ8wQdS09QFZlgf071y7aDFI0PD6GzhaWwmfeSSh48Y6v7ZkCdgdGK+71q1rubZqi5UoguAd+ckRrN/dSGmtmghdBC2WFnabdpMXk9fnearICGSLhc6dO90ax1JSQvPSz0GjIfrsOb0eV9C1GELsHhN60/jmm9gqK6l79lkMUw8Z1DwgLjyEuDAd9W0WCmpamTAi2vOBdmlZvhzZasNw6FS/SxwKyilpLgHE/GY4EY9+hD7Fzp3LqN82knjnHUqHEjRCdWpSo0MB5UokZEVmMTHsQqyN08TkVhA8zNbQoHQIfqO7hrdCr3UO2UFx1+Q2u16F/f8eoeyqBYrEEkxq2mv4yxd/4eiPjkaWZfdPVKuoW/ws2vR0APbcfDOmZd9Su2QJdc8sArX709ruzsripkYQvMLVjGx7VeuAmpFFn3EGI5d9Q8qjj7o1jjo6msR77iZu/nzUUVE9HmOxOdhd3w4E30rbVsvA64cHq6QHHyTu8ssYseTZIT24zU0MR6Wr4dU/XuXTXZ96MML91b34IhU33EDbjz96bQyhf+3r17Pnb3dR//LLSocCiPnNcCSStkK/VDod6uhoRcZut7bTYvFMN89AMlLhEglpkWmoW2ZhbZpGrmhCJggeYWtspPy66yk++2wcbeIGCiDLGIpKX86W5pWKjF/dVo3Z3oksq0kJTyDs8MMJO2y6WGXrZdEh0exq3EVdRx21He437zAuXEj89ddh3rqVkPx8LLt30/DOO9Q9s4j466/DuHChW9eRZVnUfBMELxud1JW0rTSRFel+0lZjNKLLzHS7cac6OprYuXNJuPmmXo8pqW/D7pAJD9GQGBni1nWHi093fcq0d6dx/8/3Kx2K31OFhJBwyy2ow/fe+8gWy4CvMyoxAlVIFcurX+OTXZ94MsRusixjmDiRkNGjCT14slfGENxj3VNJ86ef0rJ8hdKh0GJpoaajBhDzm+FEJG0Fv/XY2sc49N1DeXeb+zWthoschVefgbPjL4htZILgKaqQEDq3bsVWXUPb2sHXSxtO0mK1hGUtxhT5OvXtTT4ff28pmDj04w4i/ZWXSX3iCZ/HEWx0ah3PzX6Or878ijh93IDO7U7cbtsGajUd69YNKGEL0GhupNncjIRERmTGQMMXBMENo5MikCSoaTGTGJoGKNdg2LVzbWRCeNA9lHOVgokLHdhrrQCmZcsoOvU0rFVVAzovNzEChyUBgOKm4oHtKHGTJEkk3nkn2Z/9G92IVI9fX3Bf6KRJxC9cSNxVVyodSndpBGOokQiduIcfLkTSVuiVtaaG8r/eQO2zzyoyvmtyUdlWqcj4ShqZ4KzvqNRK268Lv6OyoxhwkBtk28gEwVtUBgMpjz9G1icfEzFrltLh+IXsuFhkm3M76+qybT4f33Uz6zAbxQMqH5ueMp30yHTUqoF31jYuXIik1YLdjqTVDihhC3sTRynhKYRqQgc8viAI/QsL0ZARawBAZUsE3E/atm/YSPUjj9K8dGnfx61bR8sPP+Aw993grLsJmTH45rRiq/TgyFYrtc8swrJ7N41vvz2gc/MSI3BY4kCWaLG2UN9Z76UoBX+gG5GK8frriDjqKKVDEfX6hymRtBV6ZSkooOXbbzF9+ZUi4/8l7y/879z/cf9h9ysyvpJcdR4LFEjaNnU2cftPNxGW/RRxESpiFGiEJgjDlWHyZPSjRysdht+QJIlQnI0uN1bu8Pn4RU3OLfIOs5G8eJG8CxS1S5YgW61IWq3zxnrJkgGd70piZEZleiE6QRBcXCUS2lpiASgxleCQHf2e17n5dxreeAPTt//p87i6F1+ifMHVNLzxZp/HdSdtg3AhgkjaDo6k1ZL+8kvEXXklxhtvHNC5eYnhIGtxWJ0/995YYW6rr/fKCl4hsJ2YeSJLT1/KrYfcqnQoggeJpK3QK216Bgl33E7MRRcqMn5USBQx+uDshOmaVJY3dtBptft0bJPFRGroKOydSYxKEFupBMFbrNU1tK5UpparP4nXjQBgZ0ORz8d23UjFNYSRecmplMy9UNwE+Uh5SzkvbHqBlzcPrHGHq+lY/PXXMXrz78Rffx11zywaUOL24ISDue2Q2zgz58yBhi0IwgC4mpHtqTegkTR02Dqobqvu9zzDoYcSM3cuUWec3udxIdnZaBITiTjm6D6PC9akbbO5mYZOZ/NTV11hwX3alBQSbroRSb13R4hss/V7XrRBR0JECA6zEfB80la22Sg89jgKjpqFtTL4dqT6I9lioXPbNjq2bFE0Dq1aS3Z0NqNjxQKR4USjdACC/9KNSCXukkuUDiMoxYXpiArV0txhpbiurXvS6wvpkekcGf4QL2woInd6cE1uBcFXzIWFlJx7HrLDwcgvv0CbkqJ0SIrJjMyivAkq2nb7fOyCrpW22XUO6OzE0dISdPUOlVLXUcfi3xaTaEjk8vGXu3XOvglbV0kE1591zyza7+O+ZEdnkx2dPcjIBUFwV36ys+zMjqoO8nPyccgOWqwtJHftsOiNfvRoku65u9/rJ955Bwl33N7n67bdIXeXGwu2kl+uZGFSWBIGrUHhaAKbLMvUPbuEjg0bGPH8c6h0fe9EzEuMYJ3JCGzvbnzpKZbSMhwWC7S3o0lI8Oi1hcFp+uQTqh54kLCZM0h/8UWlwxGGGZG0Ffza63+8zpqqNVw54UomJUxSOhyfkSSJkcYwNpQ2UVDT6tOkLcDO6hbAWUhfEATP02VlEZKXh2yz4ejsuxbfcDfGmMNPTdBkK/fpuCaLiUazs85c9aijyL7jahwtJp/GEMxcW3Wr26tps7YRpg3r/yS7o8emY90f2/vfdi0Igu+45q8FNS1sueZtdBrPb/Ls70FbRWMHZpsDnUZFWmxwJS67SyOIVbZDZtuzh/pXX0Vub6d1xfdEnnB8n8fnJUawps47K21DsrMYtW4tlt2791sFLChHn5+PKjISVZgbcxkvsTqsPPDzA2
RGZXLxmIvRqUWJw+FCJG2FXnX89hvajAw0McqVKNhYs5GfKn7iiNQjgippC866thtKm3zejMwhO9hVE5wrEgTBVySVihGLF6GOigr6CfchqaN5cRdYpDosNgs6jW8mma4Ouw5rJKOSkgjJFje1vhQVEkWsPpaGzgZKTCWMjRvb7znG667t/WtuNiMz280s372crKgsRseOFiurBcGLRsSEEhGiocVso6iutbvGrTtkWcZasQe5s4OQnJz9vuawWLBVVaFLT+/3OgW1zoUI2fFhqFXB9fsu6tl6jjY1lbQlz2IuKOw3YQvOuraODc5VsN6oaasKDRU9EvyIfsIE8tb8ouicorylnKWFSwnVhHLpuEsVi0PwPFHTVuiRvbWVkvPOZ9f0w7CblFt55Nq+6I03O3/nqrtVWNvm03FP/+wMGmMeQhVSJVbaCoIXaWJjgz5hCzAxJR3ZHoIkOfitqsBn446JG8OM0MfprLgg6Ooc+gtXIsGX7/ElzSXc/uPtXP4f90oyCIIweJIkMbqrRMK2Suf9hDuNyACaPvqIwtmzqX788QO+1v7LLxQedzyll1/R73Vc9WxHBuHrvOu1NTtKlIPxhLBp04i9cG73x7Ld3msd/NzECOwW50rbyrZK2q3tPolRUIakUin+EDhMG8a1E69lbv5cVJJI8w0n4rsp9MhWW4s2JQWN0Yg60rdb8/fluqFzrYgKJiONXUnbGt+ttDXbzew27UYVUkdMSDSxYWJbhSB4m+xw0Pj+B9Q88aTSoShCr9WgdSQCsLZiu8/G1ag0VNZFom1J5tDv3qH5iy9EEzIfc73HFzX5rgmdzWHj4ISDmZQwSfEbLEEIBq4SCWvLd3HGZ2dwwicnuHWePj8fNJoey56YC4tArUabNqLf63Q3ITMGYdLWJFbaeovDYqHixpuoffKpHr+elxgO9jAcNud2+d0mz9Ttt5SXU7bwGhrefNMj1xOGjwRDAlcddBV/PfivSocieJgojyD0KCQri5wVy5EtFkXjcNVgCsaVtq4VAUV1rTgcMiofbOkqNZUi40C268mJ77tJhCAIntH5++9U3X8/SBIRxx9H6Nj+t4kPN9HaEdRRytZa3620lWWZgupWMk2VxPzvQ2pWf0/Uqaf6bHxh7+qvElOJz8YcGz+WN058w2fjCUKwc5VE2F0jUagpBJw1xSN1fS8K0Y8Zw6j1v6IKCTnga3HzLyH6zDNwmPu/T+lO2gbZSluL3UJ5i7NWvEjael7bqlW0/Oc/SFotUWeeQUjW/v/GEXotKVF6Gi1GVJo2ipqLyI/LH/K47et+pXXFCuyNjcRefPGQryd4TtvPP1O7ZAkhWVkkP/SQ0uEIw4hI2gp9kvrpjOltmVGZANR01NBiaSFCFzzb9dNiQtGpVXRaHVQ0dfikeYIrOe6wGBnl4+ZnghCsQidOJOaCC9BlZgZtfbLUsAzq2n72afLuoZ8fxxxeS2doJpHnnYfGEFwNavyBEuURBEHwrfyu8gg7K228fNnLpEekE67tP4EqqdV9lhBSR0fTX4EhWZaDNmlb1lKGXbYTrg0nPjRe6XCGnYhZs0i88w50OTkHJGxdchMj+MWUAIYSj73PGQ6eRMJtt6GJi/XI9QTPke0OOn5dj72+QZHx11evxxhqJDU8FbVKlF8bTkTSVvBrEboIjKFGajtqKWkuYbxxvNIh+YxGrSIz3sDO6lYKa1t9m7Q1G0UTMkHwoaR771E6BEXlxY5kUxvUmct8Mp7VYeWTwvcISbCD5iFSb+y9wZXgPa6k7W7TbmwOGxqV96elVrsVrVrr9XEEQXAalRSBJEFdq5ns8BkYww9cOTsQsiy7XdqkttWMqdOGSoKseOW6uiuhqNlZdiY7KluUgvGS2Hnz9vtYdjiQVHurT45KimBVdRqJRjMjIvov5eEOXUYGcZfO98i1BM8KnTCelMceJSR/6CuqB0qWZa5bfh0t1hY+Pe1TcmNyfR6D4D2ipq3Qo/Lr/0rFzbdgKfPNDXRfulfimIJvJY6rrm2Bj+rauiZ4DkuCaEImCAqRHQ7sLS1Kh+FTByfnITvUmK2+Gc9qtzIt5nysTZMZFeeZGylh4JLDktGr9VgdVipaK7w+nkN2cMT7R3DCJydQ017j9fEEQQCDTkNWnDNh6mpG5q7OrVspW3gNFTffAjgTE8Vz5lB+w41YKyv7Pb+g2jl/To81oNcG18qzo9OO5uuzvube6fcqHUpQsNXXU3LuebQsX979udyEcGzNh5DQfi1n5JyhXHCCT6ijoog6/XT0eXk+H7uuo44WawsqSUVGZIbPxxe8SyRthQPIVist33+P6auvkDTKL8YO5u2T3c3Iatt8Ml5hk1hpKwhKspSUsPuii6m48aagaop1ePoYWnc8RHPRAlo6vZ+5NWgNxFpPxLxnDgeFmIPq39qfqCRVdxkkX7zHV7VV0W5rp7q9mli92FoqCL4yuqtEwuqybSzeuJjX/njNvRMlidYVK2hduRLZ4cBSWIh56zZaV6xAFdF/Ga+C2uAsjQCgVqlJi0hjVOwopUMJCg1vv03n5s1U/f1hHGYzAHldC2B2VnvmQby5qIi2X9bg6OjwyPWE4cM1hxoRPgKdWjQSH25E0lbo0YinnyLh1lvQJCYqHYoi3aX9hWuSWVjr/ZW2DtlBSdcLfqQ6lbghbl8TBGFwOjdvpn3DBqy7PdNpOBDEhIUQHx4KQJGPHlLtrG4lua2eWfddTsFRs5AdB3YoF7zPlw1HXWNkRGT4pBSDIAhO+V3NyLZUl/LC7y/w6a5P3TovJCeHxL/dSdqLLwCgGzmSzI8+JOn++1GH91/uwLVTbWQQJm0F3zJecw0xF1xA+quvdDfPy010/tzVtVrY3VhHp61zSGM0ffQxpZdcQvVjjw05XsE7bPX1mJZ9S8t//+vTcV3zG9F0cHgSSVvhAJJWS8TRRxN32WX71eVRiiiPAIU+KI9Q016D2dGJLKvIiU33+niCIBxIl5lJyuOPM/LLL9BlZiodjk/lJDhvwH3xkGpr3VZ21heR1FYLKjUao9Ev3u+CkS9307hKAImbGkHwrfyu5raVtc6Vh+Ut5Vgd/e+qkLRaYi++GMPBByOpVEiSROj48USfdaZb43Y3ITMGV9JWlmXu+ukuntv0HO3WdqXDCQqSRkPSvffs15QsVKMiLTaU0PQXOeXzWaytWjukMVShoWgSEzFMnjzUcAUvaV+zhoobbqDuxZd8Oq4rTyLmN8OTuEMR/F52VDYAZaYytyZ4w0m20ZnEqG+z0Nhm8epYrpXMDksceYnRXh1LEITeRZ5wPNqUFKXD8Dl99O8YMhfxQeFzXh/r3p8fgLTH2JzjIGPtWkYsesbrYwo9Gxc/jiNHHMmYuDFeH0usRBEEZbjKIxRXazBoDNhkG2Um7/fN6E7aBtlK2+r2aj4v/JwXN70oGi8qpHPHDorPPItD1a3IdufP357WPUO6pvH668j54XsiTz7ZEyEKXqAfOxb9uHGETpjg03HF/GZ4E3vDhAO0r1+PpFYTkpuLKkz5TqsJhgRCNaF02DoobykPq
hejsBANKVF69jR3UlTXyuQw79Xgcz2hc5gTumswCYKgLHNhIY6OTkLHjVU6FK+Lj1Cj7qigoj3Kq+PIssxuUwkAyYZ0DOEGCDd4dUyhdzNGzGDGiBk+GUvc1AiCMlKjQ4nUazB12kgypFNk2k5xczHZ0dm9nlO7aDGoVcTNn0/Hxo00vPEm+rFjiTzlZEzLloHdgfG6a3s939RppabFWVs02Moj6NQ6bpx8IyazCa1KJG2VUPPYY5h37OBkzUd8kn8Kp4/4K+eNnjrk60qSBJLkgQgFb9BlZJD18Uc+H1fMb4Y3v1hp++yzz5KZmYler+fQQw9l7dretw689NJLzJgxg5iYGGJiYpg9e3afxwsDV/3445Scdz6tP/6odCiAs1HJRWMu4q8H/5VwbXBNumDvRLOwxrt1Hl0v9g6LMehWJAiCP2pZ8T1FZ5zJnttuw2Hx7kp7fzBjxGF0lF2EuuFsr45T21FLp70dWVaRF5vp1bEE/+J6n3Pt4BEEwTckSWJ0V4mEcMm5k6TfsmdqFXXPLKLmX/+i9NLLaF25krolS6j+xyPUPbMI1H3fxrpW2SZGhhCpD67EZaw+lkvHXcoNk29QOpSglfLPfxJ1xhmYbrwL2RZJcY19SNeTbTYPRSYMN+3WdirbKoG9fQKE4UXxpO0HH3zATTfdxH333ceGDRs46KCDOP7446mpqenx+B9++IHzzz+f77//ntWrV5OWlsZxxx1HRUWFjyMfvrQJiWgSEtBl+89NzXWTruPy8ZdjNBiVDsXnXHVtC7xc57Gg0VUewShW2gqCHzBMPhh1VBS6tDQcbb5pzqWkKWlZ2FrHUlEbhtXuvaZgrsRdqCmaC756gdpnnxVNyBQmyzL1HfW0WDzTYbsnzeZm6jvrAciMyvTaOIIg9GxMV9JWtjjn8v3VsTYuXEj89dfR+PY7qOPiANCmptL200/EX38dxoUL+zzflbTNTRBzWsH3NLGxpDz6CCOzkgHYWd2CYwhzjbIFV1N02um0r1vnqRAFL3OYzT4Zp6Rr91isPpZofbRPxhR8S/Gk7RNPPMEVV1zB/PnzGTNmDM8//zwGg4FXX321x+PfeecdFi5cyMSJExk9ejQvv/wyDoeD5cuX93i82WzGZDLt95/QtxGLniH3fyvR5+UpHYoAjOyqa+vtZmTnZV9PR/kFhDtGERem8+pYgiD0Tx0VRdYnHzPi+efQxMQoHY7XJUfqMejU2BwypQ3ea5ziShSkV4aR/Psamj/5VDQhU9jNK2/mqA+PYlnJMq+N4fq+JxoSCdMqX/pJEILN6CRn8tTU4iz15U7zQVfi1l5fj6TVYq2ocCthC8Fbzxbg16pfKWgsCLpeIP4oJyEclSQzvv4FvjtlGvX15QO+hmy307FhA+adO1FFRnohSsGTWv/3P3bNmEn51f2/TnmC67U0MzLTJ+MJvqfoXYrFYmH9+vXMnj27+3MqlYrZs2ezevVqt67R3t6O1WolNrbnWp+PPPIIUVFR3f+lpaV5JHbBt2wOG8XNxayrCr6ni93lEby80rajPR5bywTy4tOd9ZIEQVCcNjExaH4fVSqJlKRKdHHf813hL14bx9V0sVKfguPqvxJ7yTyvjSW4JyksCQmJuvY6r40h6r0JgrLyu1ba7qnpakrWXIwsy/2eZ1y4EEmrRbZakbRatxK2sDdpG2z1bAFu/9/tnPn5mWyr36Z0KEFPr1WTG6nl6h+3kV7UQsUrLwz4GpJazcj/fEvqM08TkpPjhSgFT1LHxGCrraVzxw6fjCfmN8Ofoknburo67HY7iYmJ+30+MTGRqqoqt65x++23k5KSsl/id1933nknzc3N3f+VlXm/U6ngeSXNJZz22Wlcv+J6tyZ4w0lOV3mE0oZ2Oq1Dq4fUl13Vrm1kwTe5FQR/57BYqF20mObPP1c6FK/SRPxGSMK3rK78yWtj7GwsBKA2JI2RC68g9uKLvTaW4J6rJlzFmrlruHri1V4bw1U/U9zUCIIy8hIjUEnQYIpEJalptbZS21Hb73m1S5Z0J2xlq5XaJUvcGq97pa0xuOa1LZYWajqcZQbF651/yEiN4x+nZPLtJIldp44f1DU08fFEHnccklrt4egETwsZNYqM994l5z/f+mQ8kbQd/gJ6P+Cjjz7K+++/z7///W/0en2Px4SEhBAZGbnff0Lvap56iuJzzqX5y6+UDmU/6ZHphGvDSYtIo806/Gs77ssYEUJEiAaHDLvrvbNl+Lea31hZ/TEqfamoZysIfqj500+pe/ZZqh/+B/ZhXOYnPSITgPLWUq+NUdTknNwaQ9LQa8XNjz+ICokiVBPq1THETY0gKCtUpyYzPgxkDXEhSUD/JRJqlyyh7plFxF9/HaM3/0789ddR98yifhO3nVY7ZY3OOXOwlUcoaS4BwBhqJEIn5vT+YFRSBLti0nnlBDXF+8xvgm0hUrBQ6XQYJk1CFeabUkwmi/O+QMxvhi+NkoPHx8ejVquprq7e7/PV1dUkJSX1ee4///lPHn30Uf773/8yYcIEb4YZVDo3/0Hn77/j6PBePcHB0Kl1/Hz+z0GzTXhfkiQxMiGc38qaKKxtZVSS5ydgP5T9QCkfoI2aRm7CXzx+fUEQhiZ6zhxalq8g+qwzUUUM35uwfGMOq5uh0eadXTFt1jYaLbWoHDLHtNiwNzejjoryyliCf3Elh7Kj/KfJqiAEm/zkSIpq2wiTUqilguLmYg5NPrTHY/dN2LpKIrj+rHtm0X4f/1lRbRuyDFGhWuLDg6tPg9hV4H9yEyNwbOhqwNf1/al74UWsFRUkPXB/n/e3sixT/feH0Y/JJ/Kkk1CFevcBpxB4XjruJdqsbWhUiqb2BC9SdKWtTqdj8uTJ+zURczUVmz59eq/nPf744zz00EMsW7aMKVOm+CLUoJF4112kPv004YcfrnQoBwjGhK3LyK6tXd5qRpYVmYPNNB57RxY5icG1IkEQAoGk1ZL+0otEnnjisH4tnJo6GgCrVIvFbvH49V0ddpNqDFzw7pMUzDoaeQjdnAXPeeLXJ5j3zTwKGgu8cv1njn6Gp456ivy4fK9cXxCE/o3pqmvrsCQA/ay0tTt6bDrmak6GvffX7oLavU3IhvN7Zk/ErgL/k5cYjsPiTNoWNRXRuWMntU8/TdOHH9L206o+z7WWltL4zjtU3f8AiKapAcNSXk7diy9R//rrPhkvTBtGiDrEJ2MJvqd4Ov6mm25i3rx5TJkyhalTp/LUU0/R1tbG/PnzAbj44otJTU3lkUceAeCxxx7j3nvv5d133yUzM7O79m14eDjh4SLZNFQh2VmEZPv3m7wsy0E3ARuZ4NxeUeClZmQ5YTPoqJCINmgxhosXfEHwdw6LM6Gp0g2vFUQHp2YiO7RIKiubq4qZnDrKo9d33cyGm6KwGMOIGpGMJG6C/MJvtb+xsWYjBU0F5MR4vtFKdlS2WGUrCArLT3buFGlpSmX22Nl9PkQxXndt71/rpxmZq55tMPZpcDXbFElb/5EVHwZWZw+fitYKpJwMkh96EHtTM+EzjujzXCkkhLgrr8TR
2ooqRNyjBQprWRm1TzyBNj2duEsuUTocIcApnrQ999xzqa2t5d5776WqqoqJEyeybNmy7uZkpaWlqPa5oXruueewWCycffbZ+13nvvvu4/777/dl6IKPralcw6NrHyU1PJXFxyxWOhyf6l5p66Wk7b6T22BLiAtCoGnfsJHKu+8m4rhjSbjhBqXD8ahQrRaNPRG7qpy15ds9nrR13cz+YcxA/f6/yDCKbYb+Iisqi401G/utcSkIQuAaneRcaVuxZyTfLVhIiMY7dcULalqA4KtnC/uUR4gUSVt/EaJRkxmTSLVdD+pOdpt2kzdnzn7H9LYoSZuURMJNN/oqVMFDQkaPJvKkk9CPyffqgrOPd37MN8XfcEr2KZyZe6ZXxhCUp3jSFuDaa6/l2mt7fpr6ww8/7PdxSUmJ9wMKUp07dmDeuQv92LF+udo2RB1CQVMBrVbvJC79mWvSWVjThsMho1J57oW/3drOhj1FgINc0YRMEPyerb4OS1ERzZ8tJX7BAlS9NOIMVNHaVOopZ0vdLo9fu6q1DgDZYmSkMRxJJxqR+QtXgqGoucjj115ZtpKCpgKmpUxjbNxYj19fEAT3JEfpiQrV0txhZVd1K+NSvVNT3LUYYWSQJW2tDitlJmdN+OxosbPAn4xKjKDSlIDaUEpxczF5MXndX5OtVvbc+TfCpk8nes5ZCkYpeIomJobUJ/7l9XE2121mbdVaJidO9vpYgnLEnkChW8u337Ln1ltpeO01pUPpUWZkJgBVbVW0W/2rUZq3pcca0KgkOqx2Kk2dHr326srVfFq7EEPGC0G5jUwQAk3E7Nkk3nsP2Us/G3YJW4AUQwYAxV31Zz3ptBF/pWX7gyRIMwgVCVu/4trK642VtstKlvHUhqdYvWe1x68tCIL7JEnqLpGwdU8zVW1VtFo8uxjDZndQXNcGQI4xuOa15S3l2GQboZpQEgwJSocj7CM3IWJvXds/PZxs/vwLTF9+SdWDD2Ktqen+vL21jc4dO5Dtdp/GKgSOi/Iv4uEjHuaY9GOUDkXwIpG0FbppEpMwTJmCftw4pUPpUbQ+mlh9LLC3mUyw0KpVZMQZAM83I3PdIDusMeSJlbaC4PckSSL2ggtQR3lnhZLS8mJHAlDXWebxa++qaSWq08I/Pn+KPbffIZqQ+RFX0rbEVIJD9uz3ZWrSVE7KOokJ8RM8el1BEAbOVSLhxV1/49iPj+X7su89ev3ShnasdplQrZrU6OAqgeOa02dGZqKSxG2+PxmVFIHD3HMDvqizziRm7lxGPPM02oS9yfa2n1dRfPoZ7J57oU9jFTzH0daGpbTUa9fPicnhtJGnMSrWs+XEBP/iF+URBP8Qc+45xJx7jtJh9CkzMpOGzgaKm4sZEzdG6XB8KichnMLaNgprW5mZZ/TYdQsanU97HWajWGkrCAGkdtFiUKsIO/RQdOnpaIx7XxdqlywBu6PPRi7+amJSHp+UQzuVHq8DVlDdQnbzHoy1ZXT8phJNyPxIangqWpUWs91MZVslqeGpHrv2mblnilpvguAnxiQ7k7bWzijUGjUNnQ0evb6rNEK2Mcyj5cQCgWsFp2hC5n/yEsOxd620/XPSVpIkku65G9g7tzMuXIi9vh6VwUBI/mjn1wJ4bheMWn/8kbIrr0Kfn0/Wp58oHY4QwMTdihBQXJMQb9S883feaka2o74QAD3JGCNEV1JBCBhqFXXPLGL33AupfuSR7k/XLllC3TOLQB2Yb/GHpY9GliVQdVBmqun/BDet3rOaZQ33UZJdwu6bH8R4o2js4U/UKjUZkc7SGK6GcYIgDD/5XUnb5orjWHvBWuaNnefR6xd0zZODsglZVzIwO0rUs/U3GXFhqG2ulbZ97CjpmtsVHHss+vETyFu7hoQbbwz4uV0w0mVlgSxjN5m8srOr1FTK+9vf57ea3zx+bcG/iN96AXB2rAwErklIMHaXdiVtCzxYHkGWZcpaSwDIiMjyWmdLQRA8z7hwIdHnnQeAZfduZJute1Iff/11GBcuVDjCwUmICEeyxQHwc+lWj113W8M22lU76YisJ/mEY4k84XiPXVvwDG/UtW3sbKTUVIrNYfPYNQVBGLzcxHBUEjS1aWho83wiwzVPDrZ6tuBsLqySVGKlrR/SqlVkRadjaZzKiamX9PqeZFy4EP3YsVjLyim7/HKQJBrefjvg53bBSJuaSu6qn8j573de2dm1tmotD695mOd/f97j1xb8iyiPIADQunIllXfcSfisWaQ88g+lw+mVNxuV+DtXB9zC2jaPXbOuow6zox1Zlsg3igmeIASa5PvvQ9LraXz9dXZMOhjZah0Wk/poKZ+61hqqmqweu+ahxqPoqKhAtkUG5QqsQND9Hm/y3Hv8N8Xf8MjaRzgm/RiemvWUx64rCMLg6LVqso3hFNS0sq3KRFKUZxtqupK2uYnB9zr/5KwnMdvNSoch9CIvMZodm84iZeJodGpdr8elv/EGJeecg6WoiB0TJw2buV2wkSQJTVyc167vyodkRYp7+OFOrLQVALAUFWNvasLR2aF0KH1y3dDtNu3G7giuTpojjWH/3959h0dR7W8Af2c3W9I2vZMQ0kgEpIoiVooBvQrYAQVRL6IoXMvvXu+1YL1WVBSRa7vAVSwoVgQLHUFpAlJTCCEhCWmQnk2ye35/rLsQCZBAdieZ836eh0d3ZnbyfZkwO3P2zDkAgJIqKyrq2qchw3myF43BSI0Ibpd9EpFnRT78DygGA0RjIxSDAUE33aR2SWft4uC7UZc3CY21ce22z5raQChHzsUNByqhy9jTaZ4wkYk7vph17ss59AIRqc85RMJ/dr6E8UvGI6+yfSaeFEK4JuyV9cs5k94Ek57DnXVEKX/8TmYerjrldno/XyR+t6TZtR0bbOnPXI227FmveWy0JQBA0Nib0e2LxQidcrfapZxSlG8UTHoTGu2NKKguULscj/I3GxBhcVyEtde4ts6xge3WMCl7JBBpQcmcOa6LetHYiOwrr0JTSYnaZZ0V52Ot2cXt92RBZnEVYqsOY+L6hTh4x53ttl9qP25ptK3kTQ1RR5MW5Q8AyK3ZjR2lO5Bdkd0u+y2sqEdNgw1eOgVdQ3zbZZ9E7SUl0h9QGrGzdA92le065bZ/vrYrmTPHQ1VSe7JmZ6Pg0UdR+MQT7b5vjmEtDzbaEgBA5+0Nc1oazN1T1C7llI6fqKQ9H5/sLFyTkbXTuLZZf0z2Ym8IR0qEf7vsk4g85/gxbJNWLIfO3x/2igocfvlltUs7K87hYDJK22cissqGSizN/QwGUy4OJ58L3wsu4BjeHZDzEb/y+nIcrT/aLvvMOcqbGqKOJi3S0dO2sT4MQPt9UeMcGqFriA8Mkk3Y9HX217j1u1vx4Z4P1S6FTiIlwh9e/r+j0O/feHnTzJNud/y1XervOxA67T6Uvv4GG247IdHQgIrPPkfl0mXt+oSX1WbFoepDAPiltAw4pi11Ot0CuiHjSAb2H92PS7pconY5HpUU7of12WXtNq7tntIsAIDRHolwfz5KRdSZtDTpWLfPP0PJnDm
o/PIrGLt27bSP00UHKfBNfholXjWotF4Mi+nsngTIKM/A9rp5sPcMRvGtH+CyAbHtVCm1Jx+DD+7oeQfCfMKg1+nPen/VDdUornM0/McHxJ/1/oiofTiHR6iqDIIxrP0bbWUcGmF32W5sK9mGPuF91C6FTiIu2AdetggImzf08G5xm5au7Zz/LX39jWavqeMzJSYi5O4pMKemAXY7oD/7axsAOFBxAAICFqMFwWYOcah1bLQl2CoqUP7BBzAlJnWK2bSdvWWk7mnbTsMj5FYeAADE+sez1xlRZ2OznzAxhTEuDjHPPw9jXBxgs8NWXQ3FYIDO1Lm+lEkMCYHzlLQpPwtDE/uc1f5cQ8E0hCFZwpv5zuRv/f/Wbvs68MdnXKh3KCxGS7vtl4jOToTFhCAfA6qs7dzTtkTeRttxqePQO6w3x+/uwPQ6BQkBqdid8ThuPH9Ayxu1cG0HHNdQa7O7uUpqT4rRiPDp09t9v8cP/cR7eO1joy3BmpWF0jdmwxAd3SkabYfGDUWsfyx6hPRQuxSPa8/hEWoba1HR6Bj3MjUk8az3R0SeFXbfvSdfd889aDpyBAcn3gav8HB0eX0WFIPBg9WdHZ1OQXTt/yHjkA62AZFnvb99ZdmAEBD1oVLezMuKk3QQdUyKoiAtyoJf8h2Ntvsr9kMIcdaNDzL3tI2zxCHO0n6Td5J7dI+wYHdBFTKLq3FFC7eyp7u2IwJ4fSMbuQb7oRbp/PwQcO218B8+XO1SWqV7cHdcnXg1EgLlG58uMdwxqUJueS0ams7um9aDVQcBAPYmP/SIjDrr2oioY2nIOQBrVhbqtm1D46FDapfTZmmhCYAwtstwMHvKshBcBXz0359RdsckCDt7qnRU9U312FW6CxsLN571vlw3NRbe1BB1NKmRFtgbwgAoqGyoRHl9+Vnv09lomxzOeRqoY3JO/LyvqKpdxziljks0NqJ+3z7U/PJLu+2Tk5DJhT1tCebu3RH972fVLoNaIdJihq9Rj5oGGw6W1yDpLC5KU4NTEVr2AnKPFiH5Yvl6JBBpnU+/vugyezYM0VEwxserXU6bOZ8syGqHJwsOVuUioVjA3GSDvbICio7fWXdUO0p24I4f7kCcfxyWXLvkrPbFnihEHVdalD8gDDCIEDQqpcipyEGId8gZ76+8pgHlNQ0AgIQw3/Yqs1M4VH0Iy3OXIzU4FQOjBqpdDp1C9wh/GAJ/wZq6n/HGb6Mxrd80tUsiN6vd+hsOTpwIQ3Q0klYsb5d9Hqg4AIDXN7LgXQt1Sr8V/4YP93yI3MpctUvxKEVRXLOqn21DRkOTHQdLAXtDhOtbXyLSFr+LL4Ip8djwJ01HjnSanh1BlmqYIr/Ahsqzmy25rqkOFY3F2J6g4OsHn0XkU0+1U4XkDt0CuiHYHIwo3yjYxdn1iHaOZcyeKEQdj3Myssa6UABnP1eF87o4JtAbPka5+iVtK96Glza/hDe3val2KXQaKRH+gCJg05cg80iW2uWQB5jTUqGzWGCIiYFoaDjr/dmF3TVmPxtt5SDXJxq1yFZdA71f5/pG+j87/oOfD/2MGYNmSDfgfmKYH3bkV5z1I8M5pTWw2QX8TV6ItJjbqToi6qjqMzJw8I47EDxuHELvvlvtck4rLsQMY9CvqLJ7ocnWBC/9mV2yOL7cE7DZfRF3bn/49JXrM6OzCfMJw+qbVp/1fhrtja5hgHhTQ9TxJIX7Qa9T0FAXCqMPsP/o/rPan8zj2fKpgs4jJtAbBlsEACDzSLbK1ZAn6C0WpPz6S7tNGFZUU4S6pjoYdAbE+MW0yz6pY2NPW8nZrVZkDByIjIsvhq2yUu1yWm1g5EAMiR2CcJ9wtUvxOOfF6NlORvbi5mdgivgaXSMbOOskkQRqN22CraQUlUuXwV5fr3Y5p3Vel0QIuxega8LO4gNnvB/nzaytIQwpkZZ2qo46ukNVh9Bkb4K3lzcifCPULoeI/sRs0CMxzBf2Bse1fHv1tGWjLXVkOp3iOk6FNflotDWqXBF5Qnvea9c21qJveF/0Cu0FLx37YMqAR1lyjXl5gN0OYW2Azr/zDNp/e8/b1S5BNYl/jNOVXXLmjbZN9iZsLvsBxuAmdPO+ob1KI6IOLHj8eOhMJvgPGwadueP3rvcxGuFlD4dNV4Bf8/eiT1TSGe1nb1kWjI0CN62pQ3TEaojYazimbSdhF3bolDM7VvnV+QCAeEv8Ge+DiNwrLcqC7H1hAI6N0XimskokbrStZKNtZ5Ia1gW51UbY9Q3Iq8qTcnJtOnNJQUlYMHKB2mWQB/EqVnKmpCSkbN6E+IUfsrdlJ+GcnCe7pOaMx6a0CzsSdbeioewSnBsZ347VEVFHFnj99dAHBrpe2yoq1CumFQL00QCAXSVnPu7brpIsxJYA1/1WgKqZL7PBthNYlrMM6Z+l45F1j5zxPi6KuQi/jPsFr1z2SjtWRkTtKS3KAnuDo9G2oLoAdU11Z7yvbEl72trsNuRWOOb4YKNt59A90v9YD/OKs+thTp2DNTMTubdOQO4tt6pdCnVCvHMh6P38YEo6sx5MahJCoLi2GI12uR4r6RriC71OQbW1CYcrrWe0D6PeiMqS/rAWX4mUyIB2rpCIOoOqFSuRNXQYqteuU7uUk4rydYw/ezY3NTkVOag3Ahv6nIuAv1zVXqWRGxn0BhTUFLgmEjtTvgZfdPHv0k5VEVF7S430h7D5wqd2GB694NEz7oxQY23CoaOOBt+kMLkabQuqC9Bgb4BJb0K0b7Ta5VArpET4w251fFlxtsOCUOeg8/VF7aZNqN22DfaznIysyd7UTlVRZ8FGW+q0Ri4eiaGLhp71xAWdjdFLh67BPgDOfIiEhiY7DpQ6JjJLlqxHAhE5VC5dCnt1NSq+/FLtUk4qJSgRAFBizTuj99vsNpRZ83EoVMGemycj4p//bM/yyE2cvcVyKnJgF3aVqyEidzknygJAQcnBYbgm4Tr4GHzOaD/O6+FQPyOCfI3tWGHH52z062rpCr1Or3I11BopEcd62mZxMjIpeEVFIfrFF9Dt88+g6M/u32n6Z+m4cvGVf0y0SzJgo63kimfNQul/3kZjcbHapbRZqHcoADkfK0n4oxdB1hlORrYkcx2EaT/8zE2ICuj4Y1sSUfuLfvYZhD/8D0Q//5zapZxUn8hkAECNveCM3l/dWA2ziIe90YLekRwzrrOI9Y+Fl+KFuqY6FNe2/fpECIF7l9+LZ355BhXWjj0ECJHMwvxNCPE1wi6AfUVVZ7wf5/VwomS9bAFOQtYZRQWYYbJHAgD2lbHRVgaKoiDgmmtg7t79rBptK6wVKK4rRl5VnqsthLSPjbYSE3Y7yufNR8mrr0LU1qpdTpsd3xNHNonhZzcZ2Xu73oRP/H8QGXWAYxkTSUoxGhFy221QDAbXMlvVmd80u8OguDTH/+hrkF9Z0ub3B5gCoC+cCtPvU9E9wtLO1ZG7GHQGxFpiAeCMhkgoqy/D6vzV+HTfpzB78YtJoo5KURSkRvkDihU/Zv+KjYUbz2g/WZKOZwscO0ey0bbzUBQFcRbH8E
951blnPCwIycditGDljSsxf8R8+Bp81S6HPISNthITTU0IvWsyAkZdA0OXzjfmW0KAo9eUlI22rsnI2t5oK4RAYe1Bx34CeYFHRA6l77yD/Vf9BQ15ZzYUgTtEWQKApiAAwIaDe9r8/mprE5oKDuHD75+GZeK1EHY+at9ZdLOc+RezZr0Zz170LKb3mw6T3tTepRFRO0qLtEDvux/zcx/Ci5tePKN9yNxo6zxHOu+LqHM4JywBQuhgtdeipK7tX0pT52OrqkLlDz/gyKefnvE+FEVBqHco+kX0a8fKqKNjo63EdEYjQqdMQfQLL0Dx8lK7nDZz9bSVcAB350VpdnFNm99bXl+OBlENIRT0iUhu79KIqBOy19Wh8uuv0VRcjKqflqtdTjO+ShQAYHtRRpvfm3G4EtE1ZbApOhhCQqDoeNnTWZzN0zR+Rj9ck3gN7uh1R3uXRUTtLC3KArs1DF4iCFG+UWe0j6wSNtqyp23nkhoZDNEQDODMniihzqexsBCHpk1H8QsvshMBtUnna6kj+oPz4uRAxQHYhR06RZ6b8cRQx0VpUWU9quob4W82nOYdxzgv7kRjEFKjQtxSHxF1Ljpvb8S+9x6qV6xA0M03q11OM+Hecchp2I3MI22/qXn81/uQMygfj5/3N3xyzZVuqI7c5fjPeCLSrrQoC0RjKJoOPILXJ17R5vc3NNmRW+YY5k22Rtsj9Udw1HoUgGMiMuo8ukf4w7YtDDpTKXIqcnBB1AVql0RuZurWDebe58KUnAxRVwfFt+3DG7yz4x1UNVZhdOJoJASyd70s2GgrscaiIugtFuh8zmymVrXF+MXAoDOg3laPwppCxPjFqF2SxwT4GBDqZ0JptRX7S2rQOzaw1e/NOupo+LA3hCElwt9NFRJRZ2MID2/WYCvsdoj6etU/I7pZuiGnFK5hXdqiqO4AdMZKdAmOhCFGns8ILXA22p5JD6SVB1fCx+CDnqE9OeYbUQeXGO4LL52CqvomFFTUIybQu03vzy2rgc0u4GfyQqRFrjGsqxuqMTByIOqa6uDt1ba/N1JXSoQfGssHw1bZDxdGXap2OeQBisGAbp98clb7+Dr7axyoPIALoy9ko61E5OmaSCco+Ps/sK9ff1QuW6Z2KWfES+fl+lZZxnFtk85wMrIdhx2PGOubwhEdINfFLRG1jmhqQsHDDyNv8l2w19WpWst5UX3QcOR82KvObfN7U21PoDZ3MvpFpbqhMnInZ6NtSV0JqhraNkHecxufw50/3ImMI20fUoOIPMvkpXf1kN1TUAmb3dam92f+MZ5tYrifdJPrxlpi8V76e1h41UK1S6E2CvM3wU+kobGyF6pr+OUinV6jrRF5VY55J5zj/pMc2GgrMduRIwDQqXsfnc2Yd52dczIy5+QLrZVR7ui1FGaOk+7ilohapyEvD9UrVqJ22zbUbd+uai2XdesDa9EYHC7siSZb28YAK8pvwMMr16L3iq84flgn42/0R5h3GIC2DZFQ21iLwppCALypIeos0qIs8ArYhEe23IBnfn2mTe91TUIWJtfQCNS5KYqC7n888Zh5uO0TS1PnZq9p+7w0B6sOwiZs8DX4Itwn3A1VUUfFRluJJXzzNZI3rIc5tfP2QIq3xAOQu9G2rT1tC2ocjxhzwgIiOhlTt26Iffs/6PL66/C9QN1x1qIDvGE26NBoEzhYXtvq99U2NMF0MAcXFf4Onx+/5SRkndCZTDiaW5kLAAgyBSHQHOiOsoionaVG+gPCC7X2I22+pnc12ko2ni3g6HlHnVdSuC/0frvxxf4PUNvY+usb6rys2dnIvPQyZF95VZvf65p00NKNHa8kwzsYyXkFBUExtH4Sq47mbMa86+wSw52Ntq3/pq6+qR5VtmIAwLkRSW6pi4i0wadfP/gPudz12l5Xp0pvVZ1OQXyoATrzIWzOb/25/v1ti3A09jd8MPBShE6c6MYKyV3O5GkazqRO1PmkRVlgb3D0rGejbeuN+moUhn82HPvK96ldCp2B7pEWmKMWY2v1B1J2QJKRV0Qkmg4fRlNxMWxHj7bpvby+kRcnIqNOzTkAt4wfdM6L09yyGjTa7DDoT/8djKMHkoBo8sG5kV3cXCERaYXt6FEcnHwXfPr2QfjDD3v8G35b8GL4Bq7F97mluLFv68a2XZn/Eyq7bsSm6HEIuX2SmyskdxiTNAYXRl+Ic0LOafV7nL1yeVND1HmkRVlgtzoabcvry1FhrUCAKeC077PbBfaXytloa7VZkV+VDwGBEO8QtcuhM5AS4Y+mjWnw9bbBS8dmGRno/XwRv+hTGLslQO/XtrGMnZ3UeH0jH54dJHXk409Qt/N3BFx1FXwHDVK7nDPmHK+uLRd4WhFlMcPboEddow0Hy2tdwyWcStaRbACAvSEMyX+Mo0REdDo1mzahfscONObmInjSJBgiIz3682P9uqKgfCvKaq2tfk9hrWMoGM6u23n1CO2BHujRpvfsP8qbGqLOJszfhFBff9Q1BkBnqEBORQ76hPc57fsOHa1DfaMdRr0OsUHe7i+0AzHpTVh781rkVOQgxMxG284oJcIP1qLr0KAAsX6JapdDHuLdq9cZvc/ZSS0hgNe1suHwCJKqXrMGFZ99Dmt25x5WwMfgg/+N/B9W37QaFqNF7XI8SqdTkBDm+IYuu5WTkW0rygQAKE3hiAmU6+KWiM6cZfhwRD3zNOL+t8DjDbYAMCZhPGoyH4Ny9IpWbd9ga0B1YxHSDgr08492c3XUkbCnLVHn5Oht65hcp7VP0DmHRugW6guvVjxxpjUBpgD0Ce/D8S07qRA/E0J8jRCi7RNLk1yEEBweQWLyfboRACDophsRes898DlvgNqlnLU+4X0QbA6W8oIlqY3j2hZX10DYTAgxdoFOJ9/fFxGducDrr4c5JcX12l5f77GfnRzh+FIuq7gaQojTbn+w8iAijwg8+aENF/19mipj8VL7WJO/Bu/seAdFNUWn3dZmtyG3wjERGW9qiDqXMxnXNrO4CgCQFCHX0AikHckRfgDs2Jwv31B/smo6cgRl776Lwy+82Or3FNcWo7apFnpFj1j/WDdWRx0RG20l5XfppQibdh/M3burXQqdBeeQCK39djbR6wZUZzyB/oGj3FkWEWmcdf9+ZF95JSqWLPHIz4sP8YVOAarqm1BSffohEvaWZSGgFijx18OQnAxFx8udzurNbW/i9d9ex+6y3afdtqCmAA32Bhh1RkT7soc1UWeSFuXvarRt7QTDrknIWjFEmNa8v/N9vLDxBU5C1snFhTXBr/vjmLVvEprsTWqXQx4gGhpR/PJMlM+f3+oOEM6niGL9Y2HQd95J5OnM8C6GOr3so9mYuXkm5m6fq3YpHudstM0uaV2jbWZxNQAF3SMD3VcUEWlexRdfoKmgEGVvvwPR5P6bDLNBj+C4b+Gb+CK+2bf6tNtvLdyHvbEK7r3tPCR8+D+310fuc0mXS/CXhL8g2Bx82m2dvfPiA+Kh1+ndXRoRtaPUyDMfHkG2ScgAYFnOMnyw5wMU1hSqXQqdhV6RXQAosMOGQ
9WH1C6HPMArPAwB116LsOnTW30NzfH65caJyCTUWFQEW0UljPFdoTOZ1C7nrJXWlWLernnoaumKKb2nqF2ORyWG/zGmbYnjkeHTDRGRedjxGFkKHyMjorMQdv/9UExmBI0fB8XLM5cSPt51sCrl+L0kA0D6KbfdV+6YdDHUFAu9Bj7nZDa1z9RWb8vx3og6r8QwP+iaHI22+VX5aLA1wKg3nnR7IYS0jbZ2YceBygMAeL7r7LpHBsC+IxR6cyH2H92PrpauapdEbqYoCqL//Wyb3sPrG7mxp62EKr78CjmjRqHwscfULqVdJAUmYXzaeEzsMVHtUjyuLY8Mr8v/GXk+T8IU/g2Sw/09VCERaZGi0yHs3qnwCgpyLbM3NLj1Z0b5xAFoXQ+s/GrHuKbxlnh3lkQdDG9qiDovo5cOiSHREDYT7LDjYOXBU25fUm1FZX0TdIpjIjKZFNUUoa6pDl46L8T4xahdDp2FlAg/Vw9z5xfORH92bti5GNltJPqG91W7FFIBG20lJJqaoPP3hykhUe1S2kWIdwgeHvgwbki5Qe1SPM5s0CM22AfA6ce13VSwBzpTMbxMlYgJ9PZEeUQkieq165B9RTrq92W47WckBSUAAIrrTn0jbxd2NFXm48X3mjB2yToIm81tNZFnNNmbcKDiwGknofPSecFitKCbhY22RJ3ROZEW2BscDVinG9fWed0bG+wDs0Gu4VCcX1B19e8KLx0fnO3MAn2M8EYUAGDH4UyVqyFPstfWoj6jddfNVydejRcveRGXxV7m3qKoQ2KjrYTC7p2KlI2/IuSO29UuhdrBsXFta065XazhItQevAPRygjodKceRoGIqLWEECh7+200FRWhfN48t/2c3hHJAIAacerx+4prixFbXI/4YiAqNxeKXq6bea1ptDdi0MJBuPrLq1FaV3rKbR+94FGsu3kdRnQb4aHqiKg9pUVZYLc6JiM73VMV2RJPQsanCrQlxtcxJEJOKyfgo86v4eBB7Os/AAduHsvOBXRabLSVlKIoUAzamXmwuqEa20u2SzmDqnMcr+zT9LQtOqKHrSYZvULO9URZRCQJRVHQZfYbCJk8GVFPPuG2n3NhXBoAQOgrUVR15KTb7SvLwoEIBc9fE4SA+6a5rR7yDIPOgHCf1k9OpCgKdAovb4k6o7QoCxqODIL/0Sm4LuW6U27rGs9Wwnka2GirLSnBjqdfi+vzTvtECWmDISYGiskEna8PmkpP/YV0ZUMl8qryYLOzcVdWvKolTfh438e45btbMG/XPLVL8bjEsGOTkZ1KxmHH+uQIjmdLRO1LHxCA8Afuh2I8NmmMaOcxbrsEBgNNAQCADQf3nHS7zQX7UOOtYGt8V3S59pp2rYHU4WyYaO2M8kTUOaVF+cNeH4vConh46wJPuW2mxD1tnUNHsNFWG/pGJUEIBQ2iBuX15WqXQx6g6PVIXrUSKWvXwhARccptV+etxpWLr8SUn+SacJ2OYaOtZGq3bkXuhIkoefNNtUtpV87x62S8oXMNj3CKnraVDZXYXPkRvCzbkBQm12QNROR55fPnI+fmm2GrrGzX/foojnHfthWdfAwwe0MIGiv6INzQE4rCoWC0wNVoW3nyz/gvMr/AlYuvxJvbtHV9QySTED8TwvxNEALYW1R1ym1dPW3D5Wu0dd7vJAQkqFwJtYdzokIhGh0Tu8p4LysrfWBgq7arsFbAqDMizj/OvQVRh8VGW8nU79mD2o0bUb9rt9qltKvje+HI9liJs9G2oKIeNdamFrfJLM9GlXkpTOFL0T3S4snyiEgytqNHUfrOu7Du3oOKb79t132HmWMBABlHTj7Dsr4mGZes74aRdT0g7PZ2/fmkDudn/P6jJx/vL+toFvKq8lDdcOqnToioY0uLssDL/3e88/tbKKsra3GbyvpGFFdZAQCJkjXaVlgrUFbv+HuJD4hXtxhqFykRfrA3OMZy3lXKyciouVvOuQUbx2/EAwMeULsUUgmnm5SM36WXQu/nB31wsNqltKtY/1joFT1qm2pRXFuMCN9TP2agJUG+RoT4GlFW04Cc0hr0jAk4YZsthX+M9dsYji5B3h6ukIhkog8MRNx776Jm3ToEjR3brvvuZumG3DKgoCb3pNuU7s7A/dsWoWnfEuC+Ue3680kdzt5kp+ppe2evO3FZ7GUIMgV5qiwicoO0KH9sbvoBP5eVYN+Ry3Ch94UnbOPsZRthMcFi1s4cHa1xoPIAACDcJxy+Bj49pwX+ZgO8EYVG7MOOokygp9oVkSc0lZej5NXX0FhYiLh33znltnqdHr46/nuXFRttJWPs0gXGLl3ULqPdGfQGxPrH4kDlAeyv2C9Voy3g6G1bVlOOrOLqFhttfz/s+NY2QB8DnY6PCxORe5m7d4e5e3fXa2G3A3Y7FK+zu+zoEZ6EVWVAha2gxfUNtgbklR3EttAkpCVGcngEjYi3xAMAimqKUNtYCx+DzwnbBJmDcF7keR6ujIjaW1qkBU27eyJI34BAU2CL28g8NILziQOOZ6stUT5xOAgg6xRPlJC26Ly9cfTzzwG7HU0lJfAKC1O7JOqgODwCaYbME5Ukhp96MjJn76Ro364eq4mICACEzYbCRx9D4SOPnPVwBefHpAIAmnQlqGu0nrD+95Ld2Nb9XTwz8SgiX3/jrH4WdRyB5kAEmx1PCDl7mRGRNqVFWdBQko6Kg2OQGpTW4jbZEk9C5rym53i22pIclAgAKKo7+ZNEpC06b2+EP/QQYl57FTqfE7+MBoCDlQdxwzc34PGfH/dwddSRsNFWIva6OlT99BOs+7X5DZ7UjbbOychO0mhbXH8QAJASzAs8IvKs+l27UPHVV6j45lvUbd9+Vvs6N7IrhN0IRbFjU/6J477tLj4EYfeCzhaCCIvprH4WdSzO3rbOWdOPl1+Vj1e3vIplOcs8XBURtbeEMF8Y9TpUW5uQf6SuxW1k7mnbK7QXrkm8hk8WaMx50b1QkzMV8fWPqV0KeVDI7ZNgGTECOt+Whz7IPpqNveV7sbd8r4cro46EwyNIxJqVjfx774M+JAQpP69Tu5x215rZpbXKOQmD8yL2eA22BtTZSwAF6BfV/YT1RETu5H3uuYh+/nkoJiN8+vY9q33p9ToE19+IgnKgourE8bkD7X1Qs/cJnNvVm0MjaExCYAK2Fm9t8YvZXWW78P7O93Fu2LkY0W2ECtURUXsx6HVIjvDDrsIyrNz/OyaGnH/CNpmuRlt/T5enuuFdh2N41+Fql0HtrGdUGOz1scg+bFO7FOpAnO0anHRQbmy0lYhobIC5Rw94hYaqXYpbuBptj8rXaOt8POxAaS2abHZ46Y91os+pyAUUO4TNhP4xHB6BiDwv4Oq/NHstmprOeHzb3oFX4GBuAQ4dOXFdbnY+PlsyA5Ux3SDuGgpFrz+jn0EdTzfLyZ+mcS5zbkNEnVtShAG5lsfx8m6B63pvgJ/xWI/a+kYb8o7UOraTsKctaVNyhON3ubS6AeU1DQj2NapcEXmCsNlgzc5GQ24uLMNP/DLGdX3DMaylxuERJOLTrx+6
ff4ZYv8zV+1S3MJ5MiuuK0Z1Q8vDBGhVdKA3TF46NNjsJzxKtvmQ43EK0RiOuBDOOklE6rJVVSF34m0oX7jwjN6fGPbHGN4tPFlQvWs3fJqssFir2WCrMacaAsk5ZAJvaoi0oVd0OITN0Yj153/z+0tqIAQQ4G1AqJ9cDVtVDVXYf3Q/Gu2NapdC7czH6IXIiHyYIr7C/B2fq10OeYjt6FHkXDMKh6ZNh72m5oT1vL4hgI22pCEWowWh3o5exLJNVKLXKUgIa3mIhO2HHeM++umiodfxcWEiUlfFN9+gbssWlM56HbajR9v8/i4hCrz8duG3Iz80W95ob8SiqE8x7eY0VN13fztVSx2F84YlryoPNnvzx0cPVBwAwIl5iLTinCgL7FbHTOp/HvYsq+TYeLayDYPza+GvGPXVKNy29Da1SyE3CA4qhjF4A9YXrlW7FPIQr5AQGBMS4N2/H5qOHG22Tgjh+tKK1zdy4/AIpCndArqhtK4UORU56BnaU+1yPCoxzBd7CiuRXVKNYYhwLc864viGLso7Tq3SiIhcgsaOha20FP7Dh0MfGNjm9wf41cI79n84bDfDbn8QOp3j++eco3loMOajsKsRyUMua9+iSXXRftH49C+foqulK/S6Y72o7cLu+qKWPVGItCE1ygJ7Qzjgux/7yrKAxGPrXJOQhck3NEJlQyV8vHzQ1cLhzrSoR0hfZGcWweLDSeZkkrDk2xa/gCqrL0NVQxUUKPw3Lzk22kpCNDUh+8qrYIyNRcyrr0Bvsahdklt0s3TD7yW/o7KhUu1SPC7xj4vX7JLmPW0P1x0EACQF8Rs6IlKfoigImzat2TJhs7V6OIPzY1Ngq+sCe0MoDlVUITYoAACwKX+PY4PGcEQHnjhJGXVuOkWHtJC0E5YfrjmMuqY6eOm8EOMfo0JlRNTegn2N8FWi0AhgV0lWs3XZxcd62srm2uRrMSZpDOqa6k6/MXU6F3bpg0U/K6jyDVa7FPKgkz0x4OxlG+MXA5Pe5MmSqIPh8AiSaMzPR+PBg6jdsgU6P+1e5Dw44EH8Ov5XjE8br3YpHue8eM0uOTYejhACVfYCAEDviGRV6iIiOpWG3FzsHzUKtVt/a9X2FrM3Qiv/jvqCm5Ff3uRa/nv+boxdZcOQDCNgt7urXOpgnOO9xfnHwaAzqFwNEbWXeEs8gBaGR5C40RZwNPD4GHzULoPcIDncHwCQcbgKQgiVqyFP+/Mx5yRk5MRGW0l4RUWh68KFiH7xBSg67R52H4MPdIp2851K4nFj2jpP+jUNtbBVp8BWH4nz47qrWR4RUYtK58xBQ1Y2Dj//fKtvUlyTkR33ZEF1xk6M2SAwcfkBQMOfczLbXrIdT214CvN2znMt43hvRNrUMzwFAHCkodA18VaTzY79pXI32pJ2JYX7QedVjSrsw87iA2qXQx5iq6hA7oSJyLrkUoimY50ReH1DTryrkYTOZIJPv76wXHGF2qWQm3QL9YWiABV1jSiraQAAlFcrqMkfj6a8B5AYGqByhUREJ4qcMQOBN9yA2DlvtnpSGcfNug27Dxe6lhXYi/FjHwUH+veSbnIaWRRUF2BRxiKsyFvhWsaeKETa1De6K4TdCAEb8qvyAQAHy2vRaBPwNugRI9kwOIdrDmP0l6Px0OqH2AtTo8wGPYLivoVP17exeO9StcshD9H5+6N+5040lZSg4cAB13Je35ATG21Jcx5d9yiu+/o65Fbmql2KR3kbj13AOsf7yjhcBcDRC1evYyMGEXU8Oh8fRD39FLxCQ13LxGmGN2g0bYdf98exsvwVx/ZCICuoDO+M1MM6ddop30udV8/Qnvhrr7/ilrRbXMucj07zpoZIW3pGB8JuDQMA7D/q+HfuHBohIcwXOsmua3Mqc5BdkY195fv4xaSGhZkcE0fvLctWuRLyFEWnQ/TMl9Hti8Uwdj024RgbbcmJjbaSqPjmG1StWgVbdfXpN+7kMo5kIONIBrKPyvdh53xULOuPR4Z3Fh0GIJDMR8iIqJOo2bABOdddj6aSkpNu0z0sBorOhhrh6GlbWF0MoauDEAouiE3xVKnkYbH+sZjWbxquiD/21ND+o44xbXlTQ6Qt3UJ9gUZHo+1vRfsAHLu+lXFoBOe5Lj4gXt1CyK0SAx2fZQW1cnU+kp3/5ZfDnJYGxeAYm7+uqQ4FNY55aXh9Q2y0lYAQAkVPP4P8KXej8dAhtctxu3v73os3h76JfuH91C7F45zj2mYXOyYj++LQs/DrPgNGy241yyIiahXR1ISiZ56Fdc8elM79z0m3uzAuzbG9/iiKqyvwa94eBFQLoDEE8cEcCkYWFdYKlNWXAeBNDZHWeOl1CDbGAgB2FmcCOG4SsjD5Gm05vqUceoY7Jo6uaCpQuRJS29ODn8Zd596FIHOQ2qWQyrzULoDcT1it8B18IRpyDsAYH692OW53SZdL1C5BNa5G2z96IlQ0FUHRNSAlNFrNsoiIWkXx8kLs3LdQ9u57CP/H30+6XdegMMDmB+irsSFvL/bt2Yx33rChzL8Myu12AHrPFU0eVVpXiswjmQjzDoPFZMHI+JE4aj0KX4Ov2qURUTvrZumGbQ3AwaoDAI4N/yVjT1sOBSOHQbGpmJMB2JQqHKk7giBvNtjJwN7QgOrVq9GQnY2Qu+6Ct5c3RieNVrss6iDYaCsBndmMLq++qnYZ5AHOi9jskmrY7QLW/X+HVSnGJennqlwZEVHrGGNjEfXkE82WCSFOGMPPB9GoRQa2Fe2DNXsP7ArQ4G2GomeDrZa99/t7+GDPB5hwzgT833n/hxcvfVHtkojITXqFJ2NbPnCksQB2ux3ZJY4nyaRstOX4llI4JzIM9sZA6AxHsaUwA8MSzle7JPIEIXDob/cDNhsCRo+GITJS7YqoA+HwCKQ5VpsVyw4swzs73pFudtXEMEdPo0NH65BVUo26RsDLFomkMH5LS0SdU/nChcifei9EY2Oz5aHmLgCAjPL9WBdZiwkP6rH8juvUKJE8yNlg4WzAICLtGhSXhprs++FT9BQOV1lRbW2CXqega4hcPetrGmtQXFsMAIi3xKtbDLmV0UsHs3A02G06tFflashTdCYT/IcPR8C110I02bD+0HpsLNyI6gbtz0dEp8eethIQdjsUnVzt839f/XcICIxJHoNQ79DTv0Ejgn2NCPQx4GhtI77fWQTAMcOul16u409E2tBYVITiF1+CqK9HxZIlCBw92rUu3tINB8uAQzW5qLQdgs2goFvaeeoVSx7hbLTdX7EfZXVlCDQFQq9j72oiLeoVEwR7QwQONTRia+5RAEB8iA+MXnJd1x6oOAAACDGHIMDEcdu1LtQUiwKxF3vLstQuhTyoy2vHnoye+fU0ZBzJwJtD35R66EdykOsTT1J5d9+NrOFXoHrNGrVL8QiT3oQYvxgA8vXEURTFNa7tosxPYY76FCFhcv0dEJF2GCIj0eX1WQi95x4EjBrVbN05YUkAgKON+agvvgLW0sswuGsPNcokD3JOwlNQXYCxS8bi/IXnY1vxNnWLIiK3CPQxIirADAB
Y8rtjYiYZh0bYX7EfAIdGkIWzN3VeVa66hZBqulq6ItY/Ft0s/DdPbLSVQkNmFhrz8qDz8VG7FI9JCHTc1MnWaAscm1G3uPF3GAK3ws/3iMoVERGdOb9LLkHYtPtcY9oKISCEwPkxqQAAr6ZiPPD9Dty4wYCU4BA1SyUPCDYHw2K0QECgsKYQVpsVUb5RapdFRG4SE1UAc9QirCpcDEDORluOZyuXnuEpAIAjTfkqV0JqsFVU4JXLXsF3136HWEus2uVQB8BGWwnEf/oJ4hbMhzktTe1SPMb5rZSMjbaJ4Y5xvnSmEgDAOeGJapZDRNRuhM2GoieeROlbb6F3VDcIuwFxpTZcXPA7/rJ/PfQGjvqkdYqiuBouXrj4BXw35juE+4SrXBURuUtQQCUMgVsgvPcAkLvR1vmkAWnbBbGOL6UblVLUN1pVroY8xV5Xh8zLhyDj/Atgq6pSuxzqQNhoKwGv0FD4DhwIna88g/bLPFGJY3gEG3TGMgDAwBh5GuuJSNtqfv4ZRz/5BKWz34R9/34Y7REotwD/PT8BOwcPU7s88hDnZ3xuZS5iLbGuXthEpD0XRPeHtWQYGo4MBgAkhfmrXJHnsaetXHpHxULYzFAUgc0FmWqXQx6i8/YG/ricsWZlq1sMdSgdotH2zTffRHx8PMxmM84//3xs3LjxlNsvWrQIqampMJvN6NWrF7777jsPVdo5lLwxGyVz5rS8bs4clLwx28MVec6cbXMwd/vcFhtt526fiznbWv570YJbP38at33xLJLC/aAYy6EoNgi7Ef1j4nHbF8/i1s+fVrtEIqKz4nfJJQj723TsSYvB/FmPItAQgzKLgqVDDmLT8GAAwNv33oS377lB5UrJHZyf8c7eZjJ9xhPJxnlde2m3nmgoHQZbtaP3YWK4rxTXtc7zXZO9Cbl/jG3qvL/h+U67bv38aUz+5nkYRSSEzYzfi/Jc67T+e+/8N98SrWd/+54b8Pa9NyHu7bfx+ZvX4op992JRxiLHOo1f135z/5P45qGWj+03Dz2Nb+5/0sMVdTyqN9p+8skneOCBBzBjxgxs3boVvXv3Rnp6OoqLi1vcfv369Rg7dizuuOMO/Pbbbxg9ejRGjx6NnTt3erjyDkyvQ+nrb6BkzhxUrVqF8gX/Q31GBkrmzEHp628AetUPu9voFB3e3PYm1uQ7Jl0rqClAbWMt5m6fize3vQmdot3sep0OWyo/xuOrX4PJXAoAMNojMPmbF7Cl8mPoddrNTkTyCJ0yBaVRQbj4px0YuiYDEAIA0DMsBW/fexMu/mkHwPOdJjk/4/eV7wMALD2wFFsOb5HiM55INs7r2qfXzYLJy/FvOybQG/cskeO61nm+e3v723hm8DOY0nsKIn0jeb7TOOfvvakxFdUZM6DUO8a3ve2LZzX/e+/M/ueGWxmyQ6fDxT/twPzXHkFmQz4qrBXw8fKR47pWr0PStwtPaLj95qGnkfTtQk23XbWWIsQfdzsqOf/883Heeedh9mxH70+73Y7Y2Fjcd999ePjhh0/Y/qabbkJNTQ2+/fZb17ILLrgAffr0wdy5c0/78yorKxEQEICKigpYLJb2C9LBOBtoTcnJsGZmwveii1Czbh1Cp92HsHvuUbs8t3JezJj1ZtTb6nFe5HnYVLQJl3a5FFN6T0HP0J4AgAprBb4/8D0MOgPGJI9xvX/lwZUoqStp089MCEjAgMgBAACrzYqvsr4CANyQcoPrsc31BeuRX9W2AeVj/GIwOGaw6/Wn+z4FAFydeDW8vbwBAFsPb0XW0SwAwPxtS3GwfjOUhkgIYxG8mqLR5FWA/pabMW/MI2362UREHZnzQrZRp+DFATfjQr+duHzF71g77FxMnv2J2uWRmzg/453S49Px/YHvMbXPVEzpPUXFyoiovTkba7yqLkN1dSD8QrahyXgAceYBmNhn5Cnfa/Yy45rEa1yvlx9cjrK6MlwYfSG6+HcB4Bhi5dfCX9tc143db3T9/7pD61BQXYD+Ef2RGOiYR6KopsjVgaQtRieNhlFvBABsKtqE+bvmY3X+atf5zXn+4/lO25y/99aS4Ui2nItK4084qmxHjBiNW1LvRG1TBfZV/9zm/ab5XwKz3jEmdEHdPhy2ZiPMGI8uPucAABrs9dhVuaLN+032GwQ/ryAAQHF9Dg7V70GQIRrxvn0AAHZhw/aK71u1r41FP+Oosh2BojduSrgP3+7/FoeULxEhhqF3ZAr8vUKR5DfQtf2Oih9gE01tqjfWuydCTXEAgMrGEmTXbIK33oJU/4tc2+yuXA2rvaZN+40ypyDSnAQArmNkUEzoGTDUtU1G1XrU2I62+P6wzz/DqLUF+OQiL+SH2XH57hD021eC7y4+BxF3vALLjs0wlh5GdfeeqI/pCgDQV1chaOMaCIMBZRdf4dqX/66tMB0uRE1yGupiHU8n6epqELxhFYReh7JLj50//fbugLkgDzUJKaiLTwYAKA1WhKz7CQBQOuQq17a+mbvgnXcAtV0TUZvoePoBTU0IXeM4vqWXpANejvklfLL3wic3G3Wx8ahJ7uHYVgiErnQ8JV920TAIo8lR26wX0fe3Ffi91yVoePBf8P90PlK++whZfxmHq19+rE3HoTNpbdukqjN2NDQ0YMuWLfjnP//pWqbT6TBs2DBs2LChxfds2LABDzzwQLNl6enp+PLLL1vc3mq1wmo9NoB3ZWXl2RfeCTgbZktffwNQFGkabAG4LmKcN3WbijYBAFbnr0ZqcKqr0basrgxP//I0AkwBzRptP9jzATYWnXqIjj+7MeVGV6NtXWMdnv7F8U3R9SnXQ/ljcJrFmYvx/YHWfWA5DYsb1qzR9plfnoGAwJC4Ia5G22UHluGjvR81e58wFgEAG2yJSLMmz/4ES6+4GPEHS/GvjR9BAdhgK4EpvafALux4a/tbAMAGWyINmzfmEdz2BbAFH8PsDzibZg7Wb8bTv2w+5XtDvUObNdrO3zUfvxX/htcue83VaLurdJfrmr219Iq+WaPton2LsCJvBR674DFXo21ORU6b9ws4voRyNtp+l/MdVuevxvmR5zt63O54G432Rp7vJDBvzCO45oMG5IQtRq74CYoi0FSdhL15F+DRvTuhMxXAN6Htw2Ms/tkHojEUAGAMWwZT6Co0lA+G9bAdAKB4VcAvue37/eJXAXudoxHRELQO5shv0VjRB/UFzqamJvintXK/f4zpelTZjrn7J0NR7LCWDMeBhmAcVuagqToZdXk+rs39Ut6Goq9vU731hdei8aij4VfvmwGfuPdhq49CbU6gaxvfxPdd88O0lrU4HQ1llwOA6xjZGy34eGWEaxufrh9A73Ow5R1cBFiFgpvWNcEOQIcSfNg/GR+E3A58uROP//IRBhXtwqw+12NZ/AUAgLjKIvxnxRs4avTF3WXRrl39ffMiXJ7/G+b2GoWvEi8GAETUlGHej2+gXm/A3UdiXdtO++1zjMz9FfPSRuKT7o4G5gBrFT5e+gYAYEplV9e2d+34EqP3r8PHKUMx/xzHGdnUZMWX3zq2nV4eBauX4x
w2cfd3uDljBb5IvBhv9/qjn6gQWPqVY9t/lISgwuT4EuHmeiP6Auj1+xo03v4zDHab5hts20LVRtvS0lLYbDZEREQ0Wx4REYG9e/e2+J6ioqIWty8qKmpx++eeew5PPinnOBhh99yDsrfmQjQ2QjEYpGiwdZrSewr+s/0/aBJNUKBgSNwQAM1nXfUx+GBo3FD4ePk0e2/f8L7wN7ZtkoPuwd1d/2/QGzA0bugJ2/QK7YUme9u+CewV1qvZ66FxQyEgXBd0ANA9qPsJP++n3BVQFAEh9GywJSLNSv92OXb37g29ABr1YIOtJO7pcw/e3vE2bMIGg87ABgwiDZs35hH0nLfIMVeDUDCs65BWve/P1/L9I/oj2ByMUJ9Q17II34gWr9lP5c/DEpwbdi4URUGMX4xrWbA5uM37BQCDzuD6/7TgNAyNG4qR3UZia/FWNNobeb6TyGc3z0C//30FRef4ve/hn46QHo42kHqhwwHRr837HJAcDaPi6BFbIpJRJioRFJKCiFDHfhuFL7LPYL99usXCR3Hso1wkolj0gyUwBdFBjmV20YSMNu63TPwGRbFD2PW4PGIcKsQ+FIp+8PWPRWyPY21BGfY+sKOhTfuOjE5AYIxjHzXCijzRDyZzKLodt9/99t5owNE27TcsIgkhkc2PkZfBF0nH7TfX3hN1CD3ZLrDiYuC6nzdCLwC7ApSMeBzpf6yzHz0HWQHeiE1LQHqCY5/+FXpkHRqABpMZ6cf9HF11GrJ89YhKS0R6smO5T7UZWQcHwKb3aratqS4VWWYbQs9JRnqqY7mx3h9Z+x0d0o7f1rexO7IM9QhITXYt1zc1ICvTse3QHpGweTnOY4H2FGQplfBN6n5sH0Iga69j20vOiUKD2dEOE6JLRpa9FN0ytsJgt6FRp2eD7XFUHR6hoKAAMTExWL9+PQYNGuRa/ve//x2rV6/Gr7+e+LiK0WjE/PnzMXbsWNeyOXPm4Mknn8Thw4dP2L6lnraxsbGaHx4BODZEgmIwQDQ2StPTFjj2+KRBZ5DuW2nnIzXCroeis7GnLRFplmuIBD1gsLGnrSxk/ownko3s17U838lJ5t97mbPLfF3rHMO2UaeXpqdta4dHUHVU39DQUOj1+hMaWw8fPozIyMgW3xMZGdmm7U0mEywWS7M/MnA22IZOuw+pv+9A6LT7XJOTad3x4z1tvXUrpvaZije3vYm5208/5nFn5/yQ62+5GTsnbUN/y80tDuhORNTZOS9s1w47F+fu2oO1w87FxT/twNv33qR2aeRGMn/GE8lG9utanu/kJPPvvczZZb6udTbYZv1lHM7dvRNZfxnX4uRkslK10dZoNKJ///5Yvny5a5ndbsfy5cub9bw93qBBg5ptDwA//vjjSbeX0fENts6etWH33CNFw21LA/RP6T1Fiouc4z/knN9GzhvziFQfdkQkh+MvbJ09ECbP/kSqC1wZyfwZTyQb2a9reb6Tk8y/9zJnl/m69vgGW2fP2qtffowNt8dRdUxbAHjggQcwceJEDBgwAAMHDsRrr72GmpoaTJo0CQAwYcIExMTE4LnnngMATJ8+HZdeeilmzpyJq666Ch9//DE2b96Mt99+W80YHYvN3uJQCK7XNrsKRXmGXdhbfGzI+doutJvdZre3+PiIcxIHm1272YlIMnZ7i4+MTZ79iePCluc7TZL5M55INrJf1/J8JyeZf+9lzi71da3N3uJQCFe//Bi++WO97FQd09Zp9uzZeOmll1BUVIQ+ffrg9ddfx/nnnw8AuOyyyxAfH4958+a5tl+0aBEeffRRHDhwAMnJyXjxxRdx5ZVXtupntXbcCCIiIiIiIiIiIqL21Nq2yQ7RaOtJbLQlIiIiIiIiIiIiNXSKiciIiIiIiIiIiIiIqDk22hIRERERERERERF1IGy0JSIiIiIiIiIiIupA2GhLRERERERERERE1IGw0ZaIiIiIiIiIiIioA2GjLREREREREREREVEHwkZbIiIiIiIiIiIiog6EjbZEREREREREREREHQgbbYmIiIiIiIiIiIg6EDbaEhEREREREREREXUgbLQlIiIiIiIiIiIi6kDYaEtERERERERERETUgXipXYCnCSEAAJWVlSpXQkRERERERERERDJxtkk62yhPRrpG26qqKgBAbGysypUQERERERERERGRjKqqqhAQEHDS9Yo4XbOuxtjtdhQUFMDf3x+KoqhdjttVVlYiNjYWeXl5sFgsapfjUcwuZ3ZA7vzMzuzMLg9mZ3Zmlwezy5kdkDs/szM7s8tDtuxCCFRVVSE6Oho63clHrpWup61Op0OXLl3ULsPjLBaLFL/4LWF2ObMDcudndmaXDbMzu2yYndllI3N2QO78zM7ssmF2ObKfqoetEyciIyIiIiIiIiIiIupA2GhLRERERERERERE1IGw0VbjTCYTZsyYAZPJpHYpHsfscmYH5M7P7MwuG2ZndtkwO7PLRubsgNz5mZ3ZZcPscmY/FekmIiMiIiIiIiIiIiLqyNjTloiIiIiIiIiIiKgDYaMtERERERERERERUQfCRlsiIiIiIiIiIiKiDoSNtkREREREREREREQdCBttiUhzVq1ahbq6OrXLICIiIqJ2kJOTg6amJrXLIA/jMZeXEELtEog6BDbakmZs374dzzzzDObMmYPS0tJm6yorK3H77berVJn7vfvuu5g4cSL++9//AgA++eQTpKWlISEhATNmzFC5Os+74oorcODAAbXLcKvi4uJmr7dt24aJEydi8ODBuP7667Fq1Sp1ClOJ1WpFdnY2rFar2qW4Va9evfD0008jLy9P7VI6jMOHD6OoqEjtMjzGZrPh8OHDKCkpUbsUj9q9ezfuuece9O3bF1FRUYiKikLfvn1xzz33YPfu3WqXp5rs7GwMGTJE7TLcprCwEB988AG+++47NDQ0NFtXU1ODp556SqXK3O/HH3/EjBkzsGLFCgDAmjVrMHLkSAwZMsR1vSeT7t27IzMzU+0yPKqgoAAzZszA+PHj8dBDD2Hv3r1ql+Q2y5Ytw++//w4AsNvtePrppxETEwOTyYQuXbrg+eef12wj3tVXX43//e9/UnY4sVqteOihh3DJJZfghRdeAAA888wz8PPzg7+/P8aNG4fKykqVq3Sf7du3Y8KECUhISIC3tzd8fX3Rq1cvPPbYY5rO7VRaWooXX3wRY8aMwaBBgzBo0CCMGTMGL730knTXuaeiCK2e/egEe/bswVVXXYX9+/erXUq7++GHH3D11VcjOTkZVVVVqKmpwaJFi3D55ZcDcNzUR0dHw2azqVxp+3vttdfw6KOPIj09HRs2bMDUqVPx6quv4v7774fNZsPMmTPx0ksvYfLkyWqX2u769evX4vJt27YhNTUVZrMZALB161ZPluURer0ehYWFCA8Px/r163HZZZfhwgsvxMCBA7Ft2zasXLkSy5cvxyWXXKJ2qe1u3rx56N69OwYNGoT6+npMnToV8+fPhxACOp0Od9xxB2bNmgWTyaR2qe1Op9MhODgYR48exbBhw/DXv/4Vo0aNgpeXl
9qluV15eTkmT56MjRs34qqrrsLs2bNx11134f3334eiKDj//PPx+eefIyoqSu1S3WLJkiV44YUXsHHjRjQ2NgIA/P39cfXVV+PZZ59FXFycyhW6z9KlSzF69Gj069cP6enpiIiIAOD4bP/xxx+xZcsWfPXVV0hPT1e5Us/bvn07+vXrp8nrm02bNuGKK66A3W5HY2MjYmJi8OWXX6JHjx4AtH1t98EHH2DSpEk499xzkZGRgTfeeAP3338/rr/+etjtdnzwwQf48MMPcf3116tdaru79tprW1z+1VdfYciQIfD39wcALF682JNleYSPjw9yc3MRFhaG3bt348ILL0RYWBj69u2L33//HQcPHsSGDRtw7rnnql1qu0tNTcU777yDiy++GM899xxmzpyJRx55BGlpadi3bx+ee+453H///fjHP/6hdqntTqfTQa/Xw9fXF2PHjsWdd96J/v37q12WRzzwwAP45JNPMHbsWHz33Xe4/PLL8e233+Lf//43dDodHn/8cYwcORKvv/662qW2u++//x5jxozBlVdeCW9vbyxevBi33347fH198fnnn0MIgXXr1iEyMlLtUt1i06ZNSE9Ph4+PD4YNG9bs2m758uWora3F999/jwEDBqhcqfrYaCsRLV/YX3jhhbj88svx7LPPQgiBl156CU8//TQWLVqEESNGaPrCPi0tDY899hjGjRuH3377DQMHDsTcuXNxxx13AADee+89vPXWW9i8ebPKlbY/g8GAYcOG4YILLnAtE0Lg6aefxpQpUxAeHg4AmuxtrNPpUFRUhPDwcFxxxRWIjY3Fe++951r/t7/9Db///juWL1+uYpXukZCQgI8++gjnn38+/u///g+fffYZXnnlFdeF/d///neMGjUKL774otqltjudTof8/Hxs3LgR77//PpYuXYqgoCBMmDABd9xxB9LS0tQu0W3uuOMObNy4EXfddRc+++wzBAYGIicnB3PmzIFOp8P06dORlpaG+fPnq11qu/vf//6HqVOnYvLkyTCbzXjvvfdw2223oWvXrvj444+xa9curF+/HsnJyWqX6ha9e/fGqFGjTtqr8oknnsDixYuxY8cOD1fmfqe7UT106BBefvllTV7fDB8+HLGxsXj33XdRU1ODf/zjH/j000/x448/om/fvpq+tuvbty8mTZqEadOmYfny5a4vZ+6//34AwMyZM/HFF19g3bp1Klfa/nQ6HS655BJ069at2fIFCxbgmmuuQWBgIABosrfx8dd2o0ePht1ux+LFi+Hl5QW73Y7x48ejuroa33zzjdqltjuz2YyMjAzExcWhV69eePzxx3HDDTe41i9ZsgR/+9vfNNnbWqfTYefOnfjhhx/w/vvvY9euXejVqxfuvPNOjB8/HkFBQWqX6DZxcXF4//33MWzYMOzfvx/JyclYvHgxRo0aBcDxxMFf//pXTT5B2bdvX9x1112YMmUKAEfWadOmYc+ePWhsbMTIkSMRGxuryXMdAFxwwQXo3bs35s6dC0VRmq0TQmDKlCnYsWMHNmzYoFKFHYggzbj//vtP+eeWW24ROp1O7TLdwmKxiKysrGbLPvzwQ+Hr6yu++eYbUVRUpNns3t7eIjc31/XaZDKJnTt3ul5nZmaKwMBANUpzu3Xr1onExETx+OOPC5vN5lru5eUldu3apWJl7qcoijh8+LAQQoioqCixYcOGZut37twpQkND1SjN7Uwmk+t3PiUlRSxdurTZ+tWrV4u4uDg1SnO744+7EEIUFBSIf//73yI5OVnodDoxaNAg8d5776lYoftERUWJn3/+WQghRFFRkVAURfzwww+u9evWrRMxMTFqledWqamp4uOPP3a93rRpk+jSpYuw2+1CCCFuuukmMWbMGLXKczuz2Sz27t170vV79+4VZrPZgxV5jqIoIjo6WsTHx7f4Jzo6WrPXN0FBQWLfvn3Nlj333HMiKChIbNy4UdPXdr6+vmL//v2u1waDQWzfvt31es+ePSIkJESN0tzuo48+El26dBHvv/9+s+WyXdvFxsaKNWvWNFu/detWERUVpUZpbnf8tWxERITYunVrs/UZGRnC29tbjdLc7s/Xdr/++quYPHmyCAgIEN7e3mLs2LFi+fLlKlboPn++jzUYDM3uY3NycoSPj48apbmd2WwWOTk5rtd2u10YDAZRUFAghBBizZo1IiwsTKXq3M9sNos9e/acdP2ePXs0e23XVhzTVkNmzZqF1atX47fffmvxj5bHQTKZTDh69GizZePGjcO7776Lm266CV988YU6hXmAj48PampqXK/DwsLg5+fXbButDuI/ePBgbNmyBRkZGbjwwguRnZ2tdkkeVVVVhcrKSpjN5hOGAjCbzaitrVWpMveKjIx0HeuamhqEhoY2Wx8WFoaysjI1SnO7P38THRUVhX/+85/IyMjA8uXLkZiYiGnTpqlUnXtVVFQgJiYGABAREQEvL69mQyFER0ef8DmgFbm5uTj//PNdrwcMGICioiIUFhYCcDxeuHLlSrXKc7v4+HgsWbLkpOuXLFmCrl27erAiz+natSteffVV5OTktPjnVH8vWlBfX9/s9cMPP4x//etfuOKKK7B+/XqVqnI/g8HQbAxfk8nU7NrOZDJpdvzLm2++GWvXrsV7772H6667DkeOHFG7JI9RFMX1Oa/T6RAQENBsfWBgoGb/PsaMGYNnn30WNpsNo0aNwpw5c5qNYfvGG2+gT58+6hXoQQMHDsR//vMfFBQUYM6cOcjLy8Pw4cPVLsst4uLiXD0pN23aBEVRsHHjRtf6X3/91XXtpzUxMTHYt2+f63V2djbsdjtCQkIAAF26dEF1dbVa5bldZGRks2P9Zxs3bnQNmSA77Q+CJ5GkpCTcf//9uOWWW1pcv23bNs2Oj9OnTx+sXLnyhHw333wzhBCYOHGiSpW5X2pqKnbs2OF6LPrPExTt3bsX8fHxKlTmGQEBAfjoo4/w3//+FxdddBGefPLJExq2tColJQWA4xGSzZs3o2/fvq51u3btQnR0tFqludX48ePxyCOP4LvvvsOtt96Kp556CgsXLoSfnx9qa2vxxBNPYPDgwWqX6RbiFCMaXXbZZbjssss0O3FBcnIyvv32W0ydOhVLly6F2WzGDz/8gJ49ewJwjA3258dptSI+Ph6bN292ncu3bt0KnU7nupgNDg52jXOrRU899RTGjRuHVatWtTju2bJly7Bw4UKVq3SP/v37Y8uWLbjxxhtbXK8oimYn5+nZsyfWr19/wvidDz30EOx2O8aOHatSZe6XlJSEvXv3onv37gAcw2A4x3IFHDf3Xbp0Uas8t4uPj8eaNWvw5JNPonfv3njnnXekuLYTQiAlJQWKoqC6uho7duxo9vuflZWl2fEt//3vf2PYsGFITU3FoEGDsGjRIvz4449ISUlBVlYWysvL8f3336tdpkf5+Pjgtttuw2233YaMjAy1y3GLKVOm4LbbbsO7776LLVu24OWXX8a//vUv7N27FzqdDm+99RYefPBBtct0iwkTJuDOO+/EI488ApPJhFdeeQXX
XHMNjEYjAEfbjVavawHHZ/nkyZOxZcsWDB069IRru3feeQcvv/yyylV2DGy01ZABAwZgy5YtJ2201fKF/d133401a9a0uG7s2LEQQuCdd97xcFWe8cILL8DX1/ek6w8ePIi77rrLgxWpY9KkSbjoooswfvx4zfYsPt6fe9X9efKlnJwcTU4+BzjGKN65cycSEhIwYMAArF27FhEREYiJiUFBQQFCQkLw448/ql2mW0ycOBHe3t6n3MZisXioGs/6v//7P0ycOBGvvfYa8vLy8MEHH2D69On49ddfodPpsHjxYrzyyitql+kWU6dOxZ133olNmzbBbDbj3Xffxa233gq9Xg/A0RPF+SWOFt1www2IiYnB66+/jpkzZ6KoqAiAo5fGoEGDsGrVKgwaNEjlKt3jqaeeOuVTE+eccw5ycnI8WJHnTJgwAatXr3aN93e8v//97xBCYO7cuSpU5n7/+te/mo1j+efz+ubNm0/akK8VOp0OTz75JIYPH44JEyZocuziP/vz2JVJSUnNXv/yyy8YM2aMJ0vymICAAKxfvx7vvfcevvnmG8THx8Nut6OhoQFjx47F3XffrdkvKi699FJXQ93JaPUz/m9/+xvCw8OxYcMG3H777Rg7dqxrTOPa2lrcf//9eOSRR9Qu0y3+9a9/oaamBk8//TSsVivS09Mxa9Ys1/qYmBi89dZbKlboXlOnTkVoaCheffVVzJkzx3WO1+v16N+/P+bNm6f5z7nW4kRkGlJUVASr1arZRwSJWsNut6OqqgoWi0WKXhkyW7ZsGb755hvs378fdrsdUVFRGDx4MMaNG3fKLzKo8/r555/xyy+/YNCgQbjwwguxe/duPP/886itrcXVV1+t6acq3nrrLXzwwQeuC/vHHnsMZrMZAJCZmQmbzYbU1FSVqyQian/V1dXIzs5GWlraaRu3iIioc2lsbERpaSkAIDQ0FAaDQeWKOhY22hIRERERERERERF1IJyIjIiIiIg6rT179iAhIUHtMlTB7MwuG2ZndtkwO7Nr1fbt2/HMM89gzpw5rp62TpWVlbj99ttVqqxjYaMtEREREXVaDQ0NyM3NVbsMVTA7s8uG2ZldNszO7Fr0ww8/YODAgfj444/xwgsvIDU1tdmcLXV1dZg/f76KFXYcnIiMiIiIiDqsBx544JTrS0pKPFSJ5zH7yTG7NjH7yTG7NjH7yTG7dj3xxBN46KGH8Oyzz0IIgZdeegnXXHMNFi1ahBEjRqhdXofCMW2JiIiIqMPS6/Xo06cPLBZLi+urq6uxdetWTc4uz+zM3hJmZ3atYXZmbwmzazM7AAQEBGDr1q1ITEx0LVu4cCEmT56Mjz/+GOeddx6io6M1m78t2NOWNMVms2HevHlYvnw5iouLYbfbm61fsWKFSpW5H7Mzu2zZZcbjTjJJSkrC/fffj1tuuaXF9du2bUP//v09XJVnMDuzt4TZmV1rmJ3ZW8Ls2swOACaTCUePHm22bNy4cdDpdLjpppswc+ZMdQrrgNhoq0Ey38xPnz4d8+bNw1VXXYWePXtCURS1S/IYZmd22bLzXMfjLttxlzX7gAEDsGXLlpPe2CiKAq0+OMbszN4SZmd2rWF2Zm8Js2szOwD06dMHK1euPKFh+uabb4YQAhMnTlSpso6HwyNo0L333uu6mY+KijrhZv7VV19VqTL3Cw0NxYIFC3DllVeqXYrHMTuzy4bnOh532Y67rNmLiopgtVrRtWtXtUvxOGZndtkwO7PLhtmZXUZffPEF1qxZc9Jr14ULF+Kdd95pNjmZrNhoq0Ey38xHR0dj1apVSElJUbsUj2N2ZpcNz3U87rKROTsRERERkWx0ahdA7c9oNCIpKUntMlTx4IMPYtasWZp+lOBkmJ3ZZcNzHY+7bGTOTkREREQkG/a01aCZM2di//79mD17tlTjHALAmDFjsHLlSgQHB6NHjx4wGAzN1i9evFilytyP2Zldtuw81/G4y3bcZc5ORERERCQbTkSmQevWrcPKlSuxdOlS6W7mAwMDMWbMGLXLUAWzM7tseK7jcZftuMucnYiIiIhINuxpq0GTJk065fr//ve/HqqEiMh9eK6Tk8zHXebsRERERESyYaMtaVJJSQn27dsHAOjevTvCwsJUrshzmJ3ZZcsuMx53IiIiIiIibeLwCBom4818TU0N7rvvPixYsAB2ux0AoNfrMWHCBLzxxhvw8fFRuUL3YXZmly27E891PO6yHHcnWbPbbDbMmzcPy5cvR3Fxset332nFihUqVeZ+zM7szM7sTsyuTczO7LJlB5i/Ndhoq0Ey38w/8MADWL16Nb755hsMHjwYgGMMwGnTpuHBBx/EW2+9pXKF7sPszC5bdp7reNxlO+4yZweA6dOnY968ebjqqqvQs2dPqSZjY3ZmZ3ZmlwGzMzuzy5MdYP5WEaQ5kydPFgkJCeK7774TFRUVoqKiQixZskQkJiaKKVOmqF2eW4WEhIiVK1eesHzFihUiNDTU8wV5ELOvPGE5s2s7O891K09YzuOu7eMuc3YhHL/3S5YsUbsMVTA7s8uG2ZldNszO7DKSPX9rsKetBn3++ef47LPPcNlll7mWXXnllfD29saNN96o6R5YtbW1iIiIOGF5eHg4amtrVajIc5id2Y8nQ3ae63jcnWQ57jJnBwCj0YikpCS1y1AFszO7bJid2WXD7MwuI9nzt4ZO7QKo/cl8Mz9o0CDMmDED9fX1rmV1dXV48sknMWjQIBUrcz9mZ3YnWbLzXMfjfjwZjrvM2QHgwQcfxKxZsyAknEOX2ZldNszO7LJhdmaXkez5W0MR/NvRnKFDhyIkJAQLFiyA2WwG4LiZnzhxIsrLy/HTTz+pXKH77Ny5E+np6bBarejduzcAYPv27TCbzfj+++/Ro0cPlSt0H2Zndtmy81zH4y7bcZc5OwCMGTMGK1euRHBwMHr06AGDwdBs/eLFi1WqzP2YndmZndmdmF2bmJ3ZZcsOMH9rcHgEDZo1axbS09PRpUuXFm/mtaxnz57IzMzEhx9+iL179wIAxo4di/Hjx8Pb21vl6tyL2Zldtuw81/G4y3bcZc4OAIGBgRgzZozaZaiC2ZldNszO7LJhdmaXkez5W4M9bTWqtra22c18WlqaFDfzRCQXnuvkJPNxlzk7EREREZFM2GhLnd7XX3+NkSNHwmAw4Ouvvz7lttdcc42HqvIMZmd22bLLjMedyKGkpAT79u0DAHTv3h1hYWEqV+Q5zM7szM7sMmB2Zmd2ebIDzH8qbLTVCJlv5nU6HYqKihAeHg6d7uRz6ymKApvN5sHK3I/ZmV227DzX8bjLdtxlzv5nNTU1uO+++7BgwQLY7XYAgF6vx4QJE/DGG2/Ax8dH5Qrdh9mZndmZndmZXauYXc7sAPO3iiBNUBRFHD582PX/J/uj0+lUrpSI6MzxXCcnmY+7zNn/bPLkySIhIUF89913oqKiQlRUVIglS5aIxMREMWXKFLXLcytmZ3ZmZ3ZmZ3atYnY5swvB/K3BRlvSlPnz54v6+voTllutVjF//nwVKvIcZmf248m
[Notebook output elided: a multi-line base64-encoded PNG, truncated at the start of this excerpt — the rendered two-panel line chart produced by the cell below, with panels "Base Model Metrics" and "Fine-tuned Model Metrics", x-axis "Questions", y-axis "RAGAS Metrics".]
"]},"metadata":{},"output_type":"display_data"}],"source":["import pandas as pd\n","import matplotlib.pyplot as plt\n","\n","# Load your dataframes here\n","\n","# Merge on the 'question' column or another common identifier\n","merged_df = pd.merge(base_result.to_pandas(), fine_tuned_result.to_pandas(), on='question', suffixes=('_base', '_finetuned'))\n","\n","# Create shortened question labels for readability\n","merged_df['short_question'] = ['Question ' + str(i + 1) for i in range(len(merged_df))]\n","\n","# Plotting two subplots: one for base model and one for fine-tuned model\n","fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(14, 12))\n","\n","# Plot for Base Model\n","ax1.plot(merged_df['short_question'], merged_df['answer_relevancy_base'], label='Answer Relevancy', marker='o')\n","ax1.plot(merged_df['short_question'], merged_df['faithfulness_base'], label='Faithfulness', marker='o', linestyle='--')\n","ax1.plot(merged_df['short_question'], merged_df['context_recall_base'], label='Context Recall', marker='o', linestyle='-.')\n","ax1.plot(merged_df['short_question'], merged_df['context_precision_base'], label='Context Precision', marker='o', linestyle=':')\n","ax1.set_title('Base Model Metrics')\n","ax1.set_xlabel('Questions')\n","ax1.set_ylabel('RAGAS Metrics')\n","ax1.legend()\n","ax1.set_xticklabels(merged_df['short_question'], rotation=90)\n","\n","# Plot for Fine-tuned Model\n","ax2.plot(merged_df['short_question'], merged_df['answer_relevancy_finetuned'], label='Answer Relevancy', marker='x')\n","ax2.plot(merged_df['short_question'], merged_df['faithfulness_finetuned'], label='Faithfulness', marker='x', linestyle='--')\n","ax2.plot(merged_df['short_question'], merged_df['context_recall_finetuned'], label='Context Recall', marker='x', linestyle='-.')\n","ax2.plot(merged_df['short_question'], merged_df['context_precision_finetuned'], label='Context Precision', marker='x', linestyle=':')\n","ax2.set_title('Fine-tuned Model Metrics')\n","ax2.set_xlabel('Questions')\n","ax2.set_ylabel('RAGAS Metrics')\n","ax2.legend()\n","ax2.set_xticklabels(merged_df['short_question'], rotation=90)\n","\n","# Adjust layout for better spacing\n","plt.tight_layout()\n","\n","# Show the plots\n","plt.show()\n"]},{"cell_type":"code","execution_count":81,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"executionInfo":{"elapsed":3964,"status":"ok","timestamp":1727146071738,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"bJY55sUN7Tyl","outputId":"97956c41-6a82-45bf-a998-de9ead482c8f"},"outputs":[{"name":"stderr","output_type":"stream","text":["/usr/local/lib/python3.10/dist-packages/datasets/table.py:1395: FutureWarning: promote has been superseded by mode='default'.\n"," block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]\n","/usr/local/lib/python3.10/dist-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n"," table = cls._concat_blocks(blocks, axis=0)\n","/usr/local/lib/python3.10/dist-packages/datasets/table.py:1395: FutureWarning: promote has been superseded by mode='default'.\n"," block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]\n","/usr/local/lib/python3.10/dist-packages/datasets/table.py:1421: FutureWarning: promote has been superseded by mode='default'.\n"," table = cls._concat_blocks(blocks, axis=0)\n",":34: UserWarning: FixedFormatter should only be used together with FixedLocator\n"," 
[Output of the execution_count-81 cell elided: the FixedFormatter UserWarning repeats for ax2 and ax3, followed by a multi-line base64-encoded PNG (rendered with Matplotlib 3.7.1) of what appears to be a three-subplot version of the comparison chart; the encoded image is truncated at the end of this excerpt.]
+85AO357oqgxCeAncXQ3slaEdZEc2jL4M1f4WmImljZsIZDjVLC0tj7a/X0mnu9K9MxA3PASLknQpxE3xtjcoRGFXyCGazma1bt7Jw4cL+1zQaDQsXLmTjxo2Dtpk7dy5bt27tl1AoKSnh888/5/TTj7xoMplMtLe3H/SjcjixxliOjj+ayEA/2k0C2v5xO423nUnvt57R3MuOkRyCte29dJpcKxgSOG4cGR9+QObyT8e0w1YJ5B38jLAMiur7HBuq0/ZwNFrIPE7Ss8s8TpFJrCY4WEqRstvp3b1bASNVfIlTxcgAzN3w3DGwbAp01nvQslFAT6ukfQgOF7mo6qyi19pLRXM3ZqudQL2G5MgBx6ypqAhbQyOmgsIhHbbWlhbav1w9qK6tS8iOyD3LVZ3AwSj4EmwmiMoesqJ20OTJZLz1Jmkv/8uLxqmMVgRBICcuhJrgaESrDdFq5XjjVBamLSQiIOLIDVWZE9co/g6svdKGZfxRDjUxWW2UN3cDh+uTj2b6iwV7KMhnUmwOAB3W6sPfVB22nkOjGZ0b9gGhcOy10vm6ZU7Pc0IMfvTd7qyHbf+TzucNXwBXxXeMKqdtY2MjNpuN+PiDK53Hx8dTW1s7aJuLL76YP//5z8yfPx+9Xk92djYnnngi999//xHHefzxxwkPD+//SU1NVfT/oeJZ2r76gYZVhfT87Jlo6nCjnpgQKaKz1I1o26CjjvKvnTgvYzeZqH3kUbp//gXRjYX/QUXI+tLOcuNCh2ihoiTJTz9F3sYNBM8ZvrCSin8jRf7Y2NNQ7FgDgxECQgBxIPVfZXCKvga7FWInQFSWQ02u/fJaZr41k69LpE3nrJgQtJqBZ0bAuHFkvP8+SY89OmQ/5VdeRdWtt9K1adNh763ev5r/++X/qO0afA41KDkLQRcErWVQs93xdmOFvX1piBOXOlXU5MVtL3Ln93eypXaLhwxTGe3kxoVg0epZ++AL5G3exG9Pvpd/nPQPZibOPPziljKpmviri7xu56jgQGkEB7/n5U3d2OwioQE6YkMDPGic/2C2mansrAQ8p/M5M0XKGLFqG+m2mAa/yNwt/ai4jyiO/g3bmdeDNgCqfhrQ7h2JbP6ntImccqxDBXBVfMeoctq6wvfff89jjz3GCy+8wM8//8xHH33EypUr+ctf/nLENvfddx9tbW39PxUVFV60eGTxbfm3PLHlCX6p/8XXpvQTmtJDeEY3QUcPMklViKwYOSLNdV1bGVEUsZuOMMkYxXR+9z0tb75J1R13uBXpcWCkbWFfAQ410tZ7BE6YgDYiwtdmqChASpSWkPEPsZ376DB3ONZI1vJTnbZDM+FMuOwTWPhHhy432UxUd1UjItLWIW1CHXpf0xgMBE2eRMgJR9ZNBTDOmklAbs6gi6yXd77MP3f8k33N+wZpeQQMwZDb5+jZ86nj7cYC5i4o/Eo6H0IaQRTFwzYr11evZ3XZapp6mzxpocooJq9P13aHzYigGWYJGBgGez+TnBJ1e7xg3SgjfR5kngATjlxs8FDkNUNWXMiYCdrosfZwds7ZzEmcQ2xQrEfGmBSXhmg3IAh2tlQMUlhzxW3w11TY/bFHxh9zNJfAP46CT24avc7b0HiYdpF0vv4Zh5rc9t1t3Pj1jRS2FA5/sbeYfD5MuxTm3+HUJrKK9xlVTtuYmBi0Wi11dXUHvV5XV0dCQsKgbR588EEuu+wyrr32WiZPnszZZ5/NY489xuOPP479CI6igIAAwsLCDvpRGZxvyr/hP3v+4z+RIZYeIpOqSJrdSuDMBR4bJjtO0qIqcdNp2/7FFxSfeiqNL7yohFkjCkNaKuFLlxL56wv79fxcQU67SgpOo6q1B5CiTVRUVJxjQkIsotUIQGFziWON5FT/ku8kh5XK4OgMkH2Sw9qH5e3l2EU7ofpQqholCR1X72vx99xD1ooVhJ12+NiyxqDTBUVlh2RjgUs2jVpK14KlW0oZTZx6xMtMe/dSMGs2lbfeBkhOXE8X61EZ/eT0FSqUN7BB+mzVdtUentEUFAm5i6XzXR94y8TRw/RL4IrlkDHf4SZjUc82PCCcP839E/9a/C+POao1Gg0BopSFu7U6//ALAsOlTJeyDR4Zf8xRtkEqRtpcPLodgXN+BwhQsArq9w55qSiKbKzeyA9VP6D1J0mOuPFw1vMOy3Kp+I5R5bQ1GAzMmDGDb775pv81u93ON998w5wjpOZ2d3ejOWS3WdvnIHInJVtF4riU47h0wqVMjT3y4sSrNBUDIgRGQHCMx4bpj7RtdN9JYSkrp+Prr8fc5zFw4kSS/vZXt4qQAby8+GXe/dW7xGgnARATYiAyWK1s7006vvuOqjvvon31al+bouIG8WEBCFapav3WmkEWPoMRN1FyUFl7JY0/FUXoL9wSntH/nDkw0lYURRpffpmOb7/FbjYP2ddQm2KyxqA8nsOMOw1u3gq/fsu5dqOdvFPh+rWw5B9DLmZ7du3C3t6OrV2qaN5iaqHd3I6AQFrYKNQIVPEKcqRtVV0r1Y//jf1XXMHxb85l0QeLaOxpPLzB5POk4873R2/EnB9RVC8507PHkJ6tt8gNWEpP1YWIpkEkDdPnSUdZV17FPWTnd/pc39rhaWJy4KizpWhV3dByJiIi/7fg/3hw9oOkhqiymirOM+oqHN1xxx1cccUVHHPMMcycOZNly5bR1dXFVVddBcDll19OcnIyjz/+OABnnHEGTz/9NNOnT2fWrFkUFRXx4IMPcsYZZ/Q7b1Vc59SMUzk141Rfm9GPrXwHWAS0Kbke3f2TI22L692LtA05+WQS//o4YYsWjZlUKaUx6o1MjJ7Ix+WSZpY6GfY+Pb9so33lSgSdjrDFi31tjoqLCIJAmDaJDorZXT9IiuHgjSSJhM0v9lXZdTxVdMyw9gnoboYZV0LsOIeayBkE6WHprOh7zuTGD9zbrHV1NDz1NGi1jPt5q0N9iqKIaDajCRhYfMiRtvJ4DmMIlhY0KgcjCENG2MpEnH02QZMmIVqlYqby7z8xOJEg3ZGLyqmoDEVSeCDBBi1dJpG2jz+C9nYmzEhiS0Qv1V3VxBoPSU/POxUMIdBaDpVbINVzsmKjBlGUnNyZJ0gp1E4gyyOMJQmvuq46wgPCCdQFenSc2fEnsWlnIbXNgwRtpM0CBGgphfYaCEv0qC2jHtn5nTbKnbYA574qFVwbBo2g4diEYzk24VgvGOUAO96Hoq9g3m0Qf+SCqCr+w6hz2l544YU0NDTw0EMPUVtby7Rp01i1alV/cbLy8vKDImv/8Ic/IAgCf/jDH6iqqiI2NpYzzjiDRx8dunCHysik+YPPaFyeSORcgYRrPTeOHGlb2tiF3S6i0bjmcNUEBBBx1lkKWjYyaP3oY0KOm48uVjl9Kzkd8EDHhop3CF20CEGnI+TEobU1VfyfhKBUOmxQ6kzk5fjTJadtwSqw29RqzQciivDT61IqYeYJDjtt5VT52IBUOk1WdBqB9OiBlFrRaiV86ZnYu3sOcsIeibbPVlL/1FOEzJ9P4l/+3P+6XBjGaXmEAzF3S
0XpVBxG0OsJnDiwkDowslpFxVUEQSAnPpTtFa00nns5k3OTeHjmRGKSstBr9Ic3MBhh/K9gxzuSI1J12g5PYwF8dJ1UpOie/Q7f+0RR7A/0GEvBBXesuYOdDTt59uRnOTH1RI+NI/9OSwbLgAwMh4TJULsDyjfApHM9Zseop61KKkIqaMbG/cIBh63fIYqw7mmo3yPNOVWn7Yhg1DltAW6++WZuvvnmQd/7/vvvD/q3Tqfjj3/8I3/8o2PFP1Scp83URmlbKXmReRj1vl24WRuk9C9dgmd3UVMig9BrBUxWO1WtPaRGqQtWR+nNL6Dm/vsRAgPJW78OTbDr2l5rK9eytnIt85LmUVTfV6xnDE2G/YWgyZMImjzJ12aoKEB2RBaFTVDf40QBzrS5cOy1kLNQTbE9lJrtksNWb4Qsxzc1ZCee1iZtSKdHG9FrBxYPhpQUkv72N4f70wQbsdbU0L314KhcOdK2xdRCm6mN8IBwh/tEFOHDa2Df53D9Gocd0qOWz38PvW0w93eQ4Nz9sF/PNlzVs1Vxj7y4ELZXtPLT0Ys4blEeEcM1mHy+5LTd9RGc8jhoR+XSUTnyP5eOGfOd2qyqazfRZbb1bcCNnTVDc08zIiJJIUkeHSct2oA2uICinjZgkAjQ9HmS07ZMddq6RflG6ZgwRSpmOFZoKJCCExb8EYIiDnv7u/LvaDW1cmzCsaSEpnjfvgMp+lpy2BpC4JirfWuLisOMwO0BlZHGrz/7NZd9cRm7m3b72hQSX19N3trVRN7m2UhqnVZDRl/U06C7uk7S+cMPlF9/PW0rPnO7L3/H3t1F0LRphBx3nFsOW4DNNZt5N/9dfqz9sV8rLLdP001FRcV5psTnAtBpr8VmtznWSKuDJU9JOqfqgv9g5AV+9smgdyzt/cCiVKYeSZs9N869+1rwzJmkvvoKmR+8f9DrRr2ReKPkGHY62lYQoLcdrD2wZ7lb9o14rGbY/o7k/DJ1DHmpqbiYhmefpXP9gL6iLI8gO9FVVFxF1rUtrB/6c9hP1gmQeTzMuwVsQ+tjqyBtUoHThX1kaYS0QzbgRjufn/M5ay5cQ1Z4lkfHSY0KwJj2GvboDylprjv8All/VS1G5h6yNIKsEzwWEEX44Cr46TXpZxDeyX+HhzY8xI+1P3rZuEFY/4x0nHGlVHBSZUQwdp4KKj5DkfRKBdHGpaKN87wIeFasMrq2AD07dtC1Zi2tH33odl/+jnH6dDLeeZukp550u6/jUo7j2snXMjtxLvubDi/Wo+I9RJuNnp07aV/1pa9NUXGDo5MzEe06EKxUdVb52pyRj+y0Hef4Ar+pt4lOSycaQUNTa18GwSH3NXt3t1NmaIKDCZk3D43x8Agvt57hE5dKx72fOt92NFG6BkxtEBIPqbOGvLRr4yYaX3iR5jfeGGjerkbaqihDTp9EVGFdJ5aaGso/fZc/ffV7bvvutsEbaPVwxQqYd6sqczIcnfWS9i9A3mlONZWdtmNJGgEkyY6owCh0Gs9u6EYZQ9Ga8rC0H0VhffPhF6TPhQlnSo4sNSPIdSIzJd32jPm+tsR7CALM6cvw3vwSWHoPu8RvNl4rt8L+H0Cjg9k3+NYWFadQQ15UPE5meCbrqtb5jdPWW0gTrzpKGt132oYvPQvRYiF86VL3DRshaAyDFAtwktmJs5mdOJv82g7s4lpCA3XEhQ6v76iiPL179rD//AvQhIYSunABgk59/IxEsmPDsJuj0QbWsaOu0LlK9lVbYe9nMP1SiM72nJEjhdZyqN0pab/lOV6wU36WJgUnUVpvAg7W6rabzeTPnIU+Lo7Mjz5EGxHhlpkZYRlsrtncL8ngFOOXwIpbpf9nU/HY/bvv6XNaTzhjWA28gOwsws8+m6ApkwGw2CxUdkiFNH2+4FMZ8ciRtqWNXey//AqsFRUUXahhR5YWk81EgFadI7lM/heACInTIDzZqaZjUc/W20zW/p4fqhpp7Rgkiy84Bi78r/eNGm3Mu0X6GWtMOhe+/Yskd7XjXZhxRf9bPdYeqruqAT/YeN3QF2U7+XwI97FMg4pTqJG2Kh6nv/q0Kws+Ben59n2qL5hNy6PXeWW8LFn0vsF9eQRDSjJxt91GQObojrLp2b0b0WJRvF85DTAnLgRBcK0onIp7BE6ciD45GePMmdja231tjoqLBOq1BJAAwLbaQucaf/eYVPxg7xhPlZfJ/0I6ps6G4GiHmx2ob1o0SHSWuXQ/WK3YurrQhDuuQWvr7KT5jTeovv+Bg16XFxkubbwaoyDzOOl8rP7dbRbY1ydtNHH4jdfgOXNIevwxIi+6CICKzgpsog2jzkicMc6TlqqMAZLCAwk2aLHaRWxHTSFgwgSChSBERMrby4/c0NQhSXzsX3/ka8Y68j19/BKnmw7cy92TBRtJvLbrNW765ia+Lf/WK+NlK7guU1E5CJ0BZt8onW94Fuz2/rfk+2p4QDiRgT6UI2gqHpCqmjsGHesjHNVpq+Jx5AWfnBrgK3p/XEvbjjY6N233ynjyxEtOeVIZGltHB2UXX0LhCSdiqat3u7+W3hY212ymobthQM9WlUbwGYJWS/bXX5H6/P+hi4rytTkqbhBjkHbn85uLnWsoSwDIC9sxjwChSU5rH8oboAlBqTR3mRGEg522gePyyNu8ifR/v+7UJpUgCNQ98SRtH32EubKy//XMsMyDxnUa2VE5VnVt96+DnhYwRktF+ZxEdpZnhGeom44qbiMIAjl90bbFV91B1scf0XFsHjDMxsy6f8DH18OmF7xh5sjDapbSjsEpuRuZ4nrJkZg9huapW+u2srZyLY09jV4ZT5KtE8lvaBj8AlGUHFt7R3/9EI/QXAJm56SZRhUzroCAcGgqGpC+YkDeyOeZMsExsOBBmHYpxE/0rS0qTqM6bVU8juy0reqswmQz+cyOoBgrMZPaCZvtnQrWcqRtXbuJTpNVkT579+6l9tHH6Nnt+6JuSmMuKUETEoI2KhJdXKzb/W2t28q1q6/llm9vobDPaavq2foW1eEwOkgNTQegqnOIqKzBGNen8Vfxo6T9N9aZ9Ru4Yw/M/I1TzeQN0MC+iOeUyCCCDNqDrtGGhxM4YYJT/WqCg4m67DLi7r4LTdBAUTRZ07aiowKL3YVMiPG/AgSo/lmShBhryBHG4381bCE+W1sb1paWg17zGy08lVFDXt9cqKBvbuRQRtyk86Rj4WppE0LlYHQGuG0nnPc6xB/lVNNOk5XadkkHcyzJI3j73iYElBOS9ye2WR8e/IL2anjuaHjvcjCpATdO8/6V8Nc0KPra15b4hoBQOPYa6Xz9sn5t5AOzo3xKYDgcdyec9bxv7VBxCdVpq+JxogOjCdGHDJ965WECA+uJndRJ+KkLvDJeeJCemBBJG6xEoWjbpldfo+W//6Xtw48U6c+fCJo6ldw135P60kuKOPfkxUdGeEa/Vpi7FdZVlMHe1eURGQwV7zC+T5e01eJkIbKwJEiaDohQsEp5w0YiggA65zQknznpGT5d+imRzAAgR8FFfvw9vyf6mmvQRQ/INSQEJzAh
agInpJxAt8WFKJqQODj2Wlj0FzCMHYdEPzF5EDfRIWmE1o8+pnDOXGoefLD/taPjj+bayddyctrJnrRSZQwh69rK0lGZYRkIojh0Rlz8ROlzbDPD3hVesHIEYoyCSedI93UnkNcIsaEBhAfpPWGZ32G2mans7NPq7tsY9DRTEtMQtCas2ka6LYMEEYUnQ0QaiDao/NErNo0aetsl7Xq7BWKd2zAeVcz6LYQlQ85CsNsAdeNVRRlUp62KxxEEYUAiwZe6to19+ovRuV4bMqtPIkEp/aSI884l7PTTCF20UJH+/A1Bp8OQoowwuryzmRaa3v/7VyNtfU/l7beTP2s23Vu2+NoUFReZlTKJrpLfEdLwB+cbj+vT+hvrEgmNRf0TemfRa/VkRWRR1SRF1x56X6v9819oev3f2DqV2SzUCBreO+M9lp20jPAAxzVyD2LJk1JxEuMYlEaZfQPcuBGyh3e6WmtrANAnDxQxmh43nVuPvpVTMk7xmIkqY4ucvsKFhXWd1D/1NHNueJW5e8Thdasn90Xb7nzfwxaOLYrHoJ5tRUcFdtFOsD6Y2CD3s+scYVJ8KqLdgCDY2VJ5BE3+9HnSsWyDV2waNVT8CKIdIjOcLsI3qgiNlyLuT7y3P7PmQIkjn2DqhP+eLUlUHaC1qzKyUJ22Kl6hP/XKR7q2Ync7vaXV2K2CFPXiJZTWtQ2ePZvkp58meM4cRfrzF2wdHYr3KX/WQrRJmG12gvRakiOChm6k4nE0hgCwWunZscPXpqi4yIT4KOymZCqbbZisTjoeZf3W4u/GrvaZ1QwvnwRP5kGzC8W9+pCfKwdmEFgbG2n53/+o//vfETSuTfHsJhNdmzZjbfSOzuCYwYHou/j77iNvy49E/vrXXjBIZawiR9qWNnZh7epG29xOTrXI/vb9iH0pvYMy6VzpWPoDtNd4wdIRQsWP8OopsOUVl5r369mOIWmEfkdWmPe0urUaLQYxHoCtVQWDX5TepzuuOm2do6yvQKHs9B7LaAbkqkRRpKy9DPChPMLP/4Hib+HrPwFD3N9V/BrVaaviFeTdJZeqTyuAads6Sr+MpXB5AqLR8Srd7qJWKh0eURQpPfc8Si+4EPP+/cr12Sf8bu+VdvCz44LRaFRNVV8Tc8Nvyf5qNTG//a2vTVFxkdjQAEIDdNhFKGty0vEaN1FKPzQYoekIkS6jnbJ1YGoHQQMR6U413VK7hT+s+wMrildQWNcXnXVgpK1GQ8zvbibyoovQGI0umVd5ww2UX3klHV9/c9DroijSaXZjA7KnBba9DQVfut7HSMJuh30rnd6c0IaGoo2IAKDb0s2mmk3UddV5wECVsUpSeCDBBi1Wu0jnKWeS9J/XefckHZ2WTpp6m47cMDIDUmcBIuwefTJdLrN3BVRsgvLNLjUfiLQdO07bAyXMvEmkXooC3XekQqqy07HyJ7D0esmqUYDs5E53vtjmqMRuh/xV1K/9K93WbrSCltSQVO/bYbPAxj4N27m/O8ihrDKyUJ22Kl7B1/IIttoKtAEihmiDy9FHrpClcKStjLWlhea33jqsYMlIxFxcjKWqCnNREbq4OEX6bOptosPcgYBAa3sYoKzuo4rrGDIyMKT6YOKiohiCIJCYUEFAwse8s/cDZxvDFZ/BXYWQONUzBvo7+/qqCo87FZx8Hv1S/wufFn/K2or1/YVrDpRH0EVFEXvTTSQ89OCRuhiWoKNnoI2NOUh3+ofKH5j9v9nc9M1NLvfLtv/BJ7+F9c+63sdIouoneOdieHa6y1IY+5r3cd3q67j8i8sVNk5lLCMIAjmyrm1QLOEzZxMTITmzhpdIOF86Nh3B6TUWkeV+5EwSJ+l32o4hCa/+4kxh3o0+TA6WNkorOsoGvyAqC0LiwWaSimeqDI+lB6q2Sueq01aidju8fSGlm58DIDU0Fb3WB3rVuz6C9koIjoWpF3l/fBXFUJ22Kl7hQHmEIVOvPETwOdeTt30f6Z9859Vx5V3z0sYu7Hbl/t+Vv72Bur88QvvKzxXr01cE5OSQu3YNyc8+63Jk2KHI0ghJIUmUNUqOh9x4tQiZiopShIY1YIjczNb6jc43jkwfu7v9ojiwwJf1fZ1gduJsbpp2E+PDpGigOA8Uron+zXXkrl1L1GWXDrwWFE23tZuKjgrXO55whnQsWw+d9W5aOQLY86l0zDzeoc976wcfUHXnXXSuWdP/Wq+1l4ywDHIiczxlpcoYJa/PQVhQ11eMrC+4Ylin7ZQL4Pbd8KunPWrfiKGxUMoa0egh2/lCx1abndJGWR5h7Gja9hdn8nKkbV5UFgCNpsrBLxCEAyQS1nvJqhFO1VapAFloIkT6SALA30iaDunz2K+VMjx9UoRMFGH9M9L5rN+CPtD7Nqgohs7XBqiMDdLC0tAIGqyilTZTGxGBET6xQxPuPWkEgJRIIwatBpPVTlVrD6lRyjglw5YswW4xo4seHUVddNHRhMxXTgfpwLSrwr1jL+3M3+ndt4/W995HFxdHzG+v97U5Ki4wKXo6O3fXEJY03fVO7Haw9kpSCWOFmu1S1IPeCFknON18SuwUpsRO4f2fKoAdhxUhMxUXo09JQRMQ4LKJGoPhsNdyI3L59KxP3Uvvi0iDpKOl6KV9n8ExV7vel78jigNO24lnOtSkc81aOr76isCJEwk5QfpszE2ey4qzV/hks1tldJPbV4ysqL6T3n37WPBDOx2inf0T9w/dMDBc+lGR2LdSOmYeD4FhTjevaOnBYhMJ0mtJCh8bdRcOlDDztjNremIe75ZBL0NoMs/6LUy+ANJme8+wkUxUFpz6V6kQmZf0iUcE824laPlVTDBbGe8LPduib6B+NxhC4NhrvD++iqKokbYqXiFAG8BX533Fpos3+cxh6wu0GoGMGMkhoaREQuSll5D10UeEnXaaYn36Ak8tRPsLHIRmDBTriVedtv6CpaqKlv/9j7ZPPvG1KSouMjt5KuaGU2hvdrGw4y9vwT8mwvePK2uYvyNH2WafDHrXF+hF/UXIBu5rosVC6Vlnk3/0DCw1yhQJEm1SWr9eqycrPMv99L6JS6Wj7NAcrVT/DG0VoA+GnIUONYm6/DJib72F4EE2ML1VqEdl7CBnHxXUddDx1ddMfHcr83eLztWe6GqUNijGMvl9GW+uSiPUS/fyrNixU3ehube5X8IsPcw5XXd3mZ06TjrRdlPSfASt8LTZ0t/TODoCYzxOWBLMvgHmuCGfNBrJWcTSoDTeq6rmpl4fuNzWL5OOM66EoEjvj6+iKKrTVsVrxBnj0Aje/8iJdjvlp0yh5qK52KpLvD5+Vozyxci8qcvrSapuu52q3/8eU6myBerkSNsIfTLdZht6rUC6QlHOKu5jnDmTyIsvJu6uO9UIshFKVn+RxU7X/oYGI3TUDCx4xwr5fVFZ45xf4HeaO1lTsYby9nKK+oqQHRhpa6mrR2M0ogkMRBcf75aZXRs2UHL2OVTdcadb/RyGHHVa+gN0Nyvbtz+xZ7l0zFvssHPeeOyxxNxwA4HjxnnQMBUVibw+p21pYxeGY2fCSXM
JnTuPpdlLh28sivDupfBkHtRs86yh/kxnA1T8KJ3nuRZEMRaLkMkbA0khSQTqvJuyHW0MRbBFALC5Yp9Xx1YZY2g0MO8W6XzTi2A1eW9sUYSZ10mFI2ff4L1xVTzG6PD8qKgMgXX/HrrKLLRua0aIiPX6+NlxnilGBlIUVNfGjf3RUCMJa2MjHV99RfvyFYr3LU8INVapsFlmTDA6rXq78xe0oaEkPPQgoQsXqhFkI5T0aCNaXTvdmr3srHNh0yVnIWgN0FQEDQXKG+ivLPkHzL8d8k5xuune5r3c/O3NXP/V9f2RtjlxA1rdhpRkcjdtJPvrr9ze2BOCgjDt3Uv35s2IdjsAG6o28MC6B3gv/z3XO47KgoTJINoG0opHGwdKI0xwTBphMCx2Cye8ewKXrLyEdnO7QsapqEgkhQcSbNBitYvUZ05gwouvcvGdL3Nq5qnDNxYE0Oik7/FOJ4tRjiZM7dIGXPo8CE92qYux6LTtlzDzhc4nECIkAbCzrujIF9XsgO8eg92feMeokUpTsZQ51bLf15b4HXbRjm3i2RCWDJ21sMONuZOzCIKU2XTNaghP8d64Kh5D9WKoeI1djbu44/s7eHTTo14dV9NdRdLsFuLm6NEYvV+MyhORttCnCXX2OZRfdTVdmzYp2rc30EZHk/H2/4i7604CMpXT+jHbzFR1VgHQ3SWlNuXGqUXIVFSUJFCvJTz1c4zpr/Jp4WrnOwgIhYzjpPOxFG2beiws/BMExzjdVF7opoWmU9HcDXCYpq0gCOgi3U+DC5o0iaSnniRrxfJ+B3BpeynLi5ezvsrN4iwTlwICNOa7badf0lgALaWgC4TcxQ416dm5k57t27GbBiJxKjoqaO5tprC1kFC9+gxTURZBEMjpl0hwIahg8vnScdeHYB95gQOKEJ0NF/0PrnR9A6q4b20gB3iMBSbHTObmaTezJMv5YpxKEBsoObGKWofIviz5Htb8zbuOtpHIvs/g0xth1f2+tsTvKGwpZOa787kiORniJ0OIexlQKmMb1Wmr4jV6rD18VfYV66rWeXVcbU8l4Rk9RC+Y4NVxZbJiPRNpKwgCQTOORhMejrW+QdG+vYEgCARNnUr0tdcq2m9DTwOhhlCMOiM1TVJBney4sRPBMJIwV1TQ+uFHqkTCCCUmQIosKmgudq0DWQNwLDlt3UCuth2hT8YuQniQnpiQw4uGKYGg1xO+ZAm62IHslMwwaXNNdh67zIyr4c59sPgR9/rxV2LHwS2/wDkvQ4Bjz57G519g/4W/pvXdAQdBf3X1sAw1I0HFI+T1zY0K6joAaGmpYdvOb/o3vockZ6FUkKyjBsrc3MgZ6bj4/RRFkaL6sRdpOy5qHNdPvZ4zss/wyfiZfUWhanrKj3xR+lzpWL5BKpqqMjhlG6Sj/PtS6ae0vRSz3Yw1JBZ++4Mkl+QNvn0Ufngaelq9M56KV9D52gCVsUNeZB53H3M3WRFZ3h24sVA6xuR6d9w+ZO3H+g4THb0WQgPdLORyALG33EL8ffcNWu17rJIcksy6X6+jw9zBla/uBA4u1qPiH9jNZkrOOBOxt5egqVMIyMnxtUkqTpIamkFNJ1R1DrHwGYpxp8PKOyVNwM56CIlT1kB/oqcVvnpI+j/nneLSIl+WfdHbEwDpvnagM6/i5pvRxcYSe9NN6GKcj+QdDnmhW95RjtVuRadxcQoZHK2gVX5KVJb04yDaiAi0UVEETp7U/1p/Qc3wDKWtU1EBBgq0FtV30rZiBdW//z27swS6H7+d66ZcN3RjXYAUNf/zf2Dn+5B5vBcs9iOa+jYro7Nd76LLTFuPBUGQZLxUvMPCtAWs2CIQHTpEEbTEqaA3Qk8LNOyD+IneM3CkYLdD+UbpXHXaHsaitEV8cc4XdFu7Xd7YcZrOelj/DNhMkDoTMuZ7Z1wVj6NG2qp4jfCAcC4/6nLmJ3v3BtL9y3ZMbTpEbzuL+5CioQIAqeCDkugiI0ekw7bp1ddoeP55LLW1HhsjRB9CYV/0yKEpxCq+R2MwEDxrFkEzZmDvUvZ7oeIdJkRLjvYWiwNRWYMRlgRJ0wERClYpZ5g/UvQ1/PyG5Lh1cfIuR7iaeiSn54H3NVtrK51ff0Pr2+8gBAS4bS6AaDbT+skn1Dz0R0SbjfjgeAK1gVjtVsci8RzBrH73AZL++ji569cRNG1a/2vy31t2lquoKE1uvzxCB4b0dDQixHXrHC8aLEsk7PnUu0V2/IF1T8NzR8OaJ1zuorgvyjYlMohAvVYpy/wai83Cmoo1lLWX+SzL6uiUDGw9GVQ2arDYjhBFq9VLTi9QI8mPRP0e6G0DQwgkTPG1NX6HVqMlJTSFvMg86QVTB2x8Hiq3em7QH/8lOWyTZ0ha2yqjBtVpqzLqqf54PyVfxNFT77uPe7aHJBIOxNowMiQSRIuFptdeo/G5/6N3716PjdPQaaK914pGjWDwW1JefIGMt94kaOpUX5ui4gIzkqSJqFVoo8Pc4VonR18Os2+CpKMVtMwPkSUgxrlWYfxAre6W1nDgYKetoNeT9MQTxN52G9pQhfRPtVrqHn2M1vfeo3fPXjSChvQwKTJJjgJ1md52+M9SeCJXWsiMFtYtg7cvhtK1TjcVBOGgyGlZHkGWpVBRUZq8PqdtaWMXmtxxZK/5nkXf7eCaydc41kH6PAhNlBw3RV970FI/w26D/L6NRtmx5wKynm3OGJJGqOio4OZvb+aCFRf4zIaEsECMfUX4yvv04QdFdnrJEgAqByP/XlJngVZN3h6Wrx+GL++HH570TP+mTvjxZel83q3ei+5V8QrqN0zFq1R2VLKzcSdxxjhmxM/w+HiixYwuRI+t14xhqu92nLJiQ9hc2qx4MTKQoqHKr72O7p9+Iuer1eiTXatg6zVEkfh7fk/H198QMl/5qOu719xNt7Wb42MuAyAtyjhmIhhGGu5WuFfxLZMS47FbQtHoOyhoKmZG4jTnOznmasXt8jusZij8Sjof71rhlfL2cuyinRB9CGV10v3sQKetJjiY8DN+5bapByJotUReeAEIAtoIyVGcGZ5Jfku+5FBMdaPzgFBoqwRLFxR8CZPPU8Rmn7PjPajf7dTfWRTFQTVrS9slx7gaaaviKZLCAwk2aOky2yhvN5Mb72ShHI0Wjr9bcg6kzfGMkf5I5RbobpQ0fd1IC5cDOcaSnm23tZtxkeMI1gf7TKtboxGITyiiuncP35UGkx174uAXyn/bsg0giqoT7FDkCOT0MfTddxBRFHlg3QOkhKZw5VFXYtQbYdb1sOUVaRO/IV/Sv1eSX/4Lva2SNNN4ZeeDKr5HXTGreJUvSr/g92t/z4cFH3plPEFvIOO7HeRt34M2xXeamZ6MtBUMBtBoQBTp/uknxftXGsFgIPzMM0l59hkEvXL6viA9JDdUb2Bt5VoqWnoAyIlTq277O6LNhr17iGgHFb8kJsSAxirp0P5UU+Bja/yYsnVgaofgOEg+xqUu5FT59LB09jdK3xVvyL7E3XUXcXfeiSFV8tDK+qpuFyMTBJhwpnS+51P3+vIXGoskh6
1G51REdeVNN1N6wYV0bx1ImWzpbaHN1AZAWlia4qaqqIAU3Z3TL5Fw8PzU4dT1Y6+RNt+MUUqb57/ImRO5i6U0ehfpd9qOIQmvSTGT+ODMD3jjtDd8aocmdDuG6B/4sXbLkS9KngEaPVh6JK1QlQFE8QA9WzUN/1Dqu+tZUbKCf+34F3pN3z0iJndgQ3fDs8oOaLNI0gsAc38nbaipjCpUp62KV1Fsweckgkbj06g+eRfdE5G2APH33UfOd98SvnSpR/ofSfzjxH/w4OwHaW45PIVYxf9oevVVCmbPofkN307gVZxHEATCdEkA7Gkocr0jqxmKv5OiFEcj+2RphFOlDTYXkOUIYgNTsdhEjAYtSeFB/e93rluPqaQE0WZz29yhkFP13ZZHAKmIEUhRyKNB23Zvn/M58wSHHViiKNKzdSu9O3YgBAb2vy7PkRKDEwnSBR2htYqK++T1zZEK6jowl5fz6XWn8u8Lp/NTnf8HAfiM/nv66W51MxYjbf2FieGzMTfPw9aTcuSL9EFw02a4pxRCnYxCH+0IAtywES58a/TLW7mA/AxPCU1Bf+DGzrzbpOP2d6G9RrkBd38MbRUQHAtTL1auXxW/QXXaqniVjLAMQFrweUWA3kci94eS1RdpW9LYhc2uvE2B4/LQJyQo3q/SdP7wA20rV2Lv7fVI/4IgMDNxJheMu4DSBjMgVVhX8V80RiP2jg66f/nF16aouECCUYoCdMuJV74B/nsWrLpP0gocTYgi5H8hnbuxwJcXAIGidJ/Pjg1Bo5FSNUWbjcqbb6bk9CWYy8rdMncwRLud3vx8LHV1ym68Jk6FiHSw9owOPcw9y6XjxDOdapbx3rskPfUkgbm5/a/J3ydVGkHF0+TGS3Okor6iWHk/lHH0rl72NxY63klPq6SluPoPHrDQz2gshKZCKQIzZ6HL3fRabFT2ZYTJ2XhjAV8VHzuURemnYao7g9bm9KEvjM5WoxaPRHA0TPgV6AOHv3aMIWvSy36PflKPhbS5YLfA5heVGzBhMky+AObcrP49Rimq01bFq6SFpaERNHRaOmnqbfL4eHXXnULZgqPofPPvHh9rKFIijRi0GsxWO9WtPR4dS7QfoRKqH9D4wotU33kXLW+/4/GxCvsWIPKCRMU/CV28mIz33yP1RQUnLypeIydCcirV91a63kn6PEkbsLtR0gocTXQ1SBNovRGyTnS5G9mJZzPHAgdvRtna2ggYl4c2KgpDuvKp9DX33Ufp0rNo+/iT/gVIc29zf/q+ywjCgINzpEsktOyHmm0gaJzSkhMEAUN6OuFLlkhSR30cccGnoqIwuf3yCB3oU1PJP2say5ZqKGsvc7yTrkb4/C7Y+AJ0joyiuC4jSyNkHg+BYS53U9rYhShChFFPVLBh+AajAFEUWfj+Qs5Zfg41nQpGGbqA7Cgv8WCBaJWxy5Ca9PNulY4/vS4VcVSCuAlw7ssw/zZl+lPxO1SnrYpXCdAGkBQspdMqkl45DN2FNXRX2bFbfBu9pdUIZMQYASjy0ATB1tFBzYMPUrxoMXaTySNjuINotxM8Zw76tDTClriXUnYk1lau5ePCj9lTX0Jjp/Q7UNPO/BtddDRBkycjaNVIhpHIlPg8ALrttdhcjZLV6iVtQIB9KxWyzE8IiYPfbYXf/SylWrqAKIr9TryO9kjgYA1EXVQUme++S+76dR75HgVOnoJgNGLv7saoNxJvlNJElZFIOEs6FnwJFs9kYHiFvSukY8Z8CI5xuzs10lbFW8gbQKWNXVjtIuarzmbLOA3F3U5E7cfkQNJ0EG2w5xPPGOovHHutlBIuO15cRI5szo4N8VlBLm/T3NtMfU89RS1FRAZG+tSWzJhg0HTTJhZR1TaE40wU4ZObYNlkaKvynoH+zgdXw3ePQXezry3xS4bceM1dDHFHQfbJYFI3DVQcQ3XaqngdOb3S405bUSRhRhuJM1sImnWCZ8dyAE/r2mqMRjp/WIelqoquH37wyBjuIGg0xN7yO7K/XIU+Ls4jY7yb/y4PbXiIz4q+ByA5IojgAJ1HxlJRUYEZyZmIdh2iYKW6s9r1jmTpAFlKYLQRluhy0w5LB0H6IDSChupGKTpoMK1uTy38I849h3GbNxF3x+2Awtr0yTNg8vmw+BEQ/TdLZFjCkiF11oAT2kGa33yLthWfYTvEaSD/buXftYqKp0iOCCLYoMVqF9nf2NW/USA7HRxm8vnScef7yhrobxiCpZTwLPfWFQN6tmNHGkG+ryWFJBGo820Kt9GgIyz7OYIzXmTt/m1HvlAQoG4XtJYPFN4a67RWwK4PYe2ToB0bUeLOIvs4Bn2GazRw3bdwwRsQnuzeQFU/w6c3QUO+e/2o+D2q01bF6/RPCD1djKyjlqCwNiKyTejzZnh2LAfI8nAqjqDVEn//faT95w1CTj7ZI2MogScjCuRFhs0kpRCPpYq8IxlrYyMNzz5H9QMP+NoUFSfJjAlFtMRgtxopanIj3TFnoaQR2FQoaQaOBszdYHU/6yHMEMY353/D+gs3UFLvfa1uTVAQgn6gkMaitEVcOuFSZaJABQHOfQWOuQoMRvf78xWTzoFrVsMxVzvcRLTZaHj6aarvvhtLXd1B7/1p7p94cPaDjI8cr7SlKioHIQgCOf0SCZ1khKaT2iCS/mMFJpsT96+jzgEEqNgsyYWoDElxXwDHWCqW2+/I8hPZlxCNtJm6s26YQqrp86Rj2XoPWzRCkJ3XiVMhYOx8fh2l19pLTZc0Hz7iPEkp3dn1z8Avb8IPTynTn4rfojptVbzOgcXIPEpT38I/Ih10AZ4dywHkSNtiD+onhS1eTPDMmQguVij3FKaSUnp27fZoAQKzzUxVp5S61NEhpV2pRchGCKJI4wsv0PbRx1hbWnxtjYoTBOi0xLTdSVfhQwTYs1zvKDBM0giE0SOR8Mub8Pcs+O5xRbpr7RbosdgwaDWkRQ04OEsvvJDyq6/BXFGhyDhDIYoiF46/kHtm3sPU2KkeH2/E4cSmpNjbS/g552CcOZOA7OyD3psRP4MLxl1ARGCEwgaqqByOPFcqrO8g0mLgqVds3PqpjbKKXY53EpYImcdJ57s+9ICVfsCXD8C3j0CbGxrufRQfII/gCqLNRtfmH2n7bCVdm39EtPl/EU85sMJfZF9iA1MAKGorGfrC9LnSsWyDhy0aIcjOa/n3onIQZe1liIiEGcKIDBhGBqSpGL59FFypR9NUDHv7CqC6Kdei4v/4l2dHZUzgcuqVk/RuXU9HVQAWfYZHx3GULA/LI/gzTa++wv7zzqPhmWc8NkZFRwU20UawPpiKRikqbCxFMIxkdLGxRF11FYl/+fNBEX0qI4Oc2GhgIHLIZcadJh2rfnLTIj8hfyWYO6V0WgWQiytmxgSj00rTN1tnJ73bd9C1YQOaEM/d73p276bsyquo+M31nhmgvRo2/xMqRmAhuvwvXNL10wQHk/CHB0j/zxuqpreKT8nrK9haWNeJLiKCmpQgdqdBVdU+5zrql0gYhU5bczdseRXWPuG2jqfdLlLS6LrTtn31aooWL
KT8iiuovusuyq+4gqIFC2lfvdotuzxNv+yLn0TaZoZLG801w+k3p82Rjg37oMvzRbT9Htl5LUcgqxzEgUXIhswutZrglQWw9u9QsMr5gTY+L8lK5SyC+KNctFZlpKA6bVW8jvywru6qdi71yklaV6+j8odomnf4x+6zLI9Q32Gio9fisXFs7e00vfIKVXfc6bExnEXQaBEMBkLmz/fYGAeKvpfUS84jNdJ25BB/z++JOO88tB50PKl4BsWkXyadC9f/ABf8VwGrfExvG+xfJ52PX+JWV39Y9weu+fIa1pZtBg7ejNIEBJDx3rsk/e2v6CI9V9hFE2Ske9Mmujdtwt7bS5upjW3127DarcoM8MNT8MXv4ed/K9Oft+iohbcvgifzFFvMb6ndwseFH3ulWKuKCkBuvzxCBwBfP3QKD1+iozDUyY24CWeAPhgiUsE8ygIUSr4Haw+Ep0HCZLe6qm7roddix6DVkBLpXIHK9tWrqbr1Nqy1tQe9bq2ro+rW2/zacetvBRaPis0BoMM6jB5/cDTE9knVjHVd284GaCyQztNm+9YWP2XIImQHoguAo6+Qztc7GdTU2QDb3pLO1SjbMYHqtFXxOjFBMYToQ7CLdsrbnahO6yS62BgCYrUETPSP3aewQD2xoZJMgyejbUWbjfpnnqX988/pzS/w2DjOkPiXP5O77geCjj7aY2PIO5spIelUtfYAaqStioo3iI3oISj1dT5vut+9joxRkDjFqRRzv6XwK7BbISYPorOHv34Ifq7/mR9rf6SiRXpuHKjVLej1BE2ZQvjSpW6NMRyGzAwS/vJnMj/5GNGgZ+H7C7nsi8v6JWncZmKf/ftWgs1zm5qKs3cFIErafsHRTjW11NcPKhm0vHg5D214iFX7XYi8UVFxAXmDu7SxC4vNTkaEixlxQZFwdyFc/K5iGQZ+Q36fbM+409x+RslZKRkxxv6sCUcQbTbqHnscBpMa63ut7rHH/VIqwWKz9D8v/KXA4rEp4wCwahvptgwTRKRKJEjITuu4o6Q5m8phOFVIdPYNUjG3ik1QvsnxQX78F1h7IeloyPBcQJSK/6A6bVW8jiAI/btPnixGFvPoG2T9sIuIW/7qsTGcJStGmsR6UtdWFxlJzHXXkvCXP6NPTvLYOM6iDQvzqNauvIMfqpX+zzEhAUQY1aqmIwlbayvtX67G2tjoa1NUnCAvLhZdSD7dQgmdZoXubR7Uv/YK+Z9Lx3Gnu93V34//O4/Of5TG5hjANxkEgiAQef75BGRno9VoyQjPIN4YT0uvQhrUaXPBGAM9LQMRyiMBWU9uonNOc9FioXjRYgrnzD2sCFleZB6zE2dzVLR/bDirjH6SI4IINmix2kX2N3b1R0KWtZY6X4tgtDlrAew2yO/bRBnv/j3dVT3b7p+2HhZhexCiiLW2lu6ftrpjnkeQJcyMOiOxQbG+NgeASXGpiHY9gmDnp8phipFlzIf4SZJ281imuxECwlQ92yFwKqI8NAGm/lo6X/+sYwOYOiWnLUhRtqMh0EFlWHS+NkBlbJIbmUuXtQu76ILw9ggmOy6EzaXNHte1jb3lFo/27yi29nZEm82jabsy8gaAYIkDVGmEkUjFDTfS88svJD76CBHnnutrc1QcZHJiPD3V5yFaohDtbk4rTJ2w8g4o/QF+txUMxuHb+BtWsxRpC25LIwBMipnEUdFH8WC91OeBGQStH32MNjIC47Ez0YZ4z1nyvyX/Q69RUH9aq5N+Vz+/AXs+heyTlOvbU3Q1DjiYJ57pVFNzRSWi3Y5otaKLPdiBcdnEy7hs4mVKWamiMiyCIJATH8r2ilYK6jrJTUnn3vdsTKjYhnnSfgIyXUhnbymTnAkRacob7G0qf5KcVYHhiuh4FjW45rS1NjQoep03OdCRNaTOpxfRabUYxAQsVLC1uoDjM4fYKJt0rvQz1jnmaimlf7TJnyjIiaknEm+MJy8iz7EGc2+Bn/8rRfM3FEDscO1EmH0jFH0tSdKojAnUSFsVn/Dw3IdZftZyTsk4xTMD2CzSzrif4Y1IW3+i9b33KDz+BOo9WIAMpIrm8oSwu0tK11GlEUYewXNmY8jOBo1alGckERVsINg8B1t3FpUtZvc6MwRD2UboqJY0BEciZevB1A7BsZA8Q5EuGzpNtPVY0AhSITIA0W6n7tFHqbzhRqw1w2jyKYAoinR8/z11f/s7Wk/osvdLJHzml8/vw9i3UioCkjgVIjOcahqQlcm4rT+R8d57Hs1AUVFxFHmju7C+g/TwdEJ6IMgMDVtdSAdf83d4ZorjkWP+jiyNkLsYtO5vVvVH2sY5t9F26AaPu9d5E1nCzF+kEWQi9ckA7G0aJtJWZQCNFgLDfG2F33LD1Bt49uRnSQ1LdaxBTO7ABv8GB+6ZAaFw4j1wzWp1vTSGUGeKKj7B07us7f/+G0XHTqDu6pM9Oo6zyFqEno60BRDNZjq+/prmN97w+FhHonfPXrBY0Cd6Np2oubeZDnMHAgL1zVJBjdx41Wk70oi56SayV35GxNln+doUFScQBIHsWIU2pARB0gyEgYXySCM6B07+gxQJ4eaEekP1Bt7Z9w5rS3cCkBZlJFAv9Sn29BBy8skETJyAISPDXasdovbPf6b59dfp/vkX5TvPPB4CI6CrYWQUe9nzqXSc4FyUrYzGYCAg6+AIxm5Lt3ISIyoqTpDXN2cqrOskQBuA/rbraP3XQ0T9yoXPd3Jf/YLdH48sjeojodFL9yYF5G5gQNM2JzbUqXbGY2agS0g4cjq0IKBLSMB4jDKbhUrSH2kb5h9FyGSSg6VI8IqOMscaWHqhtcKDFvkxNoUKj6oczrxbpUj+8BTH2/hJxLqKd1Cdtio+RRRF5/WyHMBcsBdLlxZbj3/JL2TH9BV7aOrCZvesZmNvfgGVN/+O+qf/ga2jw6NjHYnkp58i89NPCTtdmYnukZClEZJCkiipl4oJ5DiZdqbiewStumM8UkmKNqML38KX+xVwtMqagfmrRkbE5aFEpMLxd8Nxd7jd1cqSlTy6+VG+Kf8WODiDQBMcTPITfyfro48Q9ApKFRwBQRAIP/NMIs4/n1ajyNVfXs25yxVMF9XqpWgTjR7q9yrXrycwdR4gjXCWYt2u2r+KOW/P4a41dynWp4qKI+TGSw7Egjppvnj6mbcz5/iLMAY651gEIPNESaO6uxFK1ihnpK9Y8CDcXaxIKnJbt4XGTmmemhXrXKStoNUSf/99ff8Y3GETf/99fjmXcqo4kxfJi5IKhTaaKoe/uOBL+GsafHSdh63yU757BJZNkVL5VQalvrue+u7Bi4wOSepMuGMfnHjvka8RRfj05r65sX/5N1Q8j+q0VfEJdtHOJSsvYe7bc2nqbVK8/8gpBtJPbiTqV/4llJ4cGYRBp8FstVPV0uPRsQInHYVxzmwiL70E0eq73dHAcXloQzzrQJV38NNCMyhv7gYgR420HbGIooi9S9XLGkmEhNUTlPQhW1o/cr+z9HkQEC4t+Cu3uN/fCEa+t5l7pCJkOXEuOFAUJO6220j8y5+Jmnw0W2q3
UNBSQJupTbkBTnoA7i6CmX6+KA4Igdt2wjmvQEyOU03tJhMVN95Ew/PPI5oPlhOR/97RgdGKmaqi4giyPEJpYxcWm5sOAa0OJp0jne98303L/AStThlphEYpkj4xPJDgAOc14MMWLyb5mWXo4uMPel2XkEDyM8sIW7zYbRs9wQOzHuDR+Y8yPW66r005iKnxuQD0UjP8xTG5YDNB1VaweHYN55eUbYDWMjUlfwhe2/UaC95fwLKflznfeLgaDsXfwC//hQ+uht5WV8xTGcGoTlsVn6ARNDT3NtNp6exfpCiJtms/xjgzgdNnK963O2g1ApnRfWnEjZ5NgRQEgfTXXyf+7ru9UgjsQESbDdHmvQi5AG0A46PGEx+YhV2EsEAdsSEBXhtfRTk6f1hH8YKFVN2pRpqNJKbES4UTuuy12NyNjtXqIXeRdJ7/uZuWeZk9n8LOD6DXfUemKIrsb9sPQEtbBHBwpO2hDj9vYtQbiTNKRR/lCCpFCE+GoAjl+vMkofEw5Xynm5ny8+n89lta3nwLDomQlv/e/haNpjL6SY4IItigxWoX2d/YRVNPE99+8A/WP3QjpiIX9D4n93039n0G5m5ljfUmTcVShJtC9OvZupENFrpoETnffE3aG2+Q9OSTpL3xBjnffO23DluAidETOTP7zP7nhr8wN308AKJgoaJ1mCCiyEwISQCbWXLcjiXM3VD1s3Se7l8BUf5Ep7kTjaAhLdTFAoyiCEXfwO5PDn9vfV99mBlXgDHKZRtVRiaq01bFZzx23GN8svQTpsVOU7ZjUQRZUD4mV9m+FUAuPCBP3EYjnWvWUHTSyTS98opXxjsj+wzeP+N9ZoZfCkhpfv5SnVbFOXTRUViqq+n55RevOv5V3GNGchaiXQeClepOBYpiyRIJ+0aY03btk/DhNbB3hdtdNfU20WGRtLrL66QIjNwDnLYlS8+iaOEievd6X0rAUlfHZDEJwCMbr4AkQTAK0cUnEH///URfe81hz6n+FOKwDO8bpjKmEQSBnD6JhML6TkraSqh+/WWi3vuOrg0uaEynHAsRaWDuhIJVClvrJTob4LkZUkq4SRmZMVnPNttJaYQDKb/8CqpuvwN9YgLhv1pC8KyZIIq0f7mark2bFbFzrBBtDCW49k905j9Mfdsw6wZBGHBYlrlQoG8kU/UT2C0QlgwR6b62xm95ZP4jbLlkC0uylrjWwd7l8OY5sOpesJoGXq/6GUrXgqCV6iWojDlUp62Kz5geN53siGz0CqQbHYitqpCm7TY6awIhKkvRvpUgq0/XtqTRe+nfpsJCujZ4b4LRvmoV1vp6rE3NXhsTpIUGqHq2I5mA8eNJffllcr771i912VQGJzM6FNEipfD/XFvgfoc5iyBxGhx19sgpZNNaAbU7QNBA3qludyc7QxODk2nskNKV5WKW9t5ezGVlWCorvV4pvP6ppyk64URO2iA9w+ToUMVoKoZ/nQQvzFE0wk0xdn0I//4V7HJNCkQfH0fU5ZcRfc01B71usVuo7JB0FTPD/atYj8rYQN4UKqjrIDM8k/oZ6ZQel4Uh1zkJEEBycMnRtns+Uc5Ib1L4JSBK0f8BykjTFMmRtnGuzVMttbV0b9lCx1dfoQkecPw2vf46VbfeSsNzzylip5Jsq9/G2/veZm+Tf2qV50SnABqK6x1Yl/U7bdd71Ca/Q3ZSp89VC2ANg0FrIFAX6FrjvNMgNBE6ag6WltnwrHScfJ5UN0FlzOG8mI6Kip/Tu3Ut9dvC0YdBjs7/UuTlwgPeirTt+PY7Km+8EX1aGtlfrvJKBGriI48QunAhgXl5Hh/LLtoRRRGtRtv/O81V9WxHLIJGQ8hx831thoqTGHQaAknATC3bawtZmrfAvQ4Dw+D6EVbAJv8L6Zg6C4Jj3O5OjrqMCUgmH0kDMaRPA1ETGEju+nWYi4rQxbg/ljME5OWBRkN0r+4gOxUjLAka8sHSBdU/Q7KfVULf9RHs/wHSZgPnKNZtRUcFVtFKkC6IeGP88A1UVBQmr2/uVFjXSUxQHnf8+Qv3Opx+KcRNhHGnKWCdD5AzPca7GDU3CCUN7skj6OLjyXj/fXp370YXPaB9HX7mUlr++ybGY45BtNn8atP767KveWPPG1wy4RImRE/wtTmHkR0bzLqiRsdk69LnSceKH6UNZYUDj/wW2UntA2kE0Waj+6etWBsa0MXGYjxmhl99vhVFZ5Aiab96ENY9A+FpULdzQC5h7i0+NU/Fd6hOWxWf0drbykdFH9Ha28odx7hfZVtGExJG6FGR6CLCFOtTSeSJmrcibYNnz0IbHk5Abi72jg60YZ7/vWgMBq9pa5W2lXLBiguYGD2RuvprAdcjGFRUVFwnJiCFarZR0Fzia1N8g6y/O+50RbqTI1gDSQAO1rMF0EVGojv2WEXGcobQhQvI27SRzR274OvrlZdH0AdB3mLY/bGkEexPTltTJxR9LZ1PONPp5vaeHrp//pmgSZPQhocf9F6/nm1Yhirvo+ITcuNkeQRlpACIyvLLjDeHMHdD8bfSuUL3dLPVTllfsVxXnbaCIBA0eRJBkycd9Lo+Ps5vM5RyInM4MfVEpsZO9bUpgxIUWkVg8lt8W5/IfTw19MWx4yEoEnpaoGY7pBzjHSN9idUMFX1FYWWntZdoX72auscex1pb2/+aLiGB+Pvv8zsN5zUVa3h558uckHIC101xo5jqjCvhu8egqQD+c8bA69oAaC6BhElHbKoyelHlEVR8hlW08o+t/+CNPW9gtilXUCVowQWkfLiBhFf9U0NLjrRt6DDR3uv5tF+N0UjOmu9Jff7/vOKw9Tb72/Zjtpsx2cyU9jnCc1Wn7YhGtNlo/s9/qbj5Zuxd3pMRUXGP9NAMAKq7ypTr1NwFez+Dbu9KrThNbxvsXyedK7TAl52hdpMkf3Co09ZXaIKC0IaF9RfLKu8ox2q3KjvIxKXScc9y/5JIKPoKrL1SQZqEyU437929m4prrqXkzKWHvdevZ6sWIVPxEXKWUmljFxabJMlisploqirB2jRMkabRRsn3YO2B8FSXvuuDUd7chc0uEhKgIz5M+UxAf3TYApyVcxbPnfwcp2X6Z8R1dKiIPmwnDdZtw1+s0cCcm2HRX6Q09rGAuROmXwJpcyHG8xmUMu2rV1N1620HOWwBrHV1VN16G+2rV3vNFkfY17yP7Q3b3c8+ku89h2IzwXuXS/MilTGH6rRV8RnRgdGE6EOwi3bK28t9bY7XCA3UExcqTdZKGrzjkNIEuqit4ySmklL2X3oprR997JXxAE5MPZHPz/mc30y8F4tNxGjQkhQe5LXxVTyARkPzG2/Q+fU3dG8dYxV6RzATYrIBaLFUKdfpf86Cdy+BfSuV69MTFH4lFemIyYMYF/QfB0Ge+Ld3RAIHO20b//UyzW+8gaW+XpGxXCEhOIFAbSBWu5WqTgX/5iBpGuuCoKUUancq27c7yIuliWe6pOtn6+xEn5pK4KTDI2VkJ31mmKpnq+IbkiOCCDZosdhE9jd28dbet/i/K46mfsESWt76n2udiiKs+we
8OB/aFShS6S36MydOU0zDs6h+oAiZK9H0nevWU//MM/TmD60b35tfQNdGF4rHjVHmpU2ht24J3TW/6t+sGJLj74J5t0B4sueN8weMUbDkKbj6C6/p2Yo2G3WPPT74pm3fa3WPPe5XBYtL2/ue4e5o0tttsOqeoa9Zda90ncqYQnXaqvgMQRD6KyQrqYknNleA3YGHrg+Ro21lbStvYevowFxR4bH+2z79lJ6fttLx5ZceG+NQtBotqaGpWHukHe/s2BA0GjW1dCQjCAJRl19G3F13EpCd7WtzVBxkRpIUgWEV2ug0K3Rvy1koHfPd1Fb0NHW7pKNSabQ2c78jtKZRSlmWU5dFUaTp1Vepe/yv2BobFRnPafsqKqi+/U7++I60eFK8GFlACOT06SLv+VTZvl3F0gMFfc+2iYdHyjpC6IknkvPValKW/eOw9+TfoVqETMVXCILQvzlUWN9JnDGOmigQBbA2NLjaKeSvknQZXSze53XsNijoy9ZT6J4OUOymnm3bxx/T9OJLtK84cqRd+1dfUbp0KTV/+hOiH6yFui3dNPY0IvpTxsQhjI9NRNdxIubOPMr75CtUfEv3T1sPi7A9CFHEWltL90/+E9jR/wx3Z+O1bMMwm1sitFcNFIZTGTOoTlsVnyIvTpTSxLN3trFv/iKKZo3HVue/0bvyhK3Yi07b9s8/p3D+cdQ+8ojHxoi8+CJi77yDyMsu89gYR0L+XarSCKODqCuuIPraa9Enj5FIhlHA5MQE7FbJsbivqViZTuUCNsXfShqD/srCP8Htu2HWbxXprry9HLtox6gLprpJKnTSH2lrsRB16aWELlqIwUebGprgYDpWrSK7qIvQblH5YmQAE8+Sjns+9Q+JhKJvpOJo4amQdLRbXQn6w4vXqPIIKv5Abrx0Dy+o6yAjLIO1kwRu/n0ECX9+2PVOJ58nHQ+shu7vnPMyzL5JUQ3Pfqeti/PU0EWLCF28mNBFi454TcjcuWgjIgicMBF7h0LaxG6wsWYjJ713Eld/ebWvTTkiGo1wQDCNgxmQrRWw/R1oUVAOyh+x26B8E1hNXh3W0U0ilzeTFEYUxYFsGXc2XjvrlL1OZdSgFiJT8Sny4kSpBZ95x3qwC9hMGjSxKYr06Qmy5GJkXpJHAAiYMAHRZMJaU4toNiMYDIqPoY+PJ+Y6N8TXnUQURf644Y8kBieyt1YqcKAWIVNR8Q2RwQY01jjQdfBTdT7HJCpQdCRhslQ9t61c0vkar1zUk+KEK/fMkZ+JicY06hCIDjYQFSzdswWDgdjf3azYWK6gi4oi/qEH+Vyzm+7uT5UvRgaQdwocdTZMOENy2vq6OFdguBT5HT9JcVtaeltoNbUCkBaapmjfKirOkNena1tY10la2FH0BmjooZOm3iZigmJc6/Sos+GLe6BmGzQWQkyucgZ7Ao0Wsk+SfhSkuGFAHsEVwk49hbBTTxnyGk1wMDnffoPGaHRpDKWRnw3xwfE+tmRoEmN6yO/YzvoKWDRxyfANVtwibSaf9gTM+o3nDfQVdbvhtVMgOBbuLJA0fb2ALjZW0es8TUNPA93WbrSClP3pMiEOfk8cvU5l1KBG2qr4FFkeQakFX0CYiZyltaSdH4vgpQeLK8g7ut6MtA3IzCRrxXIyP/3EIw5bX9BiauHjoo95cfuLlDRKou1qpO3owW4207X5x2H121T8h3CtFBm9u6FImQ4FYSDaNt9PdW1tChfhAgxaA9NipxGjl5wb/rgZFXXxxUTOnItNK3jGaRsYBuf/Gyad67WF4pBkHgeXfihFVbtA16bNFC/5FfVPPnnYe/1O+uBEjHr/cLaojE1kGZbC+g4CtAEkh0j3dLe+48ExkH2ydL7zA3dNHJGIokhxvXvyCI7iLw5bGEgZl9d7/oopYDNBKW+zudFBKab0udKxbL3njPIH5DT8xKlefQ4bj5mBLiHhyBcIArqEBIzHzPCaTUMhf86TQ5LRaw/PpHGY9LkQlgQcaWNYgLDkgc+fypjBD2bBKmMZOYVgf9t+RfSOhKYi9EF2giYd5XZfniSnb8K2v7Ebm917aZ8BubkuFT8YDntXFzUP/ZHuLVu8qlslLyISgxMpqTcD/lNhXcV9Gv6xjPIrrqDlzTd9bYqKg6QEZ2PrSaanV8Hih/1O21X+V3xBFOH5mfDmuVK6pEIcn3I8/z39v4zTS1IzB97XLNXV2M1mxcZyh2lx07j7mLu5cdqNvjbFe7j4DO3ZsQNzcTHmysOLtk2JmcIX53zBUyc85a51KipukdsXaVva2IXFZicjPIMJ5SLWB/9O44svut7x5POl4873/UPu5Eg0l8Cq+xTXjKzvMNFpsqLVCKRHOxdpK4oi7V+uxtbe7lQ7W2sr3Vu2ONVGaUaK7Mu4aElqqMlU6VgDWTajbIN/f57dRXZKe9lJKGi1xN9/3+DP277X4u+/D0Gr9apdR0IRaQSQovxP/VvfPw79v/f9+9S/StepjClUp62KT0kLS0NAoMPSQVNvk/sdNhVKRz9PvUqKCMKg02C22als8b5Oo2i3Y+9Wbtz2r76i9b33qP7DHxTr0xHknc1EYxq9FjsGrYa0KP+JMFBxD+OsmWhjYtCEhvraFBUHOSlpKd37f0dQj4JppRnzISAcuhuhZrty/SpB7U5oLob968EYrXj3RfWHa3VXXH89+dOPpmvzj4qP5yxRxY2c/kMPM/Qe1NZtLIS1T0rOFF9R/C20ObiYPwIR559HyksvEnX54ZrvWo2WlNAUJsdOdmsMFRV3SY4IItigxWIT2d/YRWZ4JpGdIhE/7KLj2+9c73j86aALku6XNdsUs1dx9q6ATS/Amr8r2q0cZZseZcSgc275bcrPp+rWWyk6eQGigxt2vfv2UXjiSVTe/Dvsvb1O26sEB+l8ulOcyQtMjZfWjT0MUfzqQJKOBm0AdNWDUhr+/oYoDmxeKKjt7ChhixeT/MyywyJudfHxJD+zjLDFi71u05Ho35xQIqJ84plwwX8gLPHg18OSpNcnnun+GCojDlXTVsWnyKlXlZ2VlLaVuq6X1UfTF78gdAUTtjjBrz/cWo1AVkww+2o7KGnocnrX3R3aV62i/smnCF2wgPj77lWkz8Bx4wg/71wCsnM8Esl7JOTJYIhGerBlxgSj06p7UaOFkOOOI/eHtV79TKm4x4Bet4LSL1o9nPsyROdAtG8Kbx2R/M+lY/bJYFBmw0gURSx2CwatgcK+hb4caStarVibmsFmw5Due+3TmocfxrRnL/qUFMJ/5YAOoCt8cQ8UfyNF1xx3p2fGGAqbBd6/Cnpb4brvINm1ImS6yEhCTzxRUdNUVJRGEARy4kLYXtlGYX0nGWEZfJYqsPGMTC487y7XOw4IhcnnSgWNtAHKGaw0+X3p8eOU1U+X5dCyXJBGsLW2YsjJxpCW7rC8WUBuLrqoKDShoVhqagjI9L7TtMXUQru5HQGB9LB0r4/vDHPTx8OPgLaL/c31ZETFDd1AHwjJM6B8gxSNGpPjFTu9SmOhtFmuC4Sk6T4xIWzxYkIXLKD7p61YGxrQxcZiPG
aG30TYysjrUcUiyieeCeOXSE7zzjpJwzZ9rhphO4bxZ7+WyhghIzyDys5K9rfv59iEY13uR7TbadzUgd0STrAuzu8/3FmxktO2uKGTk8YPMzlQECEgAEtlJZ3ff0/cvfco4hALnDCBpEceUcA655B3NgWrJMieE69KI4wm/G1SpjI82f0VmDswWy0YdG5oex1I3tDFV3zGvj6dXQULpDX1NrHg/QWkhqRS1nQ9oOnXmRR0OnLXr8NaW4su3veFKEIXLsQaH8XP1mISm/cxPmq88oNMXCo5bfd86hunbelayWFrjJF0/TzA3378G6GGUC4Yd4Hbm9cqKu6SGx/K9so2Cuo6mDcpk5ZQgQ/mwNWzZrrX8dLnlTHQU3Q1QsVm6VyW5VGI/iJkcc4HaQTPnk32Z585FTEraLWkv/M2uthYn218HyhhFqhTUDLJA0QbQxGsEYi6VjZV7hveaQuSE618g+RYm3GF5430NrI0QvIxoPP+Rkv31q10rd9A6CmnEOzuvcfDyOtRt+URDkSjlbT0VVRQ5RFU/AA5lUBOdXcVsbeLyBMmEDI+HP2k2e4b5mHkQgTyRM5bhMyfT9JTT5L5yccjPoJRfkh2d0UCahGy0Yyv0vtUnCM1yogx5b9os+7ni+K1vjbHs7RWQO0OQIC8UxXrtqy9DLtop9dqwWbXEBKgIz5sYMEkCAL6xES/uH/H3ngj712RwS1tL7OqdJVnBhm/BASNJI3R7IGCZ8Oxd7l0nHCGy1Euvfn5tLzzLqbCwsPes9gtvLPvHV7c/iJWu/JF7VRUnCWvbwO8sL6z3wlR1VmF2eYfWtoeo2AViHZImAIRblSAHwQ50tadImSaQOccn/q4OJ8+J+R1naKOLA8S3Je1t6Pu8Pv0oPQXI1NW/9hvKN8oHX1U9Krt0+U0vvACLe+8DYBosdCzYwc927b5xJ4j0WvtpbqzGvD/gnsqIxfVaavic+SHubvVpzXGUOL+72NSP9mExuj/GphZ/RFpCqYRO4Cg1xO+ZAmaoCC3+xJFkZa338ZS66AGlIJYbBYqOySNwfrmcEAtQjYasVRVUXrOuRQtXIRot/vaHJVh0Gs1GA16BI2NbbUFynZe9A28fTFs/pey/bpKQZ+TMnWWVB1dIY6OO5pvz/+Wi9IfAiA7LsQvHLRH4qjoo5gWO434YA9F/gbHSLrGIOlNehObFfZ+Jp1PXOpyNx3ffEPtn/5E48svH/ae1W7llqNv4fy884k3+j56WkVFjuwvrOsgOjCaEH0IWouN/etX07lWgc24uj2wZ7n7/SjNvj65m/HKS73ImrbOOm1t7e1uF/gVbTZ69+xxqw9XGClFyGTiAiVHfVGrg/rpqbPg/Dfgum88aJUPmXMTLPozTPiVT4YPnj+P0EULCTtFyrRqef999l9wIQ3PPucTe45EWXsZIiJhhjCiAqN8bY7KKMXfM8hVxgDjo8YzN2ku0+Km+doUr+KrSFsl6d25k9qH/4zmiSfJ3bDe6SgAd6joqMAm2jDqjOyv0wK2/oWGyuhBFxuLqbQUsacHc2kpAdl+pmmqchjjAy5kfdFiUrPnKNtxcwnkr4TuJpj1G2X7dgUPSCOAFEkba4ylq7MVaD0og6Dur39DNJuJvPQSArKyFB3XVc7OPZszExdj7/Lgs2ziUkmmYM+nMO8Wz41zKOUbJE2/oMgBx7ELGFJTCZ43j+CZh6d4BumCuGrSVe5YqaKiKLl9kbaljV1Y7SKZ4ZkI+duxPXE3tamp5Hy12vXOK36EVxdBYLgke+ODtOtBsfRIBQdBcWmELpOV6jYpW0iWEHKUqjvuxFRSTNIjjxA81/mIR2tLC/vPOx9rfT0533+HLlr5gplHYqQUIZPJCM+kpAFquyscaxAQAked5VGbfEriVI9JAjlC2OLFBxUbM06fjjY8HG2UfzlGo4Oi+cOsP9Br6/XrDXaVkY0aaavic6bETuGfi/7Jb6a4twi3lm5DbKmSql2OADJjpIlbY6eJth6L18fvXLOG8quvoeX9913uQ7RaCTpmBiELF3jVYQtQ2i5NBlNC0unotaERICNGmUJAKv6DYDCQ+uIL5KxdozpsRwiT4nIQLVHsb1JY0kJeSFdshs4GZft2hQlnQOYJMM4zBbgOLUIG0LZiBS3/+x/2Tu9maAxF83/fpGDWLBqWLfPcIOPPAASo+kmSpfAWcjTg+CVSQTwXCT/jDNJefYWI885TyDAVFc+RHBFEsEGLxSZS1tRFRlgGRUkC5ohgAnJzEc1uyCQkz4DQJOhtg8KvlDPaXZpLpc2Z8FRJHkFBSvqCM2JCDEQYHSskBmA3m+nZsQNrdQ26hMThGwyCLjISbVQUGqNxUHkWTzLSIm0nxUrFxNqtVT62RGUwAsaPJ3fTRpKffMLXphxETFAMF46/kCuOGoW6xip+gxppqzJqqL75OrpKO0i6cSnhN//N1+YMS2ignrjQAOo7TJQ0dDI9LdKr45uKiunasAF7Tw+R55/vUh/Go48m4803Ea3e1+GTd/Aj9MkAZEQHE6BTC1eNRoJn+79GtcoAsvRLsdLSL+Ep0mK6dgcUfgnTL1W2f2c59hrpR2H+vPHPhBhCyG/IA4T+SFvRbif+nt/Tm59PQI7/VKo2ZKSDzYapsgKLzYLeDefmEQmNh7Q50t++fq/iepODIorS5wxg4lkeG2Z3024MGgPpYekYtI47dFRUPIUgCOTEhfQVI+vkuinXcc3ka0i5JoUAdyNjNVqYdA5s/D/Y+b7PUq8PI34i3LEH2qtB4Wg5+VmY5aQ0gsZgIHfN93Rv/ZmALNejVZOffAJdfLxXgysOlDAbKTqfx6aMhz1g1TbSbTFh1DvwWe9uhi2vQut+/y+05wzb35W+B9knKyr/5Cgd336H8dhj0IYOZFCqUawqYxk10lbFb2g3t9NmanO5vaW5C+wCurQ8Ba3yLLJEQokPJBLCzvgVMTffTNLf3XdwCzrv7//IBQ50NqnCa7aqZ6ui4hdkxhgxxHzFHuvzdJoVdtzKWoOy9uAow2wz82Hhh7y+63XKmqTnghxpK2g0hJ95JvF3343G6D9ZBcaZM/n3X+Zy1qIdbKj2YEGWs1+Eu4shb/Hw1yqBIMBv18O5r0oR1S5i7+oacmPzr5v/yjnLz+Hb8m9dHkNFRWly4yVnSUFdB5nhmWRHZLvvsJWZ3BcoULAKetuV6VMJBAHCkxXvVnbaulJ3QRMURMj8eW6Nb0hP93o23IESZnHGOK+O7SqT4lIR7XoEwc7WymLHG373CPzypn9kACnF2ifgo+ug8ievD20uL6fyxhspPP4E7D09g14j2mxeturIfF/xPTsadmCymXxtisooRnXaqvgFj2x6hHlvz+O9/Pdc68DSS9bianLOqCNo7iJljfMg/cXIGr2f6qqPiyP25pswpLoWsdT9yy/Y3UmRcxNZHsHcI+0A56pO21FNx9dfU33f/fTs3OlrU1SGITcuFH3kZqxB29jX5MTCxxHG9enHFn8L5m5l+3YUmwV+/g901CnedXl7OXbRjlEXjMkUjEGnISXSfxy0g6EJC
MAWF4ldtLtdUHRIIjNA713HA4FhMPk80LkeAdv48svkH3MsjS/9c9D3R1oKscrYQJ5TyTItB+J2dlXiVIjOBWvvgDa4LzF1gt1zTiDZaetsETJPYKmu9so4meGZfHfBd7x26msjJkJSp9VisCcA8FN1vmONjFEQd5R0Xr7RQ5Z5mc56aCoEBEib5fXhrQ0NGLKyCJo69bCi2ebycvZf+GtKTveMLJWziKLIPWvv4ZLPL6GqQ5XVUPEcqtNWxS+IDYoFoKHHxV3K5hIEQUQfFYIm2gtpkwrRX4ysfmQVI7O2tFB22eUUHXc81qYmn9hwQsoJnJR6Ei2tUlEFVyIYVEYO7Z9/QdvHH9P5/Rpfm6IyDBFGA1prPABbqhxc+DhKwmRJc9DaA6U++izsXwfLfwcvzQe7Xdmu+xx4MQEpgEB2bAhajbTg7dm5UyrK50cRJjJy+qtsv0cRRf+KzhsGU0EhYm8v2ojww95r6W2h1dQKQFpompctU1E5Mnl9kbaFdR0AvL7rdR5dcSeF55xN4YknIbpTP0IQBqJtd7peV0Ex1j4BT+bCT695pHt5ju9MEbKuH3+k/NrraFu+XBEbRKuV8muvo2jBQkwlJYr0ORSCIBATFMNR0Ud5fCwliTKkYTfFUNPmxLosva/oapkHM028ifz/iJ8k6Tx7GeOMGWR/vpLU5//vsPd00dH07NqFuawMS53yG+fO0m3tZnLMZBKCE0gNHTn+B5WRh+q0VfELLppwERsv2sj9s+53rYOmPnH9mBzFtag8iS8jbWV6du2m9s9/ofvnXxxuYy7djy4qCn1Kilcr0R7Ib6b8hmdPfpaKOmlhkRsXOkwLlZFM2K+WEH3tNYSccLyvTVFxgHBdEgB7GoqU7VgQYPyvIHU2aDygneoI+V9Ix7zFoFF2GiVHqgYiRfscuBlV89AfKTntdDq/+07RMZUgixiuW2VjzqOfu+fMGY6yjfB/x8I7F3tuDIDKrfDP42HTi253lfJ/z5H1+UpCFx8u6yA7uRODEzHq/TuiWmVskRsv3XtKG7uw2Oy8m/8u7zd8iaWoGFtjI+b9+90bYHJfUb7aHWAZPAXaa+R/Dt1NEBCmeNc2u0hpo+y0dTy4oOOrr+lat46uTZsVsUPQ6RAMBhAEurd4P+V9pHB6/O10ldyF0O1EMbr0udKxbL1njPI2stNWdkb7CE3w4ZscmuBgUp57juyvVqOL873sRrA+mFdOeYWvzvvKM3r+Kip9qIXIVPyCMIN7E6X2L1bR/XMYoSGxOL6P7XvkCdz+xm5sdrE/osqbtLzzNm0ffIjd1Ivx6OkOtTEePZ2c777F2uBb/abmLjNNXZJEQ3bcSPrLqzhL6MknE3ryyb42Q8VBEo1ptJo9FHl56uO+25wTRWmBDzBO+fQ8+fdlN0uLkf4iZKKIxmhECAwkYPx4xcd1l/T4PFJ2iOhtHZj37ycg0/WiOUMSliht0jYXS/qBIbGeGWfPJ1CzHaJzgBvc6krQaAjIyhr0PdlJP1IK9aiMHZLCgzAatHSbbZQ1dXHBuAsw28wEPZVA8oRj0ScnuTdAdDZc8RmkznJLfsRtGougsUDaBMxVXl6tsqUbs81OgE5DckTQ8A36iLrkYnRxsRinOzYvd4S4u+4k/v77MaQor9t7KE9vfRqAC/IuICU0xePjKUVOX4S5U4VU0/qctrU7obcNAg/PqhhR9Dtt53p9aFtnF5pg45CSGqEnn+RFi1RU/APVaasyKuj8aQ9tBSHoJupHlNM2OSKIAJ0Gk9VOZUs36dHetz7inHMRe02En3GmU+0ErRZ9QoKHrBqaxp5GAIrrpId6ckQQRoN6O1NR8RdyI7PYWwcNvRXKd+7LbIrandBWAbogyDpR8e7lAosd7RHAAUXIBIGMt96UpBEUju5VgozYcTx8kob6CPhHRBAKlSs6nMgMSJwGNdtg32dwzFXKjyGKsOdT6XziUuX7PwD5763q2ar4GxqNQG5cCNsr2yio6+TqyVcrP0jmccr36SzyJlzGfI8424r6NIGzYkPQOBGYYcjIIOa66xS15UibR0ojiiIfFnxIu7mdJZn+oT3qKFkx0jqs2JkMyLBEiMqC5hIo3+y9YpmeoKcF6nZJ52ned9rW3HcfvXv2kPDQg4Sc4HoBUG9htVvRadT1p4rn8fnMv6enh+7ugWIiZWVlLFu2jNWrV/vQKhVf8OL2F7l29bXsaNjhdNvQU5cQdVIuxhNO8YBlnkOjEciUJwjO7OoqiPHo6SQ/+QTBsx0Tm7e2tHjYouH5965/c9J7J/Hy7meBgTQ+ldGNKIr05hfQ9eOPvjZFZRimxOcC0CXWYfNUgZfuZqje5pm+j4S8wM8+GQzKprOLotgfeVnbKGWfHKrVLWi1flnUJVgfzNbjE9iaq6Hc6mGdOdmRulcZrcfDqN0BrWWSYz5noVtdtbz7HnV/f4KeXbsHfV8uqJkZ7qHIZBUVN8jtizos6NO19RiiCFYfVV7vz5w43SPdDxQh86+QEltnl8ekbOyinZum3cRF4y8iPSzdI2N4irToAIwZz2NJfoCyFieyCdPngiEEOmo8Z5w3qN4GiFKWSWi8V4cWbTa6t27FUlWFNurI0nuiKNK2ciW1f3kE0AI6BQABAABJREFUe5dva8Lc/O3NLHhvAWsq1HobKp7F507bpUuX8p///AeA1tZWZs2axVNPPcXSpUt58UX3tcRURg7bG7azuWYz+S3OF64JveL3xL+4HONpl3nAMs8iSySUNPh/MTLRZqP0rLMpPedczBUeiKBzkA5LBxpBg7k3AoAcP6jIq+J5OlZ/RenSpdQ98qivTVEZhmNSshHtWhAsVHV4oFp18XfwRA58pGwk0rD0L/BPU7zrpt4mOiwdCAh0dkWg1Qhk+CD7wlVkx6McPeoxZKdt6VrJca80cpRt7iIwuPf7b1+5kubXXsOUP/i8pj/SVpVHUPFDZHmWwvpObHYb5e3lbKrcQNvy5dQ+8ih2kwKO1l/egmemwIbn3O/LWboaoaJPM9YD93Q4sAiZ4/PUpldfo2vzj4hWq0dsqnviCYqOP57uLVs80r9Wo+XiCRdz/6z7CdQFemQMTxEWGIjO0IGg7WVTxV7HGy5+BO4pgxlXeM44b5B9EtxZAOe87PWhBa2WnK+/IuXFFwicdOQCdoIgUP/UU7S89RY9O5wP9FKS/W37qe+pJ1g/cuZqKiMTnzttf/75Z447TkqP+eCDD4iPj6esrIz//Oc/PPvssz62TsWbZIZ5acHnZ8jFyHwVaStjbWqi+T//wVJXf8RrTAUFWJubMVdVoYv37g7sgTw892G2XLIFW/sMQI20HSsYjz0GwWhEFx+PaDb72hyVIUiPCkG0xADwc22B8gMkHw2CRtIibFS42NmR6GqE2l2AAHmnKt69/OyLCkgAUU96tBGDTpqmVd97H+XXXkfPtm2Kj6sUGeEZJDSLWD5aibmyynMDRWdLVa3t1oGicEqhsDRCxIUXEHHRrwkaRC/eYrdQ2VEJqJG2Kv5JXl+kbWFdB829zSz5eAnXf/Nbav/6V1re
fJPePXvcH0S0Q2s57Hxf+v55k4IvpfETpkCEZyq/y3P7Q7MmjoSltpb6J56g/MorsbW2esQme0cn9u5uOr7+2iP9j3SyxN/QVXIb9l4ntHiDIkE7StLkQ+OlOZYP0BiNhJ500rAZRRFnnUXkpZei9VExbIBeay/VnVJQgvoMV/E0Pr+7dHd3ExoqTQpWr17NOeecg0ajYfbs2ZSVlfnYOhVv0h+l42ThGltFPrbqfPQT5yCEeqgoiQeRd9+LfRxpW3Xb7XRv2YJoNhN97bWDXhM4YQK5a9dgKihEY/Bh4QjAoDVQUm8BHJ8Mq4xsdFFRjNu8CUGvVmj1d3RaDUEkYKKObbWFnDVugbIDBIZLGoQl30H+Soi5Vdn+ByM4Bu4ugsotHimAJafKh2qlAj+5B9zXujZuxFpXR8yNNyo+rlJkhmeS+6Wdo/b/QGfS90RdeonnBpu4VNLd2/MpTFdwnPq90FQE2gDIc19uKXzJEsKXDK7pWNlRiVW0EqQLIs7o+yrYKiqHIm+IlzZ2EW6IIkQfQqelE5acTJQmGG24AhqwE86AlXdAwz6o2w0Jk9zv01FSjoH5d3jMYQsHyiM4Nk8VLRbCzz4bW1sbupgYj9gUffVVhJ1+GsZZjsmiOcuepj3Y7DayI7Ix6pWVEfIGU2Kms62wjP1NLkaS2+1+qT0/moi95RZfm0BZexkiIqGGUKICo3xtjsoox+d3lJycHD755BMqKir48ssvWbxYEu+ur68nLCzMx9apeBPZaStr+jlK+/+ep/iKe6i8ZGSJ3cvIkba+lkcIO+NXBE6Zgj556KqyushIgmfN9JJVR6aj10JNWy8AObGhPrZGxVuoDtuRQ0yAtBAubC72zADj++75+z73TP+DYYxSxJk3GHKkrcYqOfAO3IxKeWYZCQ8/TOC4PI+MrQQZYRlsyxIozgpCF+Ph6JeJZ8H0S2HW9cr3fdTZMPFMCPDsc0We62SEZaARfD4dV1E5jKTwIIwGLRabSHlzd7+MR8WlJxJ/333KFLYKioDcvsJNO993vz9niB0HC/8Ix3igyBrQ3GWmpduCINBfv2I4DKmpJD3+GKkvPO8Rm0AqchY8e7bH9NFf3PYiF39+McuLPaQ77mFclq3b+gY8ezSs/bsHrPIC+9fDf8+BLa96feienbso/81vaP3kE6+P7SpykFlmWKZf1hpQGV34PNL2oYce4uKLL+b2229nwYIFzJkzB5CibqdPPzydzBGef/55nnjiCWpra5k6dSrPPfccM2ce2cnU2trKAw88wEcffURzczPp6eksW7aM00/3jCi9yuDIk8GqzirMNjMGrWORnLaGKgSNiCFxZEaqZPVNDho7TbT1WAgP8o1TKuL884m84IIjvi+Kol88lNZXreel7S+RHXIMkENsaADhRtWRN9awm80Ier1ffCZVBic9LIOqdqjuKvfMAHmnwud3SZqEXY1SJOwIRnbidXdJERsHOm2Dpk0jaNo0X5jlMBnhGXw2S8OqOSJbFrtXwGtYYvNgqQecGvET4fx/K9JVb34+2rAwdAkJg96n5AWfqmer4q9oNAK5cSFsr2yjoK6TjPAMdjXtcjojblgmnw/7PoNdH8GCP46aKEU5yjY5Ioggg9bH1gyOaLeDKCJolbOv/94WnqFYn94kNsKCPmoNO7pswDGON7RbobkYytZ7zDaPUroGir8BYzQce41Xh+74+mu61v6AxhhMxFlnDXqNKIpYrVZsNlv/vy3V1WhCQtApEfXvJLWttSQaEpkUMYne3l6vj68yMtBqteh0OrfXqz532p533nnMnz+fmpoapk6d2v/6ggULOPvss53u79133+WOO+7gpZdeYtasWSxbtoxTTjmF/Px84uIOd+qZzWYWLVpEXFwcH3zwAcnJyZSVlREREeHOf0vFBWKCYgjWB9Nl6aKio4LsiGzH2k0TiDbWIC6+3cMWeoaQAB3xYQHUtZsoaehkelqkT+wY7mZScf31aENCiL3lFgwZGd4xahD2Ne9jW8M2rOYIIOegFGKV0Y8oilT+9ga6Nm0ia8VyDGlpvjZJ5QhMjMlhQzu0Wj2kbxqRKmkR1u6AglVS5KWn2P0JbH4Jjr4cpl3skSGOSzmOEH0IX22S5iq5cSMrgyAxOJEAbQAmm4nqzmrSwsb2d7P2Tw/T88svJD315KASCa29rWgEjaqFp+LX5MaHsr2yjcK6TjLjBjLiRFHEUl6OLjYWjdHNFPi8U8AQCm3lUPkjpM1WwPJh2Pa2FOWbdSLogzwyRHG9c9II5ooKhIAA9IOsVz1B60cf0/TPfxJzy++OKOPiLBabhYoOqVCxXKtkpJEYoSMw/gu6RC3dFhNGfYBjDdPnSceKLWA1g863MnJOU7ZBOqbP9frQEWefhSY4mMCJEwd932w2U1NTQ3d3d/9r1uZmxN5etOHhaJo9UJR0GHLFXO7JuYdQQyilpc5lCauMLYxGI4mJiRjckJb0udO2ra0Ng8FwWFRtTk4OOp3z5j399NNcd911XHXVVQC89NJLrFy5ktdee4177733sOtfe+01mpub2bBhA/q+tNsMHzqkxjKCIJAZlsmupl2UtpU67LSlqRBBA0LKkStN+jvZsSHUtZsobujymdNWRrRa6dqwgeA5c/pT0S01NXSt/QEEgbi77vKpfXI0mmCVHRuq03YsIQgCtq5ORJOJ7i1bVKetH3NMch4vbp+IjkTsot0zKeDjl0hO232fe9Zpu3cFlG+EVM9Jw1w0/iJOST2X9774ChiQzulcswaAoKlT0frxhrJG0PDsyc8SFxRHQmAs1qYmdJ4sEiKKUP0z7FkOx98NAW4+C8o2SsVk4sYrYJoo2afVEjhh8EXoHcfcwc3Tb8Zit7g9noqKp5DnWAX1HSzNyQAkKZeyiy6mZ9s2Ul58gdCTTnJvEH2QpG27/X+SRIKnnbZ2O3z1IHQ1wGUfQ/bJHhmmyEmnbcOzz9G+YgXx999H1OWXe8SmA7FUV2MuK6Pto48Vc9pWdFZgE20YdcYRq9U9OT4N0a5H0FjYWlnMcZmD38MPI3acFKXa3QQ12zw6X1Acq0nS64cB57MXMWRkEPOb6wZ9z263U1pailarJSkpCYPBgCAIWMLCsDU3o42MRO8h/eeh0LZrMdlMJAYnEmJQ16IqhyOKImazmYaGBkpLS8nNzUXjYiaJz522v/71rznjjDO48ZDiGu+99x7Lly/n888d16ozm81s3bqV++67r/81jUbDwoUL2bhx46Btli9fzpw5c7jpppv49NNPiY2N5eKLL+aee+5Be4RUEZPJhMk0IE7e3t7usI0qQ+N06pWlF1r6CtbF+K/W33BkxQazobiJkr5UKl9SesEFmPbsJeWF5wk9WZrI6hISyPjwA3p+/gV9UpJP7ZM/G92dh6cQq4wN4n//ezRGI4ZsBzd2VHzCpMQEeisvpxfoMtkIDfSA03bSeRCaAHmnKd+3jM0ChZIjlXGe1U6XF/kpkUEYDdIUreH/nqd3506Sly0j7FTP6OkqxdykubSvWkXp/RdinD3bo7qMAHx4LTSXQOJ
UmHSOe319cTfU7oRzX4XJ57nVlSAIZLzzNvbeXoQhIisMWoPDUlAqKr4gL16K+C+s6+hPdy9tK8WQeQK9e/ZgratTZqBpF4MuAKZcqEx/Q1H1k+SwDQiD9PkeG6a/CFmcY3q2tpYWAAKP8k4QSsQF56OLiSb8jDMU67Nfqzs8Y8TKV+m0Wgz2eCyaSrbWFDjutBUESJsjSX2UrR9ZTtvqX8DaC8YYiMn1tTUHYTabsdvtpKamYjwgqj8gIQESExF8IKciiiI2rQ2NRkOIMYRAXaDXbVAZGQQFBaHX6ykrK8NsNhMY6NpnxeeiQZs3b+akQXZoTzzxRDZv3uxUX42NjdhsNuLj4w96PT4+ntra2kHblJSU8MEHH2Cz2fj888958MEHeeqpp3jkkUeOOM7jjz9OeHh4/09qqueqjo41ZG03R4uRmbavo2JtJA37YiBY+Wre3kLehS/2A6dt8KzZaCMjsbW09r8mCAJBRx1F1GUejGRzAFEU+z8bDS2SflHOCEshVnGfoClTCMjJGbELgrFCeJCemBAprbC00UOFFmNyYMaVEBo/7KUuU7YeTG3SYibFCX07J2jsaaSio4KCujbg4M2ogHF5BOTmEODHRcgORJ+cjL27G1NhoRRx6ikEASacKZ3v+dS9vppLJIetoIUsN6MGD0ATGOiTBaWKilLkxkv3otLGLpKCUxEQ6LB0oLvlWsb9tIXIX/9amYEyj4MzlnnH0bVvpXTMXeTRFPbivkJWOQ5G2qa98jI5a9d4Tb9cHxdH5K9/jSbYMaeyI8gFNUe6VneEQSrKvLexyLmGsrSALDUwUpB1eNPnSs9WL9L81lt0b9mCaLUOed2hEYqCVuuz56vVbsUu2gHUjVeVYXE1uvagPhSwwy1MJhPWQb6kFouFnp4ej49vt9uJi4vjX//6FzNmzODCCy/kgQce4KWXXjpim/vuu4+2trb+n4qKCo/bOVaQtd3kh/5wmLZtorM6kM7aYK8/ZJQky9VKpR4g5sYbyF27hohz3Yxa8gAtphbaze0ICFQ3SJNMNdJWRcV/yYw1Iuja+amq2NemuM6+voyfcaeCxjPFZD4p+oTTPzqdd0qfBA5e5Cc98ghZK1YQkOn/+oC1XbW8bd/Mtr9fSvaXqzy/sTJxqXQsXA3m7qGvHYo9fVXOM4+DYA9KOvSxq3EXl35+KU9vfdrjY6mouENSeBBGgxaLTaS21UpSiJRtVS60DBlF7tfky/d0zxWc7rXYqGiR7knZTsxT9XFxihYFcwYlNtlGehEymWRjOgDlHWXONZSdtuWbwG5T2CoP4iM9W2tLC3WPPkbZZZdjOUKAnT9itpkByWHrEekvFZVD8PmnbObMmfzrX/867PWXXnqJGTNmONVXTEwMWq2WukNSderq6khISBi0TWJiInl5eQdJIUyYMIHa2lrMZvOgbQICAggLCzvoR0UZ+lOv2ksdmjwEzllI/OUnE3XerzxsmWfJipEckGVN3Vhtdp/aog0NRdDrEW02ujb/SOWtt1J1zz2YKyt9ahcMOPNjAuMR7QYijHpiQkbookHFLXp27qLub3+nbflyX5uiMgTa8PWE5D7GR/uPvBHqNuZu2PxPePcy5RdJonjAAt9z0ghdli70Gj2mHkn2RY5uG2k09Tbxj23P8FLPau9EwCRNh/A0sHRLVa9dRY7UlSN33aTihhup/N3vMJWUDPp+YUsh2xu2s69pnyLjqah4Co1GGNC1revsD64obfdA4R1RhIof4Yt73duEGYrGImgsAI0OchZ6Zgxgf1MXoihlnEQHDz1PFUUR0eY7B1/3Tz9Rfv31tPz3Tbf7krPhRnqBxdwoyf4mk5Nrn/jJ0nNp0jlg9n32pMPog0Af7HWnrdjdTfgZZ2CcNQtDSorT7W2dnZjLyrAoJdPiICa7JJOpRtm6T0ZGBsuWLfO1GX6Pz522jzzyCK+88grHH388Dz/8MA8//DDHH388r732Go899phTfRkMBmbMmME33wxM3O12O9988w1z5swZtM28efMoKirCbh9wlBUUFLhd4U3FNdLD0qXUK3MHTb1Nw15vOGo2Ufc/T/jNzn1W/I3kiCACdBrMNjuVLZ6PMB+O9tWrKVqwkPIrrqDjy9W0f7qc/edfQPvq1T61S54MhuukSI+c2BA1RX6M0vPzVppff5225St8bYrKEGRHpiOKAh0mD97XNDr49hHYuxyqtirbd90uaKsAXZBUZdxD3Hr0rWy5ZAudddKCSc4g8OVC3hUywzI5LfM0fj3+19i8EWUkCDDRTYmE1nKpoBmCVAzJTexmM53r1tHx1ddHjESckzSHJ45/gssmXub2eCoqnkaWoSqs6+xPe9/ftp/Wjz6m/Lrf0L7qS+UG+/Ba2PwiFHyhXJ8HIm/CZcyHoAjPjAEU10uZc9mxwcPOU00FBRTOP46ahx/2mD1Djl9UTNeatbT8739uRdseKGGWGTaynbbTE8YB0EONcw21OvjN93DGMxAYrrxhnuLCN+HeMsnp7EX0yckk/e2vpL/xb9c6sNmwdXRg73DMQW6zi2wsbuLTbVVsLG7CZnft826ySU7bAG2AQ9dv3LgRrVbLEoUK/vkD33//PYIg9P/ExsZy+umns3PnTl+bNirxudN23rx5bNy4kdTUVN577z1WrFhBTk4OO3bs4LjjjnO6vzvuuIOXX36ZN954g71793LDDTfQ1dXFVVddBcDll19+UKGyG264gebmZm699VYKCgpYuXIljz32GDfddJNi/0cVxwnQBnDVpKu465i70Gv0vjbHa2g0Apl90bYljb7dmW1fvZqqW2/Dekiaiq21lapbb/Op41ZOu9Lbpcj5kRqNpuI+wccdR/h55xJxwfm+NkVlCE5MnUdn/l8Iav6N5wbRGSRtQhjQKlQK0S5F2I4/HQzG4a93gx6LSE2btIGcEys5SSpvvZWixafQ8d13Hh1bKYx6I38//u9cn3sFTc88R9kVVyJaLJ4dVJZIyF8lVcB2lr19Gz/p8yDE/WrngkZD+uuvEX///eiTkwe9JiE4gVMzT+W4FOfnuSoq3iavb65VUN8xEGnbVoqpoICuH36ga/MmZQYSBJjc90zf+YEyfR5KbZ9DwcNFJfuLkDmgZ9v5/RpsLS1Ya70bLSgTfsaviLrmalL/+ZJbgRCyhBlAWliaUub5hLnp46UTbRdlLQ2+NcZbaPUwwjTYNUYj+oQE9MnDF8letauG+X/7lote3sSt72zjopc3Mf9v37Jql5OOeSAmMIa0sDQiAiIcuv7VV1/ld7/7HWvXrqW6utrp8XzJkTLPZfLz86mpqeHLL7/EZDKxZMmSYduoOI9ffDOnTZvGW2+9xe7du/npp5947bXXyM11rXLhhRdeyJNPPslDDz3EtGnT2LZtG6tWreovTlZeXk5NzcCXMzU1lS+//JItW7YwZcoUbrnlFm699VbuvfdeRf5vKs5z+4zbueKoKwgPGHqHUrTb6XrzMSy/rEa0enhR6AVkzSt5d94XiDYbdY89LqWoHfam9FrdY4/7LPpLlkcw90iag45MhlVGJw
FZWSQ98ghhixf72hSVIRgXHwmijpLGLpcjGhwbqE+bUI6iUorEqXDR/+DcV5XtdxCK66VFfmxoAOFGadPStHcflvJyNEblCsV4AyEwkJZ33qF782Z6d+/27GDJx0BoklR5vsnJojEAhV9Jx4nKSCMIOh3GY44h6vLL1EwQlVFBXrwcaTvgtO20dBJ22qnE/+EPRF50kXKDyU7bwq+gu1m5fmXOfRlu/gkmn6d83wdQ1Hc/d0TPNvrqq0h74w1irvfg5uYQaIKDib/7bgzp6W71I8/Rk4KTCNIFKWCZ74g2hiLYIgDYVLHX+Q5sFqjcOvh6yt8wdfhkWEt1NdbGRrf6EPR6dDExaIKG/ryt2lXDDW/+TE1b70Gv17b1csObPzvtuNVr9YQaQgnUBQ57bWdnJ++++y433HADS5Ys4d///vdB78sRq9988w3HHHMMRqORuXPnkp+f33/N9u3bOemkkwgNDSUsLIwZM2bw008/IYoisbGxfPDBwCbXtGnTSExM7P/3unXrCAgIoLtbkpxpbW3l2muvJTY2lrCwME4++WS2b9/ef/2f/vQnpk2bxiuvvEJmZiaBgUP/H+Pi4khISODoo4/mtttuo6Kign37BqSf1q1bx3HHHUdQUBCpqanccsstdHUd2dcxlH0FBQUIgnBQ/wD/+Mc/yM7OBsBms3HNNdeQmZlJUFAQ48aN45lnnjno+iuvvJKzzjqLJ598ksTERKKjo7npppuwHBBkYDKZuOeee0hNTSUgIICcnBxeffVVRFEkJyeHJ5988qA+t23bhiAIFBW5MA91AJ1Heh2G9vb2fh3Y9vb2Ia91RS/25ptv5uabbx70ve+///6w1+bMmcOmTQrtEqt4DWvZXsof+S8I/2Hc1i0IupEdmZvtB5G23T9tPSzC9iBEEWttLd0/bSV4lhcq/B6CrKHW3BoBQG7fQkJFRcU/SY4MwqDTYLbaqW7tITXKQ9GquYskmYTGAkmzMCZH2f496HzbVr+NxzY/RrgwHph1UBGyjPffw1RQQNCkozw2vtLY7Daqu6rRXnMR8dGp6N10BAyLRgNXrZS0bbUuTGsvfhdKvpd0CL2AxW7hf3v/R2Z4JvOS5qH1UHE7FRWlkOVaShu7OCpqNpsu3kSwXpqzBk2dquxgceOlFO26nZLkzYwrle0fIMa1wCBncCbSVtDrfTKnVprRUoRMJlhIpJNWttcVciHHO97QboMn86CnGW7aArF5njPSXUQRnpkKQZFw6YcQmeG1oRv/+S9a33uP2DtuJ+a665xqK4oiPRbHAohsdpE/Lt/NYO5zERCAPy3fw7ycGLSa4ed6QXqtUxuy7733HuPHj2fcuHFceuml3Hbbbdx3332H9fHAAw/w1FNPERsby29/+1uuvvpq1q9fD8All1zC9OnTefHFF9FqtWzbtg29Xo8gCBx//PF8//33nHfeebS0tLB3716CgoLYt28f48ePZ82aNRx77LEYjdL8+/zzzycoKIgvvviC8PBw/vnPf7JgwQIKCgqIipLqKhQVFfHhhx/y0UcfHVT3aSja2tp45513APolRouLizn11FN55JFHeO2112hoaOj3073++uuD9jOUfXl5eRxzzDG89dZb/OUvf+lv89Zbb3HxxRcDkjRqSkoK77//PtHR0WzYsIHf/OY3JCYmcsEFF/S3+e6770hMTOS7776jqKiICy+8kGnTpnFd32fx8ssvZ+PGjTz77LNMnTqV0tJSGhsbEQSBq6++mtdff5277rqrv7/XX3+d44//f/bOO76p6v3j75vVkXTvlg662HsJyEYBFcUvIiDKUHGhfkH5ugf6cytOFDeKC7coIojIBpmyZ0t3S/demb8/0gQK3U1y0/a+efWV9N5zz3lSkptznvM8n2cksbE2Xn/UIIrT1sfHh6ysLAIDA/H29q7zjW8ymRAEAUMb03OTaD1ag5ak4iTKdeX0D+pfbztD8hFUHnqQK5G5t33nnTNE2upzm5YC1NR2tkRn0JFeai4IkJln/v+ObUZFXon2iS47h+qTJ9CMGiW2KRJ1IJcJBIbuo0D4h5VHc3h85Dz7DOTqZdYoPLvZHG3rf3/r+zx3FFRq8LWvNl9CUQInCk4QonIBhtSSfVH4+qK47DK7jm9rVp1axUt7XmJc3DjeHPNfxwzqG93yaxUuED/BZqYU/fADyk7huPXvh6wOTdv00nRe2/cabgo3/rlJChiQcH7CvN1wV8mp0BrILNJaNW7tRq8bzE7bIz/Y1mlrNIADNkmMRhNnc89r2rYVtOnpFH7xBXJvb/zvvrvZ15dUl6CSqdp8ETILga7hlGlPkFjUzKJ7MjkE9YDkbZC607mdtrmnoCLfXPjPo3GJAVuiz80FkwnXrt2afW2lzkD3p2yjpW0CzpVU0WtJ0+T/Dj49Fq2pFDeFGx6qxu+Fn3zyCTfffDMAEydOpLi4mC1btjB69Oha7Z5//nlG1axlHnnkEa6++mqqqqpwdXUlNTWV//3vf3TtapbtuDAjffTo0XzwwQcAbN26lX79+hEcHMzmzZvp2rUrmzdvtva7fft29uzZQ05ODi4uZj3e1157jV9++YUffviBO+4wR/trtVpWrlxJQEBAo6+vU00BOUv07LXXXmu188UXX2TWrFksXLjQavfbb7/NqFGjWL58+SVRvE2xb9asWSxbtszqtD19+jT79+/nyy/NhRSVSiXPXKAP3rlzZ3bt2sV3331Xy2nr4+PDsmXLkMvldO3alauvvpqNGzcyf/58Tp8+zXfffceGDRsYP95csDI6+vw8c+7cuTz11FPs2bOHwYMHo9Pp+Prrry+JvrUlosgj/P3331ZP/qZNm/j7778v+bEcl+h4HMo9xA2/3cATO55osJ2rRwUxV+cQfW93B1lmX6L9zQt1MSNtFU24OTennS1JK0vDYDLgKndHr/VArZIT6tV4WopE+0WXnUPCqFGk3bMAQ1kbqtLbwfDQVCB3T+VY/nH7DmTRKLSVRMKGp+DtvrCv7mgAW2Ep3GLSmu+rbX0z6sJCRQ7HaISqhjO47Dp8RQVZTz1N6ty5GAqL6mxj+btEeUYhE5xCpUxCokFkMoG4mvvSmeza37WG4mLKtm2j8uBB2w3Yc6r5MXk7FGfYpk9dJSztCt/Nhqpi2/RZD1klVVTqDCjlAhGNZJdkv/Iq+R9/3Oo0cVtQffo0BZ+vpOCzzzFWVTV+wUXM7TmXPbP2cH8/G2yaOgGW77KsitTmXxxpLipKyk7bGWQPUsyRnIQPMtcHcCDh771L7JbNbS7KvFJfSW5FLrkVjQcwnTp1ij179jCzRkJGoVAwffp0PvnkUsmt3r17W59b5A1ycnIAc82m22+/nfHjx/PSSy+RmJhobTtq1CiOHz9Obm6u1Rk8evRoNm/ejE6nY+fOnVYH8aFDhygrK8PPzw+NRmP9SUpKqtVnZGRkkxy2ANu2bWP//v189tlnxMfH8/7771vPHTp0iM8++6zWWBMmTMBoNJKUdOlmSFPsmzFjBsnJydYs+a+++or+/ftbHcUA7777LgMGDCAgIACNR
sOHH35Iamrtz3GPHj1qRRGHhIRY/94HDx5ELpdbnd0XExoaytVXX82nn34KwG+//UZ1dTXTptmvzoookbaWP4Ber2fLli3ceuutVi+9hESUZxQeKg8C3AIwmoz1L2ryzgAg+DvxDmYz6FyzG59XpqW4QmfVNHQk7gMHoAgORp+dXbcOkyCgCArCfeAAh9vm7eLN40Me59/0bL49KhAbqJH0Ajs4yqBAVJ07I1Or0efkINe0bWdXeyXSM4r0Ysgsb8HCpzl0mQR/Pg4uHmDQtyxV3kJVCSRtNT+Putw29tWDJaW0tMQHwCqPUPzrrxiKitGMGtlqrUFHYomySilNoSovB+2+A7h06YJLZztHXx39CdY/ZpbKuPadxtuX5cBnV0PXa2DskzYpwGIoLcNj/Hh0586hDKq7qJlF5qe9pBBLdAxiAz04lF7M6ewytG6r+S3xN8ZHjmf85mJy33wTz6smEda3r20G8w6HiGFQlAKFSeBVd0G/ZnF2C5TnmHVGXZovvdccLPrkUX5qFPL67yuGsjIKvvgCdDo0Y8ei8Pe3q12NoRk1Cq/rr8dz0kSEOrIEmoJcJsddZt+inY6iX3BP/kyOQyu0wEfRVpy2qbvMj5HDRRleWVNzqLm4KeUcf9acIWMymdAmJiG4uqAMDkJQ1J777UkqYO6KvY32+dm8QQzu7NtoOxPVGPFGJW/8M/LJJ5+g1+sJDT0fxWwymXBxcWHZsmV4eZ2v36NUnl/3W9a3RqO5OO2SJUu46aab+P333/njjz94+umnWbVqFddffz29evXC19eXLVu2sGXLFp5//nmCg4N5+eWX2bt3LzqdjmHDzO/HsrIyQkJC6pQL9fb2tj5Xq5ueIdC5c2e8vb3p0qULOTk5TJ8+na1bt1rHu/POO7n//ks3ciIiLi1W2BT7goODGTt2LF9//TWXXXYZX3/9NXdfkBmwatUqFi9ezNKlSxk6dCgeHh68+uqr7N69u1Z/F/69wfw3t/y93RrRSAa4/fbbueWWW3jjjTdYsWIF06dPt0pQ2ANRnLbWwRUKXn31VWbPni2mGRJOhr+bPztm7GjcIVfjtLW5dqFIaFwUBHu6cq6kisS8MvpH+DjcBkEuJ+ixR8n470KzhuOFjtua/4+gxx5FaKK+jS3xdfVlRtcZ5GacAU43qbiDRPun8+pf6kxBlnAeuvvHsKMYivU2ipiqD+9weDjZLGnQWhL+AqMO/OLsrn9oibzMKzJP3mNr5BEKv1lF5b//Ivf1bVNO22B1MK5yV6oMVaQueRLDX1vxv+9eAhYssO/A7r5QmgUn1sDVbzTutD+5xqyBfHYzjH/aJiYogwLp9PZbDbax/H939mwfKcQSHYP4mvvS6ZxS3AKz2X1uN0HqICb3nYwyMgJFcEgjPTSTGz8Hd3/bVbM/9bv5scsku2qUQ3P0bAWCHn2EqqPHUNl7U6sJCHI5oS++ILYZTsMVMZex5PsK9HIBncGIsgEH/CV0GmTW2S9Og6JU8L7UQSU6JhMk10TaRgx17NBGI0IrPtuCIOCuOv8dr+5efwDXiLgAQrxcOVdcVaeurQAEe7kyIi6gSZq2oEDdhHmmXq9n5cqVLF26lCsvKpo8ZcoUvvnmG+66664mjGcmPj6e+Ph4Fi1axMyZM1mxYgXXX389giAwYsQIVq9ezbFjx7j88stxd3enurqaDz74gIEDB1qdsP379+fcuXMoFAqioqKaPHZTWbBgAS+++CI///wz119/Pf379+f48eNN1nltqn2zZs3ioYceYubMmZw9e5YZM2ZYz+3YsYNhw4Zxzz33WI9dGEXcFHr16oXRaGTLli1WeYSLueqqq1Cr1Sxfvpx169ZZHdX2QvS8rLFjx7JlyxaxzZBwIgRBaFIEZernx0nf7oNW73jnpr2Irom2tWhhiYHnlVcS9tabKC7a/VQEBRH21pt4XvTF42gSaibDcfbWVJNoE0gOW+dnYGgXAPRCMWVaO8tY2MJhC+clFrpMsk1/9aA1aEkvM2t1G6oD8HRVEKAx63hpxo5BM24crt3blgSQTJAR4WleoBb26IRLfDxyHwd8T0deDm6+5uIvlpTPhji+2vzY/Vr72nUR7a1Yj0THIL6m8GtCdhmjw0fz3PDnmNNjDurLLiN2/XqCHvqfbQfUBNrOYWs0wql15uddr7JNnw1gddoGNvx9JNeo8b3pJkJfeL7NZ44lFydz4283smTnErFNsRkhnq64KeXoDCbSCiqad7FKDSF9zc+dNdq2KAVKM83O5U6DHDasNi2NM5ePIOuppzHVldVpY+Qygacnm+dRF3/KLL8/Pbl7Ex22TWfNmjUUFhZy22230bNnz1o/U6dOrVMioS4qKyu599572bx5MykpKezYsYO9e/fSrdt5LeDRo0fzzTff0LdvXzQaDTKZjJEjR/LVV1/VSvEfP348Q4cOZcqUKfz5558kJyezc+dOHn/8cfbt29fq1+zu7s78+fN5+mnz/+3DDz/Mzp07uffeezl48CBnzpxh9erV3HvvvXVe31T7/vOf/1BaWsrdd9/NmDFjakUyx8XFsW/fPtavX8/p06d58skn2bu38UjrC4mKimLOnDnceuut/PLLLyQlJbF582a+++47axu5XM7cuXN59NFHiYuLY+hQ+258iO60nTRpEo888giLFy/mm2++4ddff631I9Gxqe9mbiwrpjzNSGm6G7KgLg62yn5YduUtEz6x8LzySmI3/kXE558T+tprRHz+ObEb/xLVYbs1fStHco9wOrsQaPu6jxK2xWQ0YtLrxTZDog56hQZj1Js/r8fymrfb3WKKM0DXfE0+AAw6OFNTkKLr1bazqQ7SStMwmoy4yNwx6T2IC/KwLt79588n/N1luESLH4HVXCxagCdHhBP962p8a6r62hW54vz/l8UhWx8VBZC0zfy8m+2ctsbq6kbbWDSM20uxHomOgWXOdTavjM6esVwXex3xPg6QJzPooaCZhaAuJmOfWRrBxdO8uWNnLAWFG4+0dU6M5eUUfvMNuW83QWamhsTiRE4UnOBEwQk7WuZYZDKBzv5qkFVxLCu7+R04u0SCxa7Q/qBynKRF2aZNGAoK0Kak2HyzwlRPAfuJPUNYfnN/gi+qhRLs5crym/szsWfTMgVMJhNV+iqMJmOjbT/55BPGjx9fSwLBwtSpU9m3bx+HDx9utB+5XE5+fj6zZ88mPj6eG2+8kUmTJtUqtjVq1CgMBkOt4majR4++5JggCKxdu5aRI0cyb9484uPjmTFjBikpKQS1UKriYu69915OnDjB999/T+/evdmyZQunT59mxIgR9OvXj6eeeqqWk/VCmmqfh4cHkydP5tChQ8yaNatWH3feeSf/+c9/mD59OkOGDCE/P79W1G1TWb58OTfccAP33HMPXbt2Zf78+dZiaxZuu+02tFot8+bZqcjyBQgmR2xxNICsgV1UQRAw1PPhcyZKSkrw8vKiuLgYT0/76iR1FP5I+oNl/y5jYPBAnhn2zCXnTZXllP/6KdozJ/B5bFmrUiyciRU7knjmt+NM6BHEB7cM
FNscp8FkMnH5qssp0ZagS1lIVUUwmxePJsq/7VTllbAfOW+9RdG33xH06CN4TZ4stjkSddDnwykYXRK5q/sTLBg03b6DrZplTn2fuaplkbJnN8PK68ypuYtP27Xa+MaUjSzcvBB/ZQxJh+czfWA4L9/Qu/ELnZxl/y7jg8MfMDVuKkuGLXHcwGf+gq+mgiYIHjhR///dgS/g13shuBfctd0mQxtKSjg9bDgu0dFEffctMtdLC2UWVhUy8tuRAOy+aTfuyvah/SjR/jEaTfRcsp4KrYG/HhhJbB3ZTiadDkFpw3oMGQfg6xvNOuX3HWi5rMFfS2D7G9DjPzDNvoUlAQY9/xe5pdWsXjCcPuHedbapTkhAm5qGetjQOu8VYlJ55AjJ025EUCqJ3bwJhZ9fo9cUVBVwKOcQMkHGqPC6i/e0Ra5ceS9Zpi0M85nDB9cubt7F6fsgaQvEjIPQvnaxr1Wk7YF/v4SArjC0+U6tlmLS6ajYvx9kMtSDGy9CVlVVRVJSEp07d8a1ns+KWdc2EWNVFS5duiCr5z5kMJrYk1RATmkVgR6uDO7s26wIW51Bx+nC0wiCQFffrlIx0Q7Otm3bGDduHGlpaQ06vRt6DzfVjyiqpi2cF1iWkLgQmSAjtTQVb1fvOs8Lbmo00+9zrFEO4HykrXjyCM5ItaGarr5dSSxMIrnSF5VCRngjFXklOg4mrRZDQQHlu3dLTlsnxUsRSiGJHM91QKStR03ExKm1LXPaWtJou0y0q8MWzhelkunNkz1LNJu+oAC5RtPiYjBiY0n9t0SVmkwmjGVlyD3sLGvTeSS4ekFZNqTtPh/pdDGWSNxu19ls6KqTJ0Gvx1heXq8TxiKNEKwOlhy2Em0KmUwgLlDDofRizmSXUUYiJwtOMjR0KP7Hszi35BmUoaFEfNq0lN8m4R8P2nIoz4XMAxDWwiK4J2vkbuycOQFQXKkjt9QccW+RPKuLwu++o3DlF3hPu4GQ//s/u9vVHNx69cLzmmtw6927yQ5lX1dfxkSMsbNljidIHURWGWSWtiDSttNA84+zEj7Y/ONgBKUS9WWX2bbPCzZ0TJWVUI/TVi4TGBrT+CZEfWgNWgCUMqXksO3AVFdXk5uby5IlS5g2bZrNopQbQvR328qVK6muI51Mq9WycuVKESyScAYsaYNJxUkO0btxFiwTvJT8cvQGaUPDgqvClU8mfMKTfb4Bk4pof7XNtYck2i4+06YR+eUXhDz1lNimSNRDqLtZ4zSlNNn+g1k0C0+tM2sZNpexT8CNX8Cg221rVx1YnJqV5eaKxZYiZOeWPMPJ/gMo+vEnu9tgDyxFtpJLkinfs4eE0WNIu+vuRq6yAQoVdKn5/69PIqGyyBxNDdDddk5b9eDBxG7ZQtibb9TbRipCJtGWsUTXns4u4/3D7/P87ufZe24vck9PtMnJVB45gsmWwTgumvOf5yM/tKwPowH6TIfwIRBbd0EZW3K2Rt4syNMFD9f6o44Vvr4oQkLQjHLOqNSw117Fd/YtyJpRRb49cnXkNEpPLcG19HqxTZFoBGWnTrh27YrcjlnP1Uazz0olb5sb6hK24ZtvviEyMpKioiJeeeUVh4wputN23rx5FBcXX3K8tLTUIfoQEs5JhEcEAgKl2lIKqgouOV/+7RuUr3oFQ+YZEayzH6FebrgqZWbR+8JKsc1xOhJyzJNhSc9W4kJUUVG4DxzYZqMSOwKxPtEA5Fal2X+wyMvN2oXlOWYtw+biojEXpwrtZ3vbLsLixMsvMmuexdZkW+gyMkCvRxFs/917e2CJtC2oKqDKT4M+O5uqEycwarX2H7zPTLjsHuh9Y93ndZXQ9ybz+yTAtpqcyqBA3Hr1qve8JbJaKkIm0RaJr9lUOp1Ten5jpjgZl/h4wj94n5g/19terqzXNPPj0R/NDtjmIpPDiAfhtj/BzdumptWFJVOusXmq/113Efv3RjRj2n50qslk4tOjn/Jn8p9UGxrX9W5L9AwJAaNryzMgy/PN791jP9vWsNaSexrS95s1/B057DvLyP/kE/S5uTbvW+bqiqCwbxK55f3tInex6zgSzs3cuXMxGAzs37+fsLAwh4wputPWZDLVKUKdnp5ep3CzRMfAVeFKqMYsUm1JJ7yQnOUrSF2ygvI/vnWwZfbFLHpfU+xB5GJkzoShZqJ+psZpG1eHlpqEhITz0ifY7ByrMGU3qYBDq1CoIO4K8/OTv9t3rFZgMpmsTjxdVQBuSjlh3m4ARP3wPbGb/sZ9QAvTgUVGrVQT6BYIQLqHloiVnxO/YzsyR2ysRI+CiS/Wn0rtGQLXvg3zHP/ekIqQSbRl4mqctgnZZecz4kqSEBQKNKNGofDxsf2gMWPBzccseZK8zfb92xhLIeGmFCETBAFBbl8JntZgMpko37mT3GXvNtiusLqQN/a/weIti+3//e5gOtfUzigo11JU0YJNx+St8MOtsHWpjS1rJXs+hI/HwoanHTakUaulYMUKcl59Dd25FshNOAEWeQQp0lbC0YjmtO3Xrx/9+/dHEATGjRtH//79rT99+vRhxIgRjB9v/zQWCeflYk08KyYTKtdKlGo9Lj2cWCuohVgkEhIlp62VhZsWMv778RzK3wGcXzhISFjQZWWR98GH5L7b8OJCQhwGdYrBZJSDoCO9NNP+A1pSak/90bzrfroD/n4eymwfBXIxBVUFlGpLERAwav2ICVQjq5F9EQQBZUiI0xWoaQ6W7/DkkhTUgwcjc3MT1yA7oi8sJOuppyn64YcGJZ0skdVRnlGOMUxCwoZYNszP5pXRSWOWvLG8p+2GQgXdp5ifH/m+eddWFJhlFSqLbG1VvSTmNO601WVlOcqcVqHPyiL1ttvJW7aM6rNn621neQ+EqENwU7Sv+7zaRYFvp024hX/MxrMHmt9BRI2uevZRqCy0rXGtIWWn+THCttqyDWIwEPDgA3heNQnXHt3tM0RxMdr0dIwVFXbpX4q0lRAL0QqRTZkyBYCDBw8yYcIENJrzX24qlYqoqCimTp0qknUSzkBnz87syNhx6YSwLIewy3JAkMGA0WKYZlcsE72zUjEyK0klSWRXZGMqNO/gS/IIEhejz84m9403kHl54X/XXU4dvdIRCffRYNL5I7hksy/jFBGenew7YOx4kCkg7xTkJ4JfTOPXFGfA4W8BAQbPt699mCf9zw57ljXHTrHRpLRKI7QXOnt1Zs+5PZwrP+f4wY0Gc1Te2c0w7unzVeezDoFea47CtWEad9WRIxR99x0Ve/bgfcMNdbYxmUyEacKo0FVIkbYSbZIwbzfcVXIqtAYUBrN0S3pZOlqDFnlpBcW//44+L4/A//7XtgP3mgb7V8Dx3+CqpaBs4mbWqT9g9T1mqZs7NtvWpnpoLNJWl51NwpixuMTFEfXjD47JPmghytBQPK+5BrmHR4MbiJaMyPYq++KiTkUnS+BA1nGm9mymk9MjCPxiIT8BUnebC5yKTUUB5BwzP48Y6rBhZW5u+M6aBbNm2W0MQ0kJhuJiBJUKmbtti30aTUZ0NXISUqSthKMRzWn79NP
mcPyoqCimT5+OaxuOJpGwDxemXtUiv0bH1jui6RO3NkRMTaSt5LQ1ozPoSC9NB6C83Be5TCDKr2MXRpC4FNeePfG8ahJu/Qdg0uslp62TIZcJ+OrHc66gHKM2wP4DunnD6EfApzNomqgLe7omKjd8MGgC7WaaBY1Kw/Vx17PtwEEgg7ggcxRbwZdfoU1OxmvyNbj16WN3O+zFPX3vYWH/hWhUGkwGAwWfr6Rizx5CX3sVucbODmp9NXwzE3QV5mJjFn3ira/BiV9hzBMw6n82G04ZGorf7bch86i/AIogCLx/xfs2G1NCwtHIZAKxgRoOpxeTV+SCWqmmXFdOWmkaERVuZP/fc6BQ4H/nnbbNEogYCqMeMReZVDQjwu3UWvNj3ATb2dIAOoORlHxzhF9MYN3z1KqjR0GhQKZWO7XD1kLYq40X2Wnvsi+BruGUaU+QUFR/tHGDRA6rcdrudA6nbeo/5kf/eNA4YD7mQOReXggqlV3mGBZpBJkgQyGI5kKT6KCI/o6bM2cORUVFfPnllyQmJvK///0PX19fDhw4QFBQkMPEfSWcD0v64CWRtnmnzY9+cQ61x1FYducleQQzaaVpGEwGXGRulOo9iQxwR6UQXY5bwskQFArCXn9dbDMkGqC315WkpWZRVOKgiNKRzXTKnaxZ4FukFRzEmYvSaUvXr6di715ce/Zo005bX1df63NBLqdw1Sp0qalU7NuHx+jR9h1c5W7WNT6+2vwT2g+05ZDwl/m8RfPYRrjExhK4eLFN+5SQcEbiAj04nF7MmZxyojyjOJZ/jOTiZKIjxuJ51VWooqIwabVgS6etTAZjHm3eNbpKSPzb/LzLJNvZ0gAp+RXojSbcVXKCPet+/R7jxhG/Y7tdCjGJRXuXfYn0jOJsHmRVtLCQasQwOLDyvCSB2KSYpeaIHOawIbUpKVQnJaG+7DK7yj7JPT2Re9a/edoaLpRGqKsek4SEPRHdaXv48GHGjx+Pl5cXycnJzJ8/H19fX3766SdSU1NZuXKl2CZKiIQlzSajLAOtQWtNRcj5fDVlewPwvcoVb/HMsxsW0fv8GtF7b3fn34m3J5ZIa29lGHkIxEnSCBISbZIYZ9brriqBpK3m512vdsiQW9O34iJzJTEvD1Batbp9Zs3CtVcv3Pv2dYgdjsJ39mxMWi2ucQ7acO1+3Xmn7binzQ5bXYU5SyfE8c5wo8mITJA2HCXaNvE196nTOaV0Du/Msfxj5mJkgkDY605UbOnsFvPn3bOTwz7vF0ojNOTUkXt5IW9jxba16emUrluH7223XfLa2rs8Qo/AGDblQYm+hXr8Fudo5r/mzUOVyNmCFudx5HCHDVn088/kv/8BnldfTdjS1xw2ri2xRNpKerYSYiD67HHRokXMnTuXM2fO1JJIuOqqq9i6dauIlkmITYBbAGqlGoPJQFrp+d3NqqRMqouUmFx8G7i67aJ2URDiZf4sJEoSCdYdfIXBnK4s6dlKNISxvJyyLVswVleLbYrERUT6uyJzTeVA/ibHDZqXANuWwuk/G26XuBGMOrP2nL9jnIov7XmJ2zfcRrU8FaVcINLXrL/mOXECQQ/9D1VUlEPssCev7X2N2/+8nYyyDHxvnoXfrfNQOiqDKu5KULhCwVnIPmZ23oLZmWvDKBlDWTnVCQmYDIYG2z2z6xnGfz+e1QmrbTa2hISjsWwuJWSXWSMrLykYbC9S/4Gf74KjPzbe9tTv5scuk2z6eW8Ii9O2vc1TjVVVJF03hZzXllKxZ2+tczqDzrpG6+zZPuURBod1A0Avz6VSp21+B94R5s0Dox4yD9rWuOZSXWrWdgfH6tmq1SiCg9GMGmn3sUwmE8bKSoxVVTbt1xJp6wg9288++wxvb+9G2wmCwC+//NJgmx07dtCrVy+USqW1plRjLFmyhL7tLHCgrSO603bfvn3ceeedlxwPCwvj3DkRildIOA2CINQpkRD88jt0euJ2NNfNEccwBxBt1bV1wog0B2PZwa+u9AfOVy+WkKiLs5OvJe3Ou6j896DYpkhcRJiPHHXn98hUfUyZ1kH3tiPfw8Zn4cDnDbdzsDSCyWQixjsGf5dQjNoAovzUKOSiT8lszq6sXezO2k1iUaLjB3fxgJhx5ufblsKJNebnXSfbdJiKPbs5e81kkmfe1GC7s0Vnya7IRilT2nR8CQlHYpmDnc0rI9wjEqg9RzfpdFSdPGmfwZO2wqFv4N8vG25nNMKpdebnDpJGAEjMMQdaWLJKLibrmWdIv/+/VB495jCbbIHM1RXPydegHjYMmWvtKMO0MrOEmbvCnUB3+2vBi0GvoAhMRiWCYGB/Rgu+ywQBbvgUFh2HKMdFt9aJwhXmroEJL4J3uMOG9Z8/n9hNf+N5lf3nWPrcXKoTE+uWIDEaIGkbHPnB/GhseLP1Qi6UR2gqc+fORRCES34SEhIavG769OmcPn3a+ntrnKgPPPAAffv2JSkpic8++6xFfUiIj+jyCC4uLpSUlFxy/PTp0wQEtC9xbInmE+UVZU29sqDqcRmqHs2s3tnGiAnQsCMhX4q05XwER2GxN9D+IhgkbIvbAHNVeEPppd8rEuLSMzQYQ1UoJoM7mSWFxPs74LPc9SrY8pJZ21BXCUq3utu5epl/HCSNIAgC74x9h4+3neW5gyes0WvVSUkIMhnK8HAEWdt34s7rOQ+9UU8Xny6AOWKr8uBBFP7+uMTG2t8Ar5pF6bGfzh/7YS5MfBm6X2uTIfR5eQju7o2+nmXjlpFUnESkZ6RNxpWQEIMwbzfcVXIqtAZcTMGAWcbKZDJhqq7m9NBhmCoridu+DYW/v20H7zkVNj0PZzdDWU79BSNzjkF5Drh4QtQI29rQABfKI1yMSaejZO0fGIuL8Z0z22E22YrgJ5+ss8CrxWEf6RnZbnU+FXI5KlMQOtLZl3mKy6O6Nb+TiCG2N6wlyJVmuQYH6tlaEAQBFPZ3Pcnc3c3zp4vnUMd/hXUPQ8kFMheeoU2aD5hMJqs8QnMjbSdOnMiKFStqHWvMx+Xm5oabWz3z1WaSmJjIXXfdRadOnWzSn4Q4iL4iuPbaa3n22WfR6XSA+QOdmprKww8/zNSpU0W2TkJspsVP47VRr3FVZ8cWhhGbaH8p0taCJdK2pMQbQah7MiwhYSHk/54l9q8NeF5h20JDEq1H46LAM/8hKlNvp6zCQZ/j4N7mtERdhVnjsD6ufg3+lwidBjvGrhoSaoqQxdbc1/KWvUvihIkUfPqpQ+2wF9dEX8OU2CkEqYMAyHnlFVLnzqPw2+/sP/jxX2HPB5ceL8mC72abz9sAnxtvpMvePQQ9+kiD7bxcvOgb2BcfVx+bjCshIQYymWDdPC8v80ZAoFRbSkFVATJXV5Rhoci8vNCmtbBoU0P4xUDYADAZ4djP9bcL7mWOapy2AhSOqQthMpnOO23rCi5QKIj4+GP877sXtzaYdlyXwxbOB1a0Vz1bC94Ks6zPyXwRskbaOLqcHIeOJ1OrcenWDdWFUkzHfzV/75dcpEvcxPmAIAh08e1CjHdMs522Li4uBAcH1/
p566236NWrF2q1mvDwcO655x7Kys6v+S+UR/jss8945plnOHTokDVS98KI2by8PK6//nrc3d2Ji4vj11/NryU5ORlBEMjPz+fWW2+1XleX9MIvv/zS4KbL3LlzmTJlCq+99hohISH4+fmxYMECq/8OoLq6msWLFxMWFoZarWbIkCFs3rzZej4lJYXJkyfj4+ODWq2mR48erF1rznIrLCxk1qxZBAQE4ObmRlxc3CWO7o6O6E7bpUuXUlZWRmBgIJWVlYwaNYrY2Fg8PDx4/vnnxTZPQmQGBA1gQtQEQjWhAFRt/YWiF26namMjqVFtnOiaBfzZvI4daVtYVUhxdTEARq0/nXzccFPVPXGUkADsWpVWovWcl35x0L1NEM6nx1o0DutDrrw0MsNO6Azmia7VaRtUI/tiMiK4uOASH+8QOxyN+6BBKIKCkGnsXIjFaDBH1GCq42TNsXWPNCs1siEEuRy5hyTdI9ExsEgkJOXq6ObXjT4BfSjVlgIQuXIl8f/swr1fP/sM3mua+fHI9w238wqD2PH2saEOcsuqKa3SIxMg0s/9kvOCIODWqycBCxbU6wBtCxgrKyn66WdMWnPUoSWworNX+9SztRCmjgAgteb1toi9n8BX0yB9v22Mai66Klj7kHnDw0bffY1hKCoiYcxYzk6+FkOZHeZ92vJLfgRdBYKuwvx6oYnzgYdr/03q6Femq8RV4WqTgqIymYy3336bY8eO8fnnn/P333/z0EMP1dl2+vTpPPjgg/To0YOsrCyysrKYPn269fwzzzzDjTfeyOHDh7nqqquYNWsWBQUFhIeHk5WVhaenJ2+++eYl1zWXTZs2kZiYyKZNm/j888+tDmAL9957L7t27WLVqlUcPnyYadOmMXHiRM6cOQPAggULqK6uZuvWrRw5coSXX34Zjcbs73jyySc5fvw4f/zxBydOnGD58uX42zpTo40jujyCl5cXGzZsYPv27Rw+fJiysjL69+/P+PGO+6KVaDuUrf2B3F/243XkDKHjbhbbHLth2aVPyS9HbzC2S63DpmDZwfdUBFJqUlmj0SQkmoJJr0dwQCqWRNOJCdCwMzGf0zmFgINStbpeBXs/MmscGo21HbMmE+ScgMBuDitWA7Bk1xK2pm+luGIC0M96bwt7/XVzQStTXYuLtofOoOPfnH/JKMvg+rjr8bjySjwmTrR/Gm3KzksjamphgpIMc7vO9k+f3pm5k23p27gs5DJGhY+y+3gSEvYkvkbO5UxOKd/e9G2tcwpfOxcJ7nE9rH8M0vdCQRL4Ooez0LIBF+Hrjoui7TplG8JkMpF8441Un0lAcFHhdfXVVnmE9lqEzEKcbzQHyyCvOr3lnZzdBGf+hIjLoNMA2xnXVDIPmLNPjv0M3ac4ZEirfrMgILfHZu0LofWfi7sS003fITRpPpBZez7wZi+oyL+06ZLiZpu4Zs0aq3MSYNKkSXz//flNp6ioKJ577jnuuusu3nvvvUuud3NzQ6PRoFAoCA4OvuT83LlzmTlzJgAvvPACb7/9Nnv27GHixIkEBwcjCAJeXl51XtscfHx8WLZsGXK5nK5du3L11VezceNG5s+fT2pqKitWrCA1NZXQUPP/yeLFi1m3bh0rVqzghRdeIDU1lalTp9KrVy8AoqOjrX2npqbSr18/Bg4caP2bSNTGaVazl19+OZdffrnYZkg4IVvTt3K26CzXx12PUlmGOqga124xYptlV0I8XXFVyqjSGUkrrKSzv52jkpwUyw6+K+YvmrggKZJJonFK/viD3LffQT18OMFPPC62ORIX4OqRhjr2BVbnePEYax0zaOTlZm3D8hzI2A/hg86fyz4G7w8H/y5wzz8Oi7RNLkmmqLqIyio5gnA+AhnqT0Nti2iNWm778zYAxkWOw1Pl6ZiBy7Jt264eSjdtomDlSjyuuALfm+ovRPZP1j98eeJLDCaD5LSVaPNYNLjPZIsg4eURDJ1HmnVtj/4AI/9X+/zuD+H0Ohh8B3SZ6DCzLDUo6pLwqj6bRPFvv+J5xRW4du/uMJtsjSAIeEyYiLFqtVVzvZNHJ/Kr8tt9pG2/4Hi+T4VKWlEkPXI4nPgNUnaB46SWz5Oyo8aOYQ7bpNZcPpz4HdvRiVBc3lBRgT4pCZdy280HynXlqJXNW5OPGTOG5cuXW39Xq9X89ddfvPjii5w8eZKSkhL0ej1VVVVUVFTg7n5ppH5D9O7du1bfnp6e5NhBkqJHjx7IL5ifhoSEcOTIEQCOHDmCwWAg/qIsserqavz8/AC4//77ufvuu/nzzz8ZP348U6dOtdp+9913M3XqVA4cOMCVV17JlClTGDbM8brLzoxoTtuVK1c2qd3s2W1PrF3Ctryw+wUyyjLo6d+TgREleI3JhxuuE9ssuyKTCUT7azieVUJiTlnHddrW7OAbq80pElKkrUSTkMvRJiUhtNNol7ZM14AQZOklVJgqMZqMNkkzaxSFypwme+ZPKDhb22l7qsZx7BfjMIetyWQ6f2/TBhDh646rsn2+V9VKNYFugeRU5pBcnEzvgPOLC6NWi0xlJ71JTZBt29VD5f79VOz6B1Vkw8XFLP/fUZ5RrRpPQsIZsMgjnM0rQ2cwopTLMBgNyGXm+1jOW29RsXsPIc89h0u0HZx5vaZBcQao6yhEduxnSN0J8RNsP24DJObUr2dbun4d+cvfp/rEScLfX37J+baE3+234X/3XdbNxRdHvCiyRY7hsvCusAeQl5FalEuEdwuKpVuKf6X+Y07Flzn4ez9lV40dwx06rNzbG/lFGqo247G6I2iNWi3axLNQUYnJPYAmuagvnA8sPFLrVGppKmXacoIN1c122qrVamIvKFSanJzMNddcw913383zzz+Pr68v27dv57bbbkOr1TbbaatUKmv9LggCRqOx3vYymQzTRdlcF2rTtmScsrIy5HI5+/fvr+XYBaxRxrfffjsTJkzg999/588//+TFF19k6dKl3HfffUyaNImUlBTWrl3Lhg0bGDduHAsWLOC1115r1K6OgmhO27lz51pDvS9+41gQBEFy2kowstNICqsKcVO4QZ5ZFwW/OHGNcgDRAWqOZ5VwNq8MaN3Csq1yU7ebGBA0gMWrkgGIDZKcthKNox46lE7L3sF9sGOLSkk0zsBOMZj2yxFkOtJLMojwCnfMwBNfguvfB4VL7eMWp61F99YBFFQVUKItAQSMWv/zRcje/4CKA/vxmTEDj7FjHWaPvens1ZmcyhySipPoHdCbqlOnyfzf/8BkJPq33+wzaOQwc1Xokizq1rETzOdbWUHba+pUlBERuMQ0nP1jyRpp78V6JDoGYd5uuKvkVGgNbEk6zOuHH0MuyPntevPnuWLvXioPHKDy4EH7OG37zIS+sy6NFizPh7R/zM8deE8HrEXI6goucO3RA48rrsDjirYv/ddR6wYEaDxRlo2mtNyd5LxKIrxb0ElQT3PWT3UJnDsCoX1tbGUDGPSQttv8vJXfe06Fqm4HqkylRhkZg8zNDRRdmj8fuKhfH49OuOorcVc0z6FaF/v378doNLJ06VJkN
cEC333XcHFWlUqFwWAbHeKAgABKS0spLy9HrTa/zoMHD7aqz379+mEwGMjJyWHEiPrDyMPDw7nrrru46667ePTRR/noo4+47777rHbNmTOHOXPmMGLECP73v/9JTtsLEM1p261bN7Kzs7n55pu59dZba4V2S0hcyGNDHgPAVJyFqarEnJLjG93IVW0fS4pVYk7HLUYWrA7GXe5Hbn7NZLiuirwSEhch9/DAQ9JFd0rCvTWg8weXbPZknHKc09ajjo2vkkzI/BcQIN5xabQWB567zJ9Sk9K6GVWxZzflO3fhccUVDrPFEUR5RbH73G7r61YGBVJ95gyYTOgLCuyjgSmTw8SXzVWhEai9UKtx9Ex8qdWRTi6dO+PSuWGnlM6oI60kDYBor/Y/d5Fo/8hkArGBGg6nF5NfoiCtNA2ZIENr0KKSq/CdMwfjDTegHjrUTgbU87k9sx5MRgjuBd4R9hm7HizFNWMCL3UiaUaORDNypEPtsTcmk4nSvbtx79INhZeX2OY4hG4us9iRlk92UQulBWRys57tmT/N+qmOdNqeOwzaMnD1gkDHSHRkv/oquoxM/ObNxa1PH4eMeSGKC6N7Wzkf8FB54KGyjURfbGwsOp2Od955h8mTJ7Njxw7ef//9Bq+JiooiKSmJgwcP0qlTJzw8PHBxcWnwmvoYMmQI7u7uPPbYY9x///3s3r27VkGxlhAfH8+sWbOYPXs2S5cupV+/fuTm5rJx40Z69+7N1VdfzcKFC5k0aRLx8fEUFhayadMmunXrBsBTTz3FgAED6NGjB9XV1axZs8Z6TsKMaNWNjh07xu+//05lZSUjR45k4MCBLF++nJKSErFMknByKras5fSPwaT9EwLK9r/Ta62ynieCZpgTYSnuEOTpgqerspHWEhISzoxMJuAuhABw6NwZcYyoNlc5t0bZhg8GTR1ptnbCUmBRpjePaYnMClj0AMFLltjP0SESFkkAi0SA3Nub8A8/IG7HdvsWLep+Ldy4EjxDah/3DDUf736t/ca+gIzSDPQmPW4KNwLdHfc+k5CwJxaJhKx8FZ9O+JS/bvgLpcw8R/O84gq8p0xBGWTnLDFdpVkj1JIKfPJ382OXq+w77kVUaPVkFFUCEO3fMYILMh9cTMbseTz31Fg+P/a52OY4BGswTW4rgmks0ZwWfVlHkbLT/Bgx1CFSUCajkZLf1lC6bh2G4uYX77I5TjIfAOjTpw+vv/46L7/8Mj179uSrr77ixRcblhmZOnUqEydOZMyYMQQEBPDNN9+0eHxfX1++/PJL1q5dS69evfjmm29YsmRJi/uzsGLFCmbPns2DDz5Ily5dmDJlCnv37iUiwryBZjAYWLBgAd26dWPixInEx8dbC6+pVCoeffRRevfuzciRI5HL5axatarVNrUnRC1ENmTIEIYMGcKbb77J999/z4oVK1i8eDFTpkzh008/bfEOgkT7w2gyknd0D0a9DJO8Y+i72mRy0IY5V36OXxJ+ITffD3CXomwlmoWxooLi39ZQdfQowc8+Y/9q9RJNxt+1E2nGgyQUnXXswKm7YfU9Zt2yeWvh1B/m4w5Oo7U4LyvLzcUZLPc2t149cevV06G2OAJLgRqLsxpA00D6nE3pfi10vdq8YC3LNv/fRw6ziZZg1cmT6DIzcevdG4W/f73tLK87yjPKMRrOEhIOwFKMLCG3jIXBgxppbQeMRnhnAJRkwLw/ILQfJP5tPudgp60lytZPrcJHXVunu/yf3bh274bc00FFGB2E++DB5G9Yh1BehYu8Y6zXI/xUyFzOcTCnBOjask4ih4PCFWQOdsHknaoZ30HSCIJAp2XvULppE+6XXeaYMS/CZDRiLC3FWFmJIigIoYXzgWpDNVqDFle5K0p584KH6otgXbRoEYsWLap17JZbbrE+nzt3LnPnzrX+7uLiwg8//HBJP3VJjBYVFTX4O8CUKVOYMmVKrWPz58+3Pl+yZEktR25dr+PNN9+s9btSqeSZZ57hmWeeuaQtwDvvvFPncYAnnniCJ554ot7zEiI7bS24ubkxe/ZsoqKiePrpp1m1ahXLli2TnLYSABRWFXLFD1dgDNGy9fM3UBjqF9duT1iKjxWUaymq0OLtbqeCLU7K8fzjvHvwXXwUnYE7rVEdEhJNQiYj+/nnMWm1+M6bi0u0lJbsLER6RpFWBFnlqY4d2DME8hPMxcgKkyFpq/l4l6sdaoZFJqC8zBxl2t43pCw6rqmlqeiNehSOXqzK5NDZ9k7iop9+onDlF/jMvoXgxx6rt51Vz1YqQibRjoivcdqeya47G0x37hyVBw/i2r07qgg7SBXIZBA9Gg5+BUe+h6oS0FWAZxiEODYV26JnG3ORnq2hrJy0+fMxmUzErl+HMizMoXbZE68p16G8YjQzhCL83PzENschGFSJqKPf5KQ+CJjZsk7CBsAjaeYCqY7k2ndg5EOgdHPIcIIg4Na7N25iyl8KArqMDExGI3JvbwRX1xbNB0qqS8ipyMHbxZswj/bzGZZoW4i+5Z+RkcELL7xAXFwcM2bMYNCgQRw7dgwfHx+xTZNwErxdvFHIFOhkJrK7xOE6zLE76GKhdlEQ4mWWgeiI0bZ+bn5cF3MdbrpeQPt3bEjYFpmrKz4zZ+J/773I1B0jOr+t0MPfXEW32FB31V+74R1h1jo0GWHvJzD0XnNBG7+Gi0jZGkvkpVEbQLCnKx6uSioPH6Zs61b0+fkOtcURhKhDcJG7oDPqyCw7/39e/OuvZD78CLpz50S0ruUo/ANwiYvDrXfDDiKpCJlEe8SykX42r4xjeSd4Y/8bfHXiK+v5c//3HBkLF1H610b7GdHrBvPj4e8hdRcE94Gu11xaoMzOJNbIeF2sZ6vPykQZEYEyLBRFaKhDbbI3MldXNH5BdPHtgr9b/ZkG7YnBnbpiMrii07qj07ewKJRM7niHrQXvcFB3jP8rMDuOZd7eyH19W3VPqDZUA6CSd6zgKQnnQjSn7XfffcekSZOIi4tj7969LF26lLS0NF555RW6dm1hyoFEu0QQhEs08ToK5yUSOp6ubZ+APjx3+XNU5IwDJKetRPMJevQRAu5dYH9dPYlmMSisCwB6oYgyrYPvbb5mhzE734btr8Ohb+DNnnD8V4cMrzPoyCjLAMxOW0uKceFXX5N2x50UfvutQ+xwJDJBRqRnJHDegQlQ8MWXFK9eTcXu3SJZ1jr875hP9G+/4nVNw5HaFie9RSZCQqI9EObthptSjs5gYn/GGT49+ilrz661nncfMADX7t2Re9oxS6qyGAQZaEthx5tw7hCc/M1h93MLlsCKiyNtXeLiiPl9DZ2//75dSzTpzp1Dn5cnthl2p3dQJPqkZyhPuZP0oqrWd6irbH0fToguI4Pct9+h6vhxsU1BFRqKKjQUWSuyt7UGLUCHkQGRcE5Ek0eYMWMGERERLFq0iKCgIJKTk3n33XcvaXf//feLYJ2EsxHtGkqvLYcpO/s6pseHI6jafyEyMBcj256QZ9XL6mhUag2kF5onNXGS01ZCol3QKzQYo16DTFHGkewEhob3
dczAx3+F4z9ferwky1xV2AHFKNJK0zCYDCgEV0x6D+siXxEUhCo2Btd2Wi03yjOK04WnSSpOYmQncxV176n/QTd0KC7t9DVbsGw2S/IIEu0JmUwgLkjD4fRi9NXm6L2kkiRMJhOCIOB36zz8bp1nPwOO/wo/zKN2JXgcej+3UJ88goX2pmcLcCD7AL+d/Y3xm4vx/fJP/ObNJXDxYrHNsityuYzO/h6cyCohMafMKmPXbPIS4NubzUVRFx21f2T4mkVQkgmXPwARQ+w7FlD611/kvfceFfv2EbmybRepM5lMUqSthFMgmtM2IiICQRD4+uuv620jCILktJUAoFuBwMCdJrSqRHi649w0O3KkbVJxEsUlHphM4OOuxE8j7XBKNB+TXk/VsWMoAgNRhoQ0foGE3XFXKVAagjAoytiXdcoxTlujAdY9XM9JEyDAukfMRSpsUKSqPixRlypjMCBYMwgCH1hE4AOLGriybVNXMTKfGTPEMqfVmPR6BEXjU+iiqiIKqwsBrNHGEhLthbhADw6nF1NU4omAQKm2lIKqAvtrnFrv55cW4XHk/RzAYDRxNs8cWHFhRpixqgrBxaXdRtj+m/MvP5z+AT91f8YZDGhTHKxRLxIxAWqz0za3lPG0MIvLMxTyz4BRD0Up4BNlUxtrYTKZi66WZsGw++w3zgWoYmLxuGI86uHDHTJeUzBqtQhyOYK8efcDvVGP0WSupSM5bSXERDSnbXJyslhDS7RBQuVK/uwn4GeS00cmuhSzw4gOMO/inu1gTtvCqkKu/eVa5IIChKeJC/QV2ySJNkrmQw9TsnYtAYsW4X/nHWKbI1GDlzKMAhI5npvgmAFTdpojTerFZK5CnrLTLkWrLCSVmJ2WuiqzU6OjZBBY9FwvlEdoy5x77jnKt27D//778L6oAvOFWF5vsDoYd6W7Y4yTkHAQFnmXszlaQjWhZJRlkFScVMtpazKZQKdDUNnQ4eEk93OAjMJKtHojLgoZod7nizzlvvEGpRv+ImDRQrwmT7arDWJgubfJRwwmetITuMTFiWuQgzCp/0Ud/SW/pvfhTt5sWScqdwjtD+l7zO9RezptC5PMDlu5ylwEzQFoLh+O5nLncdhWJydjLCtDFR6O3MurWddqjWZpBJVchUzoOP4HCedDevdJtAk6ecn4eKKc9yYpzBPADoIl0jYlvwKdwSiyNY7DEo3lKviASUlMB3FsSNget/79kXl6YtLpxDZF4gLC3MMBSC1NdsyAZdm2bddCpsZN5d0xH1KcPRQwR2Z1hO+0zp6d8VB5oFHWvpebjEaqTpyg6vRpkSxrGVVHj6HLzGxUJ89d6c51MdcxLmKcgyyTkHAc8TVO2zPZZXVuzBR8+RVnRo4k9933bDuwk9zP4XwmXGd/NXLZ+aja8p270GVmmivWt0Mssi+dvWM6jMMWIMjTBZlLHrnVKa3rKHKY+TFlR+uNaoiUnebH0P6gdGu4bTtFplIBAiatttnXStIIEs6CaJG2EhLNIbI4B8FkolQwOCb1ykkI9nTFTSmnUmcgraCC6Hr0stoblkm/0hgIdJxoNAnb4z3tBnxmzmh2SpSEfRkSPJrdW5X4RfR0zICaJqYxNrVdC/Fx9cFX3h1DZYFV9iXn9TcoWb8Ov1tvw2f6jXYdXyy6+3Vnx4wdl6QK5y1fTt47y/C8djJhr7wiknXNJ2LFp1QdO4ZLly4Ntov3iee5y59zkFUSEo4lLtBcZOxsXhmjPaLYwY5aBYMFlRJDbh5VRw7bdmAnuZ/DBXq2F81To777lvJd/6C+zP4aomJgyRq5sMCisboaY1kZCr/2u0brGxzH96lQybnWdRQ53Fw8L2WXTeyqF4vT1uIktjMVBw7gEheH3MOOBQibiSIwEEVQUIvWARanrVSETEJspEhbiTaBMjOBUJ0BaD/plU1BJhOsQvcdqRiZJdK2utJc3CJWctpKtBCZi4vksHVCBoTGYijvSnqegybCkcPMOnLUpy8ogGeYQxY2lkW+xeFRdfIEupRUMLXfbApBEOrUdnTv3x+ZuzuCQimCVS1H7uGB+rLLUPj4iG2KhIRohHm74aaUozOY8JCHAuedeQAeY8cS+fVXdHrPxpG2Tng/v7gImczNDY+xY5C5tz9ZlMKqQoqriwGI8IwAoGT9nySMHkP2yy+LaZrdGRpeUzhTXkZqUW7LO4oYAghQkAilrXQAN4TVaWt/uQKjVkva7fM5PWw41WeTGr/AQQgKRYvXAVrDeXkECQkxkZy2Ek6PyWgk4cNzvLjMRFChqVYhk46AZfe+IxUjs0RqFJeYF8QW3TQJidZgMhjENkGiBoted6qjpF9kcphoWUxevNCv+X3iS3YtWlNcXcxbB95iXfJa4Py9PfSll4hY8Sma0aPtNraz4j5oEPG7/yH0hefFNsUupJakojNI0iwS7ROZTLDOz0zaAIBakbYKf3/zxoytJQKc4H5uISHH4rRV230sZ8GyDgtVh+KmMKfcK0OCMRQWUnnoULuWowrQeCIYzLqou1JPtrwjVy8I7mV+bnGs2pqSTLOmrSCD8MH2GeMC9JmZKIKDUXh7o4pqH4U3pUjbtkVycjKCIHDw4EEANm/ejCAIFBUViWqXLZCcthJOjyEzCaMOXKqhwKP2hLAjEN0BI20t0dT6Kn80LgqCPdunJpiEY6g6eZLkmTeRcvMtYpsiUUOwpyvuXgnIfDewJfmQYwbtfi3cuBI8Q2of9ww1H+9+rV2HTyhK4OMjH7O3+GvgvOyLwtcX9dChKIOD7Tq+2Kw6uYqrf7qaZf8usx4TFAoEZduKsi387jvyP/sMbVpag+10Rh3X/XIdg74aRG5FKyKyJCScGEvGQFmZuWBsRlmGNTrNroh8P7eQWDM3t0Ta6rJzSL19PoWrvm23euWWObpFxxjArXdvwj/5mJjff29z9/TmohbM77nD2Wda11GP66HvzeDVyQZW1UFlEUSNgPAh4OppnzEuQBUVRcza3+n862oEJysabigtRZuaSnVeDnvP7WXt2bXsPbcXg7H+YA6jyWjddG1NpO25c+e47777iI6OxsXFhfDwcCZPnszGjRtb3GddjB49moULF9q0z+b0O3r0aGtWlaurK/Hx8bz44ovt9j7oaETTtNXr9RgMBlwuKOKQnZ3N+++/T3l5Oddeey2XX365WOZJOBGKTjF02buLX/9+G13RDx1KHgE6XqStzqAjrdS8GDZqA4gJ0dSZVish0VTkPj5U/vsvCAKGkhLknvafvEo0jEwmoAnYS6XyEFtS4hkf088xA3e/FrpebY5sKcs2ax5GDnNIRJanypMb429kzaFCoOPJvuiMOlJLU0ksSqzzvMlgaBNSJoVffkX16dOoOnVCFR5eb7u8ijxcFC4YTcYOo8Mv0fGwRNqm5ypwV7hToa8grTSNGO8YALTJyZSs/xO5lyc+M2bYdnAR7+cABeVaCsrNDmqL07Zs0ybKt2/HUFqCz4zpDrHD0ViCZ6I8o2od1wy3fwq+MxDgGk6Z9iQJRa3M/BzxgG0Mqo+g7jB3DTjYaeaMskEmrZa
/Uv5iafpKcrT51uNB7kE8MvgRxkeOv+Qay+aTTJChEFrmMktOTmb48OF4e3vz6quv0qtXL3Q6HevXr2fBggWcPNmKaG0nZP78+Tz77LNUV1fz999/c8cdd+Dt7c3dd98ttmltHtG2QebPn8/9999v/b20tJRBgwbx7rvvsn79esaMGcPatWvFMk/CyZB5+BAyeCJAh5NHsEba5nWMSNu0sjQMJgNKwRWT3lMqQibRapRBQYS9vpTYvzZIDlsnIty1H7qiARhq0modhkwOnUdArxvMjw5a4Mf5xPHI4MfJTzUvDmIDNZTv2kXBF19Sdeq0Q2wQk3ER4/j4yo95/LLHax3XZWWRMnsOiVdc2SYiMjwnX4Nm/Dhce/VusF2IJoRdM3exbuo6ZIJzRR1JSNiK+BqnbUJOubUo1YUZcVWnTpP7xhsUrvrWPgaIdD8HOFsTTBHm7Yabyjyu+vLhBC5+EN+bb3aYHY7Gsg67sAjZhZhMJgxl7XfNYnFWZ1WkimtIU3FA4ItRq3Xq7++/C/fwyNk3ajlsAXIqcnhg8wP8lfLXJdcYTAYUMgUucpcWBw/dc889CILAnj17mDp1KvHx8fTo0YMHHniAf/75x9ouNTWV6667Do1Gg6enJzfeeCPZ2dnW80uWLKFv37588cUXREVF4eXlxYwZMygtLQVg7ty5bNmyhbfeessa7ZqcnAzA0aNHmTRpEhqNhqCgIG655Rby8vIAs3yASqVi27Zt1rFeeeUVAgMDyc7ObrDfunB3dyc4OJjIyEjmzZtH79692bBhg/V8dXU1ixcvJiwsDLVazZAhQ9i8eXOtPnbs2MHo0aNxd3fHx8eHCRMmUFhoDnZYt24dl19+Od7e3vj5+XHNNdeQmFh3IEB7Q7RZ5I4dO5g6dar195UrV2IwGDhz5gyHDh3igQce4NVXXxXLPAknpJd/L7666iu+ueYbsU1xKBbtx4JyLYXlDkg5ExnLZNCFYEDocNFoEvbB86qrUIaFiW2GxAWMCJ5MVdY0dKVxYpviMFLyy9EbTahVckK8XCn+/Xeyn3+e0vXrxDbN7oRqQhkSMgR/N/9ax+V+flQeOoQuMxPt2bMiWdd0/OfPJ3zZMpRBgY22FQQBX1dfB1glISEOFnmEs3llRHqYdSwvLEbm1rcPHhMn4v2f60Wxz55Yi5BdME9VdeqE3+234zV5slhm2Z265BEsVB45SvIN00i/717HGuVAegTEAlCiz2x9Z0YDZB6EAht/9+kqoaLAtn02QN7y5SSOv4Kin3522JgAFbqKRn9Kq0t5+d/XqMulbKr599Kel2pJJVToKhAQCPcIJ9A90NpXcygoKGDdunUsWLAAtfpSzWtvb28AjEYj1113HQUFBWzZsoUNGzZw9uxZpk+vHamfmJjIL7/8wpo1a1izZg1btmzhpZdeAuCtt95i6NChzJ8/n6ysLLKysggPD6eoqIixY8fSr18/9u3bx7p168jOzubGG28Ezksf3HLLLRQXF/Pvv//y5JNP8vHHHxMUFFRvv41hMpnYtm0bJ0+eRKU6Ly1x7733smvXLlatWsXhw4eZNm0aEydO5MwZs9TIwYMHGTduHN27d2fXrl1s376dyZMnY6ipSVJeXs4DDzzAvn372LhxIzKZjOuvvx6jsf0W8rUgmjxCRkYGcXHnF2obN25k6tSpeHmZxb3nzJnDihUrxDJPwonIf3g6xvJSvOYuovfAK8Q2x+G4qxSEermSWVzF2bwyBqjb9wLQEqFhqDZH30mRthIS7RNLOmlHySI4U3iGY5nmaKzYQLPsi1uv3hgKCnHr5yB5CCdEplIR9vpSVJ07o+pcd+SWhISEcxLm7YabUk6lzkBnTR8mRZlqRWAqg4Lo9OYbIlpoP87r2XacImQXSphdLI8AoPD1oerECQSlEn1eHgp//0vatHUGhnWBE6CX51Kl0+KqbLneKX88BHs/hqH3wgQbFuRM2AjfzoKu18CMr2zXbz2Ub9+BLiMDQe7YeMAhXw+xST/ZFdkcyDnAoOBBAEz8cSKF1YWXtDsy50iT+0xISMBkMtG1a9cG223cuJEjR46QlJRkdYiuXLmSHj16sHfvXgYNMttkNBr57LPP8PAwb5TdcsstbNy4keeffx4vLy9UKpU10tXCsmXL6NevHy+88IL12Keffkp4eDinT58mPj6e5557jg0bNnDHHXdw9OhR5syZw7XXmjXB6+u3Pt577z0+/vhjtFotOp0OV1dXa2Z9amoqK1asIDU1ldDQUAAWL17MunXrWLFiBS+88AKvvPIKAwcO5L333rP22aNHD+vzCwM+La8lICCA48eP07Nnz0bta8uIFmnr6upKZWWl9fd//vmHIUOG1DpfVtYxNDwlGqZw02Hy/kpCl5EitimiYdW1zWn/zg3LDn5pqTfQ8XQfJexH6V9/kbVkCboLUo4kxCM6QA2CnoSiBMcUrhERrUHLtN+m8eSh6xAUJcTWRKf5TL+R8PfeRTNihMgWOobNaZt5c/+bHMs/Vuu4x7hxuERHO71+uTY1FWN1dZPaPrbtMRZuWsipglN2tkpCQjxkMsGqaxupGsMro15hXMQ4ka1yDIk5NZG2NRuQxb/9RvnuPZj0ejHNsisWCTM3hRtB7kGXnFeGhRH2+uvEbt7ULh22AH2CozAZlQiCgf2ZrUzN7jTY/Jiys/WGXYilP7Vj5KciV35Op3eXoRk92iHj2QNbFwxtqlzEiRMnCA8PrxXB2r17d7y9vTlx4oT1WFRUlNVhCxASEkJOTk6DfR86dIhNmzah0WisPxYnskVWQKVS8dVXX/Hjjz9SVVXFG2+0fJNt1qxZHDx4kB07djBp0iQef/xxhg0bBsCRI0cwGAzEx8fXsmfLli1WWyyRtvVx5swZZs6cSXR0NJ6enkRFRQFmh3B7R7RIW4sux4svvsi2bdvIzs5m7Nix1vOJiYlWL7xEB0ZfjU9MCdXFclz6Xs6uzF1sTttMv6B+TIyaKLZ1DiPaX822M3kk5rX/jQyLPIKuMgAXhYxOPu4iWyTRXsj78COqDh/GrU9fvK+fIrY5HZ5ofw3qmFcwKkvYn9WHoZ36iG2S3UgrNS905bhi0nt02M2o3xJ/48+UP/Fx9aGHX4/GL3Ay0u68C21aGpGfrcB94MAG227P2E5hdSF39L7DQdZJSIhDbKCGw+nFnMkpY1I9bQxl5eizMnGJaz9yOAm55522Jr2e7Oeex1BcTOQXK3GviY5rb6hkKqZ3mY7RZKx3k81z4gQHW+VYFHI5KlMQOtLZn3Ga4ZHdWt5ZpNmhRdYhqC4DFxvNDVJrnLaRjikOJ3Nzw6MBZ5u92H3T7kbb7M/ezz0b72m0XYD7eQf3H//5g6SSJBSCglBNKApZ811mcXFxCIJgs2JjSqWy1u+CIDQqC1BWVsbkyZN5+eWXLzkXEhJifb5zp/n9UlBQQEFBQZ1yDk3By8uL2FizfMh3331HbGwsl112GePHj6esrAy5XM7+/fuRX1R0VqMxv+/d3Nwa7H/y5MlERkby0UcfERoaitFopGfPnmi17TvwA0
SMtH3qqad46623iImJYcKECcydO7fWm+fnn39meAepQinRAAVn8etSRugIE4qILhzKPcTXJ79me/p2sS1zKNGWNOLc9h9pm1pi3i0zav2JDtAglzl35JVE28HrumvxufnmdrVobMu4qeQojeZJ8t6M9lVB92Issi8yfSAWrW5DWTmmDjDRvBCLBqIlo+JCyrZtJ/vFF6l2Ul1bY3U1htJS0OtRRUc32La4utiaWllXCrGERHsiPsgc/XU6uxSD0UB6aTrVhvMR6ZWHDnF60CBS77hTLBNtTpXOQFqBWeMyJlCNsawMzdixqKKj27XcTSePTjxx2RM8NfSpJrVvr1HH3gpzYNnx/IRWdhQOXhFgMkD6HhtYBlSXmp3AAJFDbdOnk+KudG/0Z1joMILcgxCoez0pIBDsHkz/wP7WYyq5CgEBg8mARqWx9tUcfH19mTBhAu+++y7l5Zeu34uKigDo1q0baWlppKWlWc8dP36coqIiunfv3uTxVCqVVfvVQv/+/Tl27BhRUVHExsbW+rE4ZhMTE1m0aBEfffQRQ4YMYc6cObWcwXX12xQ0Gg3//e9/Wbx4MSaTiX79+mEwGMjJybnEFov0Qu/evdm4cWOd/eXn53Pq1CmeeOIJxo0bR7du3awFyjoCojltR40axf79+7n//vtZsWIFH330Ua3zffv25YEHHhDJOgmnIc8sTI1fLAgCQ0KGMK/HPK6I7FjatpbUK0vRg/bMnzf8yY0hb2KsDpT0bCVsiu+sWQQ/8ThuPdtehF97xVtpLg53Iq+VCx8nx1KYp6rCDzBrdRd8+gknBwwk9+13xDTNoVgcmBdWl7dQ8PnnFHy+kvILqhg7EzIXF+K2bTWn/fo2rC1vyRgJcg9q9kJPQqKtEV8jj5CQU8b1v17PpJ8mcTTvqPW8S2ystYK9oZ1I36XkV2A0gYerggCNC3Jvb0JffIHo39cgKERLZHUadOfOkfHgYpJuvLHJaeJtiQG+V1OZfhOqqr6t78wSbWsriYS03WAygnckeHWyTZ/1YCgqIvXOOylctQqTkxaDksvkPDL4EYBLHLeW3x8e/DBymbzWNZ29OtPJoxMyoeXusnfffReDwcDgwYP58ccfOXPmDCdOnODtt99m6FCzQ338+PH06tWLWbNmceDAAfbs2cPs2bMZNWoUAxvJ6LmQqKgodu/eTXJyMnl5eRiNRhYsWEBBQQEzZ85k7969JCYmsn79eubNm4fBYMBgMHDzzTczYcIE5s2bx4oVKzh8+DBLly5tsN+mcuedd3L69Gl+/PFH4uPjmTVrFrNnz+ann34iKSmJPXv28OKLL/L7778D8Oijj7J3717uueceDh8+zMmTJ1m+fDl5eXn4+Pjg5+fHhx9+SEJCAn///XeH8hWK5rQF887Cf//7X6ZPn45MVtuU22+/vdaOg0THRJfwL4ZqAfzjAegX2I8HBj7AqPBRIlvmWKJrihyk5legMzjnl6KtcFW4UlTkDyg6bAqxhERHIVRtrjaeUposriF2xuLE01f7o1LICPd1pzopCXQ65D4+IlvnOKK9zBGqlr/HhXhOmoT39Om49nDeTRVBEFA2oRiH5fVdWJBJQqK9Elej0X02t5wQdShKmZLcyvP6kDK1mrjt24jb9DdyTfuY11mCKCxFJS04uy53a0kqTqJCV9FoO5m7O6WbNlF9/ARVR4812r6tcXmnIehLe3Mu3wabcrZ22qY4ThqhbNs2yrdspfCrrxFkorqVGmR85HheH/06ge6BtY4HuQfx+ujXGR85vtZxmSDDXemOl4tXq8aNjo7mwIEDjBkzhgcffJCePXtyxRVXsHHjRpYvXw6Y7xmrV6/Gx8eHkSNHMn78eKKjo/n222+bNdbixYuRy+V0796dgIAAa8GvHTt2YDAYuPLKK+nVqxcLFy7E29sbmUzG888/T0pKCh988AFglkz48MMPeeKJJzh06FC9/TYVX19fZs+ezZIlSzAajaxYsYLZs2fz4IMP0qVLF6ZMmcLevXuJiIgAID4+nj///JNDhw4xePBghg4dyurVq1EoFMhkMlatWsX+/fvp2bMnixYt4tVXX23W36gtI5icbPsrISGBTz/9lM8++4zc3Fx0Op3YJjVKSUkJXl5eFBcX4+npKbY57YrMmaMp/jebwBsG4ffcSrHNEQ2j0UTPJeup0BrY+OAoa+Rte+W6Zds5lF7M8ln9mdQrpPELJCSaiMlkQldTTMg1Pl5sczo8SzZ8z4+Zz+JOJ3bP+UNsc+zGrLWzOJx7mMr0m4hVD2fdwpGYTCb0mZkI7u4oOojjtkxbxtBvzNElO2buwFPVPudMb+x/g0+PfsqMLjN4/LLHxTZHQsKuGI0mejy9nkqdgV/u60evkKBaUWvtkXc2nmHphtPcMKATL42PwKTToQwMbPzCNs6IVSMoqi7ix2t/JN6n4TlU8erVqGJjcXPijbiWcjSjmGve2Y6fWsX+J1uZ/ZmXAMsGgNwFHk0DhUvr+vt0IqTugmuXQf9bWtdXI2jTMyhZuxa5jzc+06bZZYyqqiqSkpLo3Lkzrq6urerLYDRwIOcAOeU5BKoD6R/Yv93fqyTEp6H3cFP9iE6xJVJZWcnKlSsZOXIkXbp0YefOnTz11FOkp6eLbZqEyBjKKwFQRp2fGBRUFbDv3D5yKhqumNiekMkEOvubo23bs67td6e+4+kdT5NQehDAWpFYQsJWFH75FYkTJpL75ltimyIB9A0239srTNkYTe0zi8BkMlnlAIzaAGsGgSAIKMPCOozDFkCj0hDoZnZs1CWR4Myk3Xsv5559Fn1u4xWmLa/NouErIdGekckE63ztXKHQIZwgiRcUISv69lsSRo4i+6VLi/20J8p15dZU8U6axtPuva67rl06bAE6+6uRq09S4vIXGcUFrevMLwbGPQ03/wCCDT47A+ZCn5sg6vLW99UIqk5h+N8x324OW1sjM0HvMh/GVkUxMKB+h21BVQEFVQXoDM4fPCjRMRDVabt3717uvPNOgoODefPNN7nuuusQBIH33nuPu+66i6CgIDHNk3ACwn/dTZddW9BMX2A99ti2x5i3fh5b07eKaJnj6Qi6ttsytvFTwk9oZedQyAQi/VpWvVJCoj7c+vYFpRKcK8mkwzKoUwwmoxwEHanFGWKbYxcKqgoo0ZYAAkatf4eXfWmoGBmALjOT6jNnHGdQE9AXFlL210YKv/4GwaXxKCiLhrEkjyDRUbDc105n1z1HNZSWkvnEEyRNn46pBUVtnI3EmgCKmAA12pogI1V0+/68q5Vqtkzfwo6ZO5qt1e1kib2tRu2iQB32M65Ba9mecrx1nQkCjHgAOo8EuQ30kPvMgOuXg2/7fj+2BEEuN99/TCaMVVX1tsurzCOrLAutsWMVi5VwXkRz2vbu3Ztp06bh5+fHzp07OXDgAA8++GC71wKSaD4yn0BkHucjkSyLoLYWpdNaLLq2Z9ux03Za/DQmdZqNoSKKKH81SrlTJANItCNce3Sny+5/CF/+ntimSACh3mrQ+wOwN+OkyNbYB4tzUmH0AZOSuEAPS
v/exLnnX6Bs+w5xjROBhoqRFX77HQljx5H92muONaoRZC4uhC59jYCF/0XeiAyWzqgjrdRck6Gzp7RolugYxAeZdW1PnMvjoa0PMX3NdLSG8w4Pmbs7JWv/oOrQYaoTE8Uy0yaYTKbzkbaBGkKfe47YLVvwvOoqkS1zDM2RtTFWVJD7zjKSpk7FpG1fDjAf+qAr6U12sV5sU0Sh5M8/qdi7F5O+bb1+VXg4Ll26IFfXHRhkNBmtEbYqucqRpklI1ItoHpFTp04xcuRIxowZQ/fu3cUyQ6INYl3w1ROl016Jtkbatl95hJGdRtLF5QaM1SHEtnPdXglxEGQyZO5SNXdnQRAE1IJZt/pQtnNFV9oKi3NSXxUAmCPSyrZuofCLL6jYvVtEy8TBsvFaVzEyt969QC4Hnc6pIrNk7u54XX01/nfd1WjbjNIM9EY9rnJXgtRSxphExyC+Rh7hbK6WrelbOZ5/3Lp5AeYIt6CHHqLTsndQhoaKZaZNyCquokJrQCETiPA1zyeUQYHtpsiaLRGUSoq++47q4yco3bhRbHNsykjfu6nKuInK8saLUzaKQQ/Hf4X1j5uft5Tjv0LWITDaN5rdZDKR/fwLpNwym/KdNiqg5iBk7u7IlMp6z1s2m2SCDIVgg8hnCQkbINo78ezZs3z22WfcfffdVFZWMnPmTGbNmiVF2kpYKf3kWUrW/obHqFF43n8+6qahBV97JqYDRNoCJOSUApKerYT9MRmNTl3ttqMQ4BpOiuEgCYXt855u+a7SVvkhlwlE+bujGzsWmasb6uH2r+7sbDQkj+DSpQvxu/9p084Py+uK8oqy6j9KSLR34gLNkbZJuRX0i4nieMExkouTifGOsbbxmTFdLPNsiiXKNtLPHYWs46xbn931LJllmczvPZ8BQQOadI2gVBLw4APIVCo8xo2zs4WOxbIuS8yxwbpMEODXe6GqGHpOhbD+ze/DoIOf7wJdOdy1A4J7tt6uejCWl6O+7DIq9u7FfcgQu40jBtWGagBc5C6SX0rCaRBtNhkWFsbjjz9OQkICX3zxBefOnWP48OHo9Xo+++wzTp8+LZZpEk5CxZ49lBwro/JY7ZRZy4IvoyyjVupVe8dSiKywQkdBeft73WklaWzP2M6JHHNkRkfXfZSwH4ayctLuvoczl4/AWFkptjkdHkv2RFZFqriG2AmLE89YHUikrzsuCjmakSMJeuRh1Je1r8VOU7D8f6eUpGC4KBpIkMmc0mFbsm4d1WeTMBkbL5ZnLUJW8zolJDoCYd5uuCnlaA1G/F3NRaos2s7tDYuTrquXgoQxY8l85NEG9THbC3vP7WVH5o5mr728p0zB86qrEFTtK9XcnAFp5Ey+DfT4ZXKIGGp+ntLCyNWsw2aHras3BNo3i1mu0RD68kvEbPwLWRN03p0NfVER2owMjNXVl5yzvL9d5G3vdUm0X5wiBGDs2LF8+eWXZGVlsWzZMv7++2+6du1K7969xTZNQkQ8OpsI6F2CZvjAWscD3AJwV7hjMBlqpV61d9xVCsK83YD2GW27IXUDd/91N2eN3wKS01bCfsjU7lSdOIGhoICK/QfENqfD0yMgFoBiffssRJZeai5SY9T6EyPd1whRh+Aid0Fn1JFZlllvO2eRR9Dl5JCxcBFnr7kGUxMcM1IRMomOiEwmWDOk3DCni1+cEWcymag6dZrC77/HWFHhcBtthUWmbEj+GfTnzlHx74EmFShsy+iMOut3mXRvM+PtUYmmy9Pk+jxNlc4GwTSRw8yPLXXapuw434+DssjaaiSqobAQQ2EhxrJL19OWSFtJz1bCmXAKp60FLy8v7rnnHvbt28eBAwcYPXq02CZJiIi7Wwb+3ctQj6idTiMIglSMrB3q2lom91UVfggCxEiathJ2QhAEQp5ZQtSPP6AeNlRsczo8g8K6AGAQSijXtr972w/X/sA4zVIMlRHEBWrQ5eSgTU1tUtRme0QukzOq0ygmRE3AyKV/A0NpKen3/5eEceOconCNsbgYt759ce3Ro0l62BOjJnJXn7sYGirdWyQ6FpbNdot+98USKIIgkHbHHZx78imqjh1ztHk2wyKPoB49hojPPiPooYfarPOqqaSXpqM36XFTuBHk3nytbpPRSPFva0iZOw9DcbEdLHQ83QPDABAEA/szbVBcL7JGLil1J7RkfmBx9lqcv3bCUFyMPjfXrmPYG7m3Nwp//zq/06VIWwlnxGnVlfv27cvbb78tthkSYqGvhqIU83P/+EtOR3lFcSz/WLtNvaqPmAAN287kWSeM7QmLA95YHUC4jzuuSrm4Bkm0azSjRoltgkQNPUOCKT+7CKPWl0qtAnU7C25QyBRk5XmASUdsoIai778n751leN0wldDnnhPbPFFYOnppvedkGg0V+/ZhKCig8sgR3Ac0TTvRXrjExRG16psmR/4ODR0qOWwlOiTxQWZd26ISb8C8GW8ymWo5NNVDh6LLyhLDPJthmYPHhHqjDo8S1xgHYQmsiPKMapmDWhDI/+gjqk+fpujnn/GbO9e2BoqAQi5HZQpCRzr7M04zPLJb6zoM6QNKd6gshLxTENiM/oxGSN1lfm5np23x6tVkv/gSPjNnEvzUk3Ydy14ofHzqPG4ymaRIWwmnRLRI28TERG699Vbr7xEREfj6+lp/AgMDOXXqlFjmSYiMIeUwlflyDDJP0ARecr6zZ8csRmaJtE1sh5G2Vt1HbQBxUgqxhESHwVUpJ9Q9CkxK2xT0cEIsi/y4QA+M5RUIKhUu0TGNXNUxEQSB4CVPE/n117g5kUxWe4+kk5BoLZa5W1auGgGBUm0pBVUFtdqEvvQikZ9/hvugQWKY2GpKq3Rkl5idOpY5eUfgwgKLLUEQBPzuvAP/++7F86qrbGeYyHgrQgE4kZ/Q+s7kSggfbH5ukTpoKrknoKoIlGoI7tN6WxpAm5wMJhPKiHC7jiMGepMeo8kc5Sw5bZ2f5ORkBEHg4MGDNm3rjIjmtH3nnXcICjqfXlFYWMijjz7KG2+8wRtvvMHAgQN54403xDJPQmTKN68n+c8AUjd6mytqXkRD1afbMxbJgPamaVtYVUhRdRFg1n2U9GwlHEHlkSPkvPkm5f/sFtuUDo/13pbXvjakfk38lUV//48i/gUgJlBN0EP/o8uB/fjMnCGydeJiNBnJq8yr85znlVfi3r8fglLpYKtqYzKZmqWtm1ORw/aM7WSVte1IQgmJlmCJtE3K1RGiDgHa3zzdIk82I3M3ui8/R5dZvy53e8KSDWcJmmkJXldfTcCCBSgDLw3GaauEqSMBSLHV+9wikXDuaPOus0gjRAwBuX0TqYOfeorYLZvxnjLFruPYA5PBQPnuPRSv+Z2y3bsxlJdjvECGySKNoJKrkAm2cZOdO3eO++67j+joaFxcXAgPD2fy5Mls3LjRJv1bGD16NAsXLrRpn83pd/To0QiCgCAIuLq60r17d9577z2b23Mh4eHhZGVl0bNnT5u2dUZEk0fYuHEjn3zySa1jU6dOJTo6GoCoqChuv/12MUyTcAJMFeXI3Uy4hPrW
ed5SlTm5OPmS1Kv2jGVXP7WgAp3BiFLuVLLULcYyqVeafMGkkpy2Eg6h+NffKPziCwxFRagvGyK2OR0ab59zuIT8zOrkvcwcvERsc2zGnqw9/JW2DpnrFYQpBuKuMk+7BIUCQeG0ClV250zhGWb+PhONUsPm6ZvFNqde9JmZJE29Abe+fem0/L1G5xo7Mnbw1M6nuCzkMj668iMHWSkh4RyEebvhppRTqTMQ7B5BZnkmScVJDAi6VOLEZDCA0Sj6xkxzScgpA5OJyac2k7MnF2VEBMrQULHNsjuWzEapCFlt4nyjOVgGedU2KqTafzb0vhG8I5t3Xd9ZZjlBmWPmFcqg5usai03Jn3+S/cKL6M+dsx6T+/kRcP99+EyfDti+CFlycjLDhw/H29ubV199lV69eqHT6Vi/fj0LFizg5MmTNhnHWZg/fz7PPvssFRUVrFy5kgULFuDj48PMmTMvaavValGpWvd3lsvlBAcH27ytMyKaxyc5OZnQC77kbr/9dry8vKy/R0VFkZ6eLoZpEk6A130vEv/vSUI+W1/n+UjPSAQESrQll6RetWeCPV1xV8nRG02kFrTdyrsXY5kMGqrNxSskp62EI/AYMxrPa65BPXy42KZ0eHw8qlF57+NsRfuKer4u9jpGB8xBXxYv3dcuIEQdQrWhmhJtCeW6uqOrK48cJW/5cqqOH3ewdbVtMBQVoc/JadLmsEKmINY7lnifS7X4JSTaOzKZYL3PuQs1kbZ1FAzOeuppTg8aTOnmzQ60zjYk5pYhMxlJGH0t6lEj0XSQ+UNr5REupPLIETIfeRRtWlqr+xKbPkFxAFRio+wKj2Dwiaozy7RBVO4QPQqi7Pt+bE7miTNR8uefZPx3YS2HLYAhP59zTy+h5M8/gfNOW1sVIbvnnnsQBIE9e/YwdepU4uPj6dGjBw888AD//POPtV1qairXXXcdGo0GT09PbrzxRrKzs63nlyxZQt++ffniiy+IiorCy8uLGTNmUFpaCsDcuXPZsmULb731ljXaNTk5GYCjR48yadIkNBoNQUFB3HLLLeTlmbOcNm/ejEqlYtu2bdaxXnnlFQIDA8nOzm6w37pwd3cnODiY6OholixZQlxcHL/++itgjsS99957WbhwIf7+/kyYMKFR+wCMRiOvvPIKsbGxuLi4EBERwfPPPw9cKnlQWFjIrFmzCAgIwM3Njbi4OFasWFFnW4AtW7YwePBgXFxcCAkJ4ZFHHkGv11vPjx49mvvvv5+HHnoIX19fgoODWbJkSVP+622OaE5bmUxG5gUpJW+88QZ+fn7W37Ozs1G2sd1XCdsjuLjWedxV4UqoJhSNUkNORY6DrRIPQRDO69q2I+1Hy6S+qsJ8D5CcGxKOQD1sGKEvv4Tc04viNb9TvnuPOfpHwuEMDu1Fde445CVXim2KTRkUPIgAw1UYq8KJDdRQ8scfpN55J0U//iS2aaKiUWlY+5+17Jm1B7Wybl3Igs8/J/ettyn96y8HW3cezdgxRH3/PYGPPNyk9pNjJvPzdT/zv0H/s7NlEhLOSVyQef5m0po34essGGwyYqyooOpIM1PAnYDE3DKMMjmma6cS8cEHyNTtX9f2QgmzSM9mRoDWQe6bb1H8yy8UrlrV6r7EZmhEV/MTeRlpRXXL/bQXjFotiRMmkvnY4xjKnGMNaqyowFhRUcuZbNJqzcdrZA9MBgPZL7wI9TmcBch+/gVMBgNKmRI3hRsqrcncr9F4vl+drlm2FRQUsG7dOhYsWIC6jvuEt7e3+TUYjVx33XUUFBSwZcsWNmzYwNmzZ5leE/1rITExkV9++YU1a9awZs0atmzZwksvvQTAW2+9xdChQ5k/fz5ZWVlkZWURHh5OUVERY8eOpV+/fuzbt49169aRnZ3NjTfeCJyXPrjlllsoLi7m33//5cknn+Tjjz8mKCio3n6bipubG9oL5Cc+//xzVCoVO3bs4P3332/UPoBHH32Ul156iSeffJLjx4/z9ddf15JYvRBLmz/++IMTJ06wfPly/P3962ybkZHBVVddxaBBgzh06BDLly/nk08+4bmLCgR//vnnqNVqdu/ezSuvvMKzzz7Lhg0bmvw3sBWi5eb16NGDv/76i8GDB9d5fv369W1Wc0LCMXw3+Ts8lB4dRhrBQrS/hqMZJe1K+9EyqTdqAwj2dMXDVdqwkbA/daVKKYKDCXrsUTyvbF/OQ2dnYKcotHlXkC1Atd6Ai0Iutkk2I8FahExDxZ8HKN+yVSpCBoR7NDzx14wZjUmnw7VHD8cYVAcylQq3XtJcVEKiqVh0bcvKzPJmdUXa+s67FZ+bb8Eltu3dBy2FgGM6UHCBJco2RB2Cm8Kt1f35zr4FuZ8vnhMntrovsQnUeCEYvDDJi9mVdoJw7xGt7zRlF+xaZpZImPhC4+1P/QFJ26DbNRA5rPXj10PF7j3oUlMpr6xE5v5/dhunOZzqb5Zeidu5A4Wv+Z6T/+mn5L75Ft7TbiDk//6Pin37L4mwrYUJ9NnZVOzbj/+Qwfi7+XN66DCyCwuJ/u1XXOLM0dRFP/+MzwXOxMZISEjAZDLRtWvXBttt3LiRI0eOkJSUZHWIrly5kh49erB3714G1RRtNBqNfPbZZ3h4mO+xt9xyCxs3buT555/Hy8sLlUpljXS1sGzZMvr168cLL5x/H3366aeEh4dz+vRp4uPjee6559iwYQN33HEHR48eZc6cOVx77bUA9fbbGAaDgW+++YbDhw9zxx13WI/HxcXxyiuvWH9/7rnnGrQvJCSEt956i2XLljFnzhwAYmJiuPzyy+scNzU1lX79+jFw4EDAnLlfH++99x7h4eEsW7YMQRDo2rUrmZmZPPzwwzz11FPIZObY1t69e/P0009b7V+2bBkbN27kiiuuaPLfwxaI5rSdN28eCxcupE+fPlx99dW1zv3222+89NJLvPnmm+IYJyEq+tRTpN8yBVWINyFfbUeQ171491R5Otgy58BSsKc9RtoaqwOIC+s4E2EJ8bCkSl28867PzjYff+tNyXHrQAI8XNC4KCir1pOSX2Fd+LdlssqyOF5wnDP5eYCG2EAN3tNuwCUmGtdu3cQ2z+nxuvpqvC6aHzozlkifjraRLCFxIXE1zszsPG/mXT6PGK9LHbMu0W1TF1VnMFKYmc2Q3GSiPeznHHM2LHN0Sz2R1qIZNQrNqFE26csZUAshlFHMoXNnuLGXDZy22nI4ucYsk9AUp+3x1XDoG1C62dVpq75sCBErPsVQWIggazs1VfS5uTZt11SaKiVx4sQJwsPDa0Wwdu/eHW9vb06cOGF12kZFRVkdtgAhISHk5DScbXzo0CE2bdqERnPp2joxMZH4+HhUKhVfffUVvXv3JjIykjfeeKNJdtfFe++9x8cff4xWq0Uul7No0SLuvvtu6/kBA2rrmzdmX1FREdXV1YwbN65J4999991MnTqVAwcOcOWVVzJlyhSGDav7M3HixAmGDh1aa842fPhwysrKSE9PJyIiAjA7bS+kKX93eyCa03b+/Pn8/fffTJ48ma5du9KlSxcATp0
6xalTp5g6dSrz588XyzwJEdEe2kllNugriup12HZkLPII7SXSVmfUkV5q1q82agOsTmkJCXvRYKqUyQSCQPYLL+Ixbpx0D3IQgiAQGajnZMEptqV4ER9U9y56W2Jn5k6W7FqC3j0e8m8lNlCDq7svrjXznY7O4dzDfHXiK4LVwSwasEhscy5Bl5VF0U8/4da3b5N0K1NKUpi+Zjrd/Lrx2cTP7G+ghIQTYtlwS8mRcX/fhSjaScFcgLSCCganH+H+gz+gW3wEvvxCbJMcglSErGECXDtRpj1JYlEdUiAtIXwwCDIoTIaSTPBspNBdyg7zox0dtgCCUol66FC7jtFcuhzYD4Dgdj4C3O/WW/GdPRtqir0qAgKa1Jc8wA+jyYhMkBG70SzLJLiel2n0vv76ZtkWFxeHIAg2KzZ2sWyoIAgYL5BvqIuysjImT57Myy+/fMm5kJAQ6/OdO3cCZkmHgoKCOuUcmsKsWbN4/PHHcXNzIyQkxBqtauHifhuz7+zZs80af9KkSaSkpLB27Vo2bNjAuHHjWLBgAa+99lrzX0wNLfm72wNRv0m/+eYbvv76a+Lj463O2ri4OL766iu+++47MU2TEBGVpoKwYQUEjA1rsF1aaRr3/30/d/91d4Pt2hvWSNvc9hFpm1aaht6kR2ZywaT3tOqhSUjYi8ZTpUzoz52jYt9+xxklgcxrO+4RK9iQtlpsU2yCZaFr1Prjr3HB29021YjbC8XVxaxNWsvW9K0NtjOUllJ91kaL4WZQsW8/ee8sI++dZU1qn1ySTIW+ot7CahISHYEwbzfclHK0BiMpDRTMrThwgJylr1O2teHPvzNhkUYo1PigqSc9tz0yLnIcC/svZEzEGJv2qy8oIO+DD6k8esym/TqaSE+zMzurItU2Hbp6QnAv8/OUnQ23LU6HolQQ5GZnbwdD5u6OzN29VrSkoFKZj6vMcy73gQNQBAc3WNxN7ueHvlc8J/NPklaadr7fC5yOQjNrLfn6+jJhwgTeffddyssvnRcUFRUB0K1bN9LS0ki7oDDf8ePHKSoqonv37k0eT6VSYbioLkf//v05duwYUVFRxMbG1vqxOFATExNZtGgRH330EUOGDGHOnDm1nJJ19VsfXl5exMbGEhYWdonDti4asy8uLg43Nzc2btzY5L9DQEAAc+bM4csvv+TNN9/kww8/rLNdt27d2LVrV62I6B07duDh4UGnTp2aPJ6jEH37c8aMGfzyyy8cP36c48ePs3r1ambMmIHRaGTNmjVimychAgptJp4RVXiNGdRgO5VMxaa0TezK3IXO0Dxx8LZMZ3/zTbaoQkdBubaR1s5PUVURfq5+oA8ABGKlSFsJOyNWqpREw0R52XjhIzIWHUBjdSCxgWq0qamUbduOPq99FytpKpaordSSVAzGuhcEpX9v4vSQy8h89BFHmgaAMjgIr+uuRTN2bJPaW5z0tkohlpBoi8hkgrWY7KGMDPae28vJgksjzco2bSL/o48o3SBeocHmkpBTxh9Rl/Htg8vwvXWe2OY4jD4Bfbit121cFnKZTfvNeW0puW+8QcHKz23ar6PpERALQLE+s5GWzSCyJrujMadtyi7zY0hvcLGfrFTRTz+T/8kn6DJt+BodhCCXE/TYozW/XOS4rfk96LFH0WHAhAmZDd1j7777LgaDgcGDB/Pjjz9y5swZTpw4wdtvv83Qmqjl8ePH06tXL2bNmsWBAwfYs2cPs2fPZtSoUVZt1qYQFRXF7t27SU5OJi8vD6PRyIIFCygoKGDmzJns3buXxMRE1q9fz7x58zAYDBgMBm6++WYmTJjAvHnzWLFiBYcPH2bp0qUN9msrGrPP1dWVhx9+mIceeoiVK1eSmJjIP//8wyeffFJnf0899RSrV68mISGBY8eOsWbNGrrVI0d2zz33kJaWxn333cfJkydZvXo1Tz/9NA888ECTHM6OxuksSkhI4LHHHqNTp05c38wwdIl2Qt4Z86NfbIPNAt0DeXzI47w3/j3oQBJybio5Yd7mNJD2EG3bP6g/a6//i+LEOwGIawdalhLOTVNTpZraTsI22GXhIyJWp63Wn7hAD0r+WEfa/Plkv3RpGlhHJEQdgkqmQmvUklle9/+5a9cuYDRiLC1rduXm1uI+aBChL7+M/x1Nk+qy/H9LKcQSHR1LxtSvST9w6/pb+fL4l5e0UQ+/HO9pN6AZNdLR5rUYy5w7JtDDGsUn0XJ8Zs7AtVcvNCPbtr7tmKiBlCfdQ3nyXegNNnJoWaQOGnXaWqQRGpfwaQ0FK1eS8+prlO/ZY9dx7IXnlVcS9tabKIKCah1XBAUR9vZbeF19Nf5u/sT7xBPgbru5f3R0NAcOHGDMmDE8+OCD9OzZkyuuuIKNGzeyfPlywJxuv3r1anx8fBg5ciTjx48nOjqab7/9tlljLV68GLlcTvfu3QkICCA1NZXQ0FB27NiBwWDgyiuvpFevXixcuBBvb29kMhnPP/88KSkpfPDBB4BZkuDDDz/kiSee4NChQ/X2aysasw/gySef5MEHH+Spp56iW7duTJ8+vV5NWZVKxaOPPkrv3r0ZOXIkcrmcVatW1dk2LCyMtWvXsmfPHvr06cNdd93FbbfdxhNPPGGz12dLBFNTVZLtSGVlJd9//z0ff/wxO3bsYMSIEcyYMYPrr7+eoIs+XE3h3Xff5dVXX+XcuXP06dOHd955h8GDG08ZWLVqFTNnzuS6667jl19+afJ4JSUleHl5UVxcjKdnxyyOZUtKF/VCaczE5d4fEOJsm4rTXrjlk91sO5PHy1N7MX1QhNjmtJqjGcVc8852/NQq9j/p2GqMEh0Pk8FAwrjx6LOz69a1FUARFEzsxr8kTVsHsj8tg7l/m6tJ75q5C42q7Ubd6ww6Bn01CIPJQNmZR1ly1TAmJ2yjcNUqvKf+B7/bbhPbRKfgP7/+hzOFZ3h33LuM7FS380aXnYMyKNDBljWfOX/M4UDOAV4e8TJXRV8ltjkSEqLx/pZEXvrjJJf1yqDY9WfGRYxj8aDFYpvVam5aup6duXrevak/V/cOafyCdkBBVQH/Zv9LtHe0tCFVD0ajie5Pr6NKZ2Tz4tFE+bdMD7QW5Xnwak0Rv4eSwN237nbLBkPeKZjxNXS1T+FOk8lE4ddfU7bxb0KXvobCx8cu49RHVVUVSUlJdO7cGdcLNGZbgslgMEuk5eaiCAjAfeAAaZ4vYXcaeg831Y8oaqTt3r17ufPOOwkODubNN9/kuuuuQxAE3nvvPe66664WOWy//fZbHnjgAZ5++mkOHDhAnz59mDBhQqNV3pKTk1m8eDEjRtig6qNEizFWlJK+TkfS+kD0CudfpImFRdf2bG770M47k1MKQExg23XSSLQdGkyVAjCZU6WkiZxj6REcjElvvgccPHdGZGtaR1ppGgaTAcFo1uqODdTgO/sWYtb+LjlsL8AiJWCpTl4XYjhsDWXlGIqLm3WNVR7BK8oOFklItB3iauZyRbld+WPqH+3CYWvU6Vj4+WO8v/FVYoylYpvjMA7mHGTh5oU8vPVhsU1xWmQygc
7+Nq43ovY369qGXwbl9Uh1acuhuua9GGG/AmGCIOA7axYRn37icIetrRHkctRDBuN1zdWohwxGkMsxGY0YSkrQ5UiSaBLOi2hO2969ezNt2jT8/PzYuXMnBw4c4MEHH6wlJN0SXn/9debPn8+8efPo3r0777//Pu7u7nz66af1XmMwGJg1axbPPPMM0dHRrRpfonUYs9NwC3FB6QGKyLo1SC7kXPk5fkv8jb9S2o4mli2ICagRD28H8gjTfpvG8pOPIChKrBN9CQl7U1+qFDIZvvPm4nnlleIY1oFxVcpRGs3/H/syTotsTeuwOPD01f6AIN3b6sHqtK2RFnAWStev5/SQy8h4sGnOpuLqYgqrCwFJ01ZCIr5G5upsbnmj6eL63Fy0Nky3tRfZh0/grqvCp6qUyC6RYpvjMOSCnB5+Peju1/SCSM3FpNNRsm49FfvbbvFXH78kXIJ+5c9kG65H79gKt62HgC51n1ep4YHjsPBo/ZG4Eo1jMqFNTUWfk01uSRZOkIQuIXEJCrEGPnXqFNOnT2fMmDHNqozXEFqtlv379/Poo49aj8lkMsaPH8+uXbvqve7ZZ58lMDCQ2267jW3bttnEFomWoejcnai/DzW5/cHcgzy2/TH6BPRhfOR4O1rmXES3k0jbwqpCa4EKk2GqtXiFhIQj8LzySjzGjauVKuXavRtyD0lXWSy8laHkkciJ/ASxTWkVSSVmp61RG4CHq4IADxeRLXJOLOm2Fid3XZj0es49+39U7N1L1KpvkHt52d0ubZrZiaQIbFqUr8X+IPcg3JXudrNLQqItEObthptSTqXOQEpBBTEBGowmIzKhdqxQwVdfkf1/z+FxxRV0eudtkaxtGik+Ydw+aQmDZMWscOs49/NR4aMYFW5fzdm8Dz8k751lqIcNJaKBICtnRuaWhMp3J0cKXIBZNuq0CbF1ggDe4bYZrw4MpaVU7N+PeuhQZC7t830vyOWYNO6UGSooqy4hwLNjSJ9ItC1Ei7Q9e/YsXbp04e6776ZTp04sXryYf//9t1WRtnl5eRgMhktkFYKCgjh37lyd12zfvp1PPvmEjz76qMnjVFdXU1JSUutHQhw6e55f8HWknTGLPEJKQQVave2qODoatVLN5xM/x6NkNphUxAVKzjIJx3JxqpTksBWXMLU5gim1NEVkS1qHJd3fqA0gNlBDyW+/kThxErnL3hXXMCfD4rRtKNJWUCio2LsXbVISFfv2OcSuwIULif9nF3633dqk9hb7JWkECQlzurhlE/71fa8xYtUIfj7z8yXtXOLiQBAwljt/AEJibhllKneEnn3ENqXd4T1linnTvE8fTDasTO9IBgYNRps/ElN5D9t3XlkE+upLjztg3Vu2eTPpd91Nys232H0sMakK9ibXS0Du0jrNXAkJeyGa0zYsLIzHH3+chIQEvvjiC86dO8fw4cPR6/V89tlnnD5t/9TI0tJSbrnlFj766CP8/f2bfN2LL76Il5eX9Sc83H47XB2OZn5ZR3pGIiBQoi2xpiZ2BII8XVCr5BiMJlILKsQ2p8Wo5Cp6+PUhO8s8yZEibSWchfI9e0i5+Rb0hR3nvuIMxPmaC2/kV6eLbEnrsEbaVgcQF6ih6vgJtMnJzdZJbe9YpATyKvMo1davExnw3//S6b33cB8yxEGWgdzbG0UT54aWSFvLRrKEREcnLsg8n8uvqKKouqjOaHr3fv2I37uXiE8/cbR5zSYxx+xYtsiTdRT0Rr3dx1CGhRG7eROB//0vQlOiS52QcVHDqM65itwcG8ssfjMTXo6C5IsygauK4bV4+Pbmuh26NsKk1aIICkI91H6auc6A1qAFwEXePqOJJdo+TnFnHDt2LF9++SVZWVksW7aMv//+m65du9K7d+9m9ePv749cLic7O7vW8ezsbIKDgy9pn5iYSHJyMpMnT0ahUKBQKFi5ciW//vorCoWCxMTEOsd59NFHKS4utv6kpaU1y06J+kmbPICUMd2p/GtVk9q7KlwJUZvTGBpKr2xvCIJglUho67q2yXkVGIwmPFwUBHlKX5YS4mMyGsl+7nkq9u0j773lYpvToegXHAdAJecwmtpmxI3JZLog0taf2EANfnfMJ/yTj/G+4QZxjXMyNCoNAW4BQMPFyDwnTsBj7BjkGufc2LPYLkXaSkiYsWROaSvNGx91RdMLSiVyjfM7QQtWfkGvj16kd26CNdOtI1BUVcSgrwYx5Zcp6Iw6u47V1gu/Rtc48/PKtBRX2PBv5eoNmCBlZ+3jqbuhPAfOHQWF/dZO3lOnErt5E/733G23MZqK0Y5R2NUGs+PbxSRvs9HeEs6LLd67omna1oWXlxf33HMP99xzDwcPHmT58uYtllUqFQMGDGDjxo1MmTIFMP+RNm7cyL333ntJ+65du3LkyJFax5544glKS0t566236o2gdXFxwaWd6rqIiclopCKtEqNWQHD3bPJ1nb06k1meSXJxMgOCBtjRQuciOkDNkYziNq1r+/OZn/k3LR9B4U5MYGSrCxFKSNgCQSYjeMnTFK/+lYB7F4htTodicKdYTHvlCIKOlOIMOttRq81eFFYXUqI1yyYZtf7EBXqg8PVFM3y4yJY5J1FeUeRW5pJckkyvgF5im0PJuvWUbfobjwkT8Bg7tknXWBxSUqSthISZ+JpI28IiL/BwvmKDzaH49zV0PX2YMPcoYjpQRlhySTJ6o55yfTlKmdIhY1adPImhuAT1kMEOGc9WqF0UBHnryatO5UBmKmNiY2zTceQwOPQ1pFxUmye1xokbaf95hSAICK7iyQaoVCpkMhmZmZkEBASgUqlsvl6sqKzAv8CAoMuiIkyOXC1p00u0HpPJhFarJTc3F5lMhkqlanFfTuW0tVBdXc3ff//N6tWr+eCDD5p17QMPPMCcOXMYOHAggwcP5s0336S8vJx58+YBMHv2bMLCwnjxxRdxdXWlZ8+eta739vYGuOS4hAMozyVidB7aEgWq3k3/EoryimJH5o42PSFsCTHWYmRtN9L206OfklySjEx1G3GBdtCBkpBoIe79++Pev7/YZnQ4gjzdEXR+oMphT/rJNum0za/MJ1QdSkZRFZhUkuxLI3T27Mzec3sbzZbRpqRQtmMHrt264d6vn93sKdu+jeLVv6IIDmmS01Zv1JNaai5cZtHolZDo6MQHmSNtM3I8cPGA9NJ0dAYdSnlt51/V6dPkLV+OoFAS9uorYpjaKD6PPMbSpz/mn+DuPN+BIm0dLftS/PvvZD64GFVsDNG//dbmAjmEoC9xl51kY7LGtk5bgIx9oKsCZY3z1BJ5azlvBwwlJcg9mx5EZS9kMhmdO3cmKyuLzMxMm/dvMpnIKs+itAJc9SDXapGpnT8DQKLt4O7uTkREBLJWyL+I5rStrq5myZIlbNiwAZVKxUMPPcSUKVNYsWIFjz/+OHK5nEWLFjW73+nTp5Obm8tTTz3FuXPn6Nu3L+vWrbMWJ0tNTW3VH0zCfgj5Cbj56nCLDgVN06tDX1iMrCNhScVpq/IIOqOO9FKzbqVRG2DVP5OQcEb0+fko/
PzENqPdIwgCalkopfpKkgoKxDanRcT5xPH+6J8Y89pGXJUy/HPTKfhjL259+uLWU9qcupgRnUagVqoZHNJwZFXByi8o/OorfG6aaVenrff116MMDkE9vGmLYQGBTyd8SnJxMkHqoMYvkJDoAIR5u+GmlFOp1eAld6fKUEFaaRrR3pdqfpb+sQ6Zuzsmg8Ep0+QzgqL4ovtEfNUqfNUtj5Rqa1i02R0l+6IZMQKZpycucXEYy8udVg6nPgJdw0nSniSh8KztOvWNBk0wlJ2DjP0QNRy0FZBxwHzeTk5bk8nE2WuvQ+7hQad33kYVFWWXcZqKSqUiIiICvV6PwWCwad8pxSm8fOhlgqtcee/aFSgkh62EDZHL5SgUilZvQonmtH3qqaf44IMPGD9+PDt37mTatGnMmzePf/75h9dff51p06Yhb+EX97333lunHALA5s2bG7z2s88+a9GYEjYgr6b4nH98sy6zTCY6WqRttL9F07Yck8nU5nak00vT0Zv0CCYVJr2nFI0m4ZQYq6rIfv55Sn5fS/Sa31CGhoptUrvnMs1/+eXfbNRRzfsucCYScsoAOTEBGip27CDnlVfwmDiRTm++IbZpTsfo8NGMDh/daDv15cOpTkjApWtXu9rjPmAA7gOaLrUkl8npF9iPfoH2cyRLSLQ1ZDKB2EANRzKK8XcJI73iDEklSZc4bV1iYgh48AHceokvjVIfibkdswiZVau7pmCkvZF7ehK36e82G+UY6RlFUh5kVaTarlNBMDtmj/1kjq6NGm6OujXqwCMUfKJsN9YFaJOT0efmYiguRhESYpcxmosgCCiVSpRK20p1pGankqXNwj+wFxopOEPCSRHNafv999+zcuVKrr32Wo4ePUrv3r3R6/UcOnSozTmfJGxD+c6dGNJccevWiebcji2TifpSr9ornf3VCAIUV+ooKNfip2lbOsuWyaCh2h+QWYtWSEg4E4JKRfXZJIwVFZRu3ozvTTeJbVK7Jy7QG8jmbF7b1es+k1MKQGygBqV3KJrRo3EfNFBkq9o2HmPG4DFmjNhmSEhINJG4ILPT1pUQ4EydGXGCXI7//PmON64JGMvLyf/kU3LVMWAydagiZHCBPIIDZV/aqsMWoHtALJvzoERv4xR+q9N2B/C/8/q2kcPMTl074NK5M/E7tlN16jSydl7Hx9GbExISLUE0p216ejoDaiIZevbsiYuLC4sWLZIcth2Ygg2HKDvjS3CcAZ9mXBfoHoi7wp0Kff2pV+0RN5WcUC83MooqOZtX3uactpa0K0N1AK5KGWHebiJbJCFxKYJMRsizz2AoKmpW9J1Ey4lp49Ivt6y9heT8CmSqa4gLjMdzbD88J04Q2yynJr8yn6TiJOJ94/FUiaehV3XqNCatFtcu8QhNLBixLnkdpdpShoYMpZNHJztbKCHRdrBsxuur/YHzzpG2QtmOHeS99x49fAJh5P86lNP2QgkzMbS6DUVFVJ9Nwr1/28lgGBzWhfdOgE6eS5VOi6vSRlIanUdBrxshpkZj3TMUwodA9Cjb9F8Pcm/vNlcQriVcuDlR+tdfFK/5HY/x4/G65mqRLZOQOI9o4q4Gg6FWBTWFQoGmjWnXSNgWl8hQXIOUuPRp3heEIAhWiQSLI7CjYKlim5jT9pwblsm7URtATIAGmUzasJFwTlxiYiSHrQOJ8HPBLfxjzro+Spm2bd3bdAYdR/KOUGQ8jcnoIsm+NJH5G+Yzb/08DuYcbLStSadDm55uFzvyP/mY5GnTyPvwoyZf882Jb3h217Mczj1sF5skJNoq8TW1CkqKzaEY9c3RTVotFQcOUPTTzw6zrSko/PzwuPJK/okeBIJATGDbjQJtLhYJMzeFG4HugQ4du/LIUc6MGk36/fdj0modOnZr6BMchcmoRBAMHMi0oa5tQDxM/Qj6zjT/3v8WuO1P6D/bdmN0YCzyilFeUVQdP07punWUbd0irlESEhchWqStyWRi7ty5uNSE3FdVVXHXXXehvigt4qeffhLDPAkRCFz2S4uv7ezVmeP5xzteMTJ/NVtP57bJNGLLl6RRG0BsJ8mxIdE2MJSUULF3Lx7jxoltSrslJsALues5BEUZ/2adYURk24m0kQkyvrrqK2789FdMek9ivFWYdDoEG2uwtTeivaKp0FWgNTS8QK88eJCUW29DGRBAzPp1NrdD5uqG3MsLt149m3zNZSGX4a50J84nzub2SEi0ZeKDzJG25/I8UWnMm/V11WDQFxWRctMskMnwnHCl06TIuw8YgEu//ix/ah3ojR0q0vbClHGZ4NgYL9euXZB7eiL380OXnY0qPNyh47cUhVyOyhiITpbBvoxTDIu0r/66vSj64QfKtu/A58ZpqIfZp9CZs2AymWq91zVjQxFULqiHDRXXMAmJixDNaTtnzpxav998880iWSLRHoj3ieeU9ynUSueY6DmKthxpa3GwG6sDiJOi0STaALqcHJL+MxVDURHRP/+ES5zkpLEHLgo5nmWzOFckw1AdILY5zUIuk+OjiKa8oDcKmYDf/h2ceuIJPK+5htAXXxDbPKfllZGvNMkxoIqJwVRVhaG4GENJCXJP20ophDz7DMHPLAGjscnX3N33bpvaICHRXgjzdsNNKaeyyhcXBEq0JRRWF+Lr6lurnTIwENdevVAEBGAoKXEapy1AZlEl1XojKoWMTj7uYpvjMKzRhyLofApKJVE//IAiMKDNySZ6K8PINWVwIj/Rth0bjZB9DA5+CUE9wKezWdNW1rKi7Q1RvOZ3Kv75B7c+fdq901Zv1DOj6wySS5KJ8IzAxccFt549xDZLQuISRHParlixQqyhJZwQU3UZgsIFWlhE7Naet3Jrz1ttbJXzE+Nvnti2tUjbwqpCiqqLADBq/aUUYok2gTIwELc+fdCePYuxqkpsc9o13byGkJmVQ1qBQWxTms2Zmk20KH81+rP/miNtXduW5rijaWokl9zDg+jf16CKjESQ2Sf6SxAEkNt+ISwh0dGQyQRiAzUcyTDgrQqkUJtNUnHSJU5bgM7ffyeChfVTdeIEyrAwEnLN3/XR/mrkHUjGS4wiZBeiDHKsJIOtCFVHkFu2h9TSZNt2vPMt+GtJ7WOeoTDxZeh+rU2HCnzwAUo3bsTjivE27dcZUcqV3N//frHNkJBoFNGcthISF5K/5G4K1+3GZ1xv/F/7QWxz2gzRNalaqQUVaGsiAdoClh18k84bTCpia4pVSEg4OyHP/R8ytRpZE4sUSbSM6AA1G0/C2TZWjOy3xN9YfzoJQelFXGAwATfdj/e0G4COs9i3Ny6dxXEi1EVBVQEyZHi7eottioSEUxIXpOFIRjGDPOZxQ/9ouvh0EdukRjGZTKT/dyG6zExyFz4HKDqUNALU1vkUE5PBQPWZM7h2bRtSA/G+MRwqg9wqG+quH/8V/nrm0uMlWfDdbLhxpU0dt269euHWq5fN+mtrGLVaqo4ew6TVor5siNjmSEgAIhYik5C4EG1yGvpKOchaF41kNBkxmpqe1tjWCfJ0Qa2SYzCaSC1oO9G2Fv0gQ3UASrlA
pF/HSTmTaNsofHwkh60DCPExovTezc68H8U2pVl8e+pbtuR/hNw1k9hADYJMhqpTJ1SdwsQ2zanRGXTM/mM2I1eNpERbIooNmY8+RvLMmyjbsaPJ13x+7HNGfDuCpfuW2tEyCYm2S1zNpry2tDtDQ4eiUTXs/DRWVzvCrAYxFBUhqJQIgsARdQgAMQHOI9ngCCyRtmLII1jQ5eSQeMWVJM+YiaG4WDQ7mkOfILNsViVZ/8/efce3Ud//A3/daW8vSd7bjrPIXmxICJuyoVD2KCMFfqwyyigbSmnoF8qmzABhz4ZASMIK2cw4w3tvW9Oad78/ZCkx8ZBtSSfp3s/How9q6XT3VuJId+97f97vyOyQ8wOr/gqAH+LJgcdW3RLYjoxZnaUOrfbWQbkD2+erUX/OOej4J32vk/hBSVsSF8wHMihc0gn9seMf7rNszTIsXLEQ2zu2RzCy+MYwTKjatrozcZK2wQnCnCcDhekayCT0UUQSC8/zsK5ahY7ly4UOJSkZUzgos95HC/NBwtyI43l+b69uavsyJjKJDI22RvS6e1FvqR91+85//x9qzzoLnqbmiMXg3LwZ/du3j6mHYvAGZKYmM2JxEJJMys2Bz8E97bYRt+P9ftSeeRZ2zZkLb1tbLEIbljQ1FSWffIKSNV9ilzWQDCsR0ec5z/N45qhn8PAhDwvWHgEApEYjWK0WrEIB9549gsUxFgvzByqCJXY09nVPfIf13wPWlhE24AFrc2C7CeI9HnQ++ST6f/sNPD9Ukjj5PLz5YSx9dyne3bO3QEA9exYkaWmQ5eSI5s+BxD/KlBDh8TwkjmqoMryQTxn/MgQf50O/rz90ESUWwbv/1Qm0jNioMsKsKIPflY0ys3hOhEnycO/eg+br/h+6n34G/T/+KHQ4SWd+bgl4XgIwXtT1RS4xF0297t5AlSjPBJK21ha0P/ggrKtXCx1aQggmB4I39Ubi+P57uH76Gc6NGyN2/Lxnn0X2Px6B8oADwn5NMNYiffy0bCAknpSbA5W2Nd3deH/Ph3jp15eG3I6RSMB7vYDPB9evv8YwwuHJTKZQix4xtUdgGAZT0qfguOLjoJQqBY0j5/HlKF2/Duq5cwWLYyzMWgMYvwG8X4FtLaN/l43K3h7Z7Ubg2LQZXf/3BBqvuAIQSbKS53lIGemginJpdjbKvvsWucv/lXCD8Ejyop62RHiOLsBlAcAAacXj3s2Nc2/ELewtyNGJaxlqsNK2JoEqbc+bch5+q5yBKksjSkV0IkySh3JSOVLP+SMkqWlQTJ4sdDhJx6RTg/FmAPJ2/NC4E8WpeUKHNKrgDUPOawADOTJqdqDn5VegbWqGfulSYYNLAIX6Qmxu2xzWjde0iy8C53BCe/BBETu+orgIiuLwk68+zodGWyMA4Yb1EBLvclJUUMkkcPH9uPP7ByFhJDh38rmQDTF4OOveeyFNTYE0O1uASAN4nw+QSMAwDPqcHnTZPQCAogxxtUeIF/HUwzxcFf6/Y1N1P7wzIrACQ2uO7HYjkGg10B21BLLs7KgN+ow3Tx/1NHycb9BjlKgl8YiStkRw7u1fw16phTIvBRqZatz7KU0tjWBUiaMk1B4hcSptgb0T1kvNNISMJKbMO+8UOoSkxTAMNGwmHGjHLx17ABwldEij2tsawYS8VDV006cBF5wPRQUl9cMRrHQJDsAZSTwkwZvtzfBxPiglSpg1E79gJiQZsSyDUpMWvzT7UKGfhwMyi9Dv7x8yaauaNlWACAfre+89dD39NNIvuAC1h58IAMg2KKFRiOeSeU39GrQ4WrAoa1FcXVt529ogy4z/VjTlRhM2VTdEZpBqwYGAPjswdGzIvrZM4PmCAyd8KNXMmcj9v/+b8H4SjZQd/t827/WCke3/WUVIrInjNgqJa84fvkHHT3p0V44/YStmxQPtEWo6HQnRe8fLeeHxe1AVTNpSpS1JEpzLJXQIScWkzAcAVPdFYIlhDASTjcF+tuo5c2C+9VaknHKyoHElilB7BEvs/7773nsflk8/ha+3N+zXhAb1GArBMnQ6TchwykxaAAwO1d+KOxbdAb1cL3RIw7J//TV8La3g+l2o7gisYBNTP1sA+KD6Azyy+RFsbt8sdCgAAr1WGy6+GFVHLoanqUnocEYV0WIaVgIc8/DAD7+vAB34+ZiHAtuRiPG2d6D29DOw57DDwXOJMVeBJDc6yySCkxWUQj89HZq5Mye0H6/fi2d/fha3f3s7vH5vZIJLAEUZGjAMYOn3otvhETqcUX3b9C3mvz4f7rQXwTJ7k86EJCpvSwsar7wKzTfeKHQoSaXIUAgAaHM2CBtImELtETxGGkI2DoUDf98N1gb4w5iE7evpgfWzz+DcsmXCx+58/HG03HAjPNXVYb8m+Pct5HR1QhJB2cCKqt0doyexrKtWoe2ee+CuEeZmXc6jjyL3P/+B4cQTQkk3MfWzBYCFWQtxVMFRmJI+RehQAACMXA4M3BhzboqPRPJItNo+KHNfwVbXPyOzwyknAWe+AuizBj+uzw48PuWkCR/CvWcP/LaRhwUmm1d3vIo/fvJHrNy1cr/npOlpcNfUwN/TA09NjQDRETKYeNZ6kLilPXMZtGcum/B+pKwUL/zyApw+Jy6ZfgmKDePvj5tIlDIJclJUaOrtR02nAxlahdAhjajeWg8/7wd4KfLS1FDK6O4wSWx+mx32b74BALhraxOyB1s8mmIsxVfdgNU/0uTk+BEcSsW5jSjXMvA0NUGWk0P90cKUrcmGnJXDw3nQ4mhBnm7kPsa9r69A15NPQn/ccRMaUsN7vdAtPhL9v/4G5Rj6Uwcrq4PJZkLI0MoHBs7uabfB6XXC6rEiUzP0MvfeN9+C84cfoJhUMaYe05HCKpXQHXkEAKD6y8B3j9gqbc+dfC7OnXyu0GEMYr7lr2A1GsgE7HccruIMHWS6HXDxEri8Hihl8onvdMpJQMXxQP33gaFjWnOgJUKEKmybb7oZ7upq5P3nP9AecnBE9hnvKrsr8Wv3rzgy/8j9nmOkUuQ9+QTkBQUJ8TtHkh9V2pKkwTAMCvQFAIRZXimkvcPI4r+v7flTz8efi16Eu/PogSVzhCQ25aRyZN19F4o/eJ8SthE0P2cSAMDP9sHuie/PNq/fiyZbYNkm5zGitOE3VC85Cg3nXyBwZIlDwkqQrw+0xAhnGJlm0UIoKiogLy2Z0HEZmQyZd96JopVvgdWEv/IjeJ5RpKd/84SMpMwUqLStc27CghULcOP64Vel6I87FmkXXADl5IpYhTes6oEBvyW0IkxwirKyhEmezcgsgr/zD+hvvBDNvf2R2zErAYoOAaafHvhvhBK2nMMB3usF/H4o46CvdKyMduNVs2hRwvzOkeRHlbZEULzHDb6rBmxmeUS+fIoMRajsqQzrgi+ZlBg1+Hp3Z0IMI2MZFh29KvDeNJSaaAgZSQ4pp58udAhJZ2pmFnifBozUga2tu3FYwWyhQxpWo60Rft4P3i8H79PD5OuBVSaDvLBA6NASSpGhCFV9Vai11OKQ3ENG3FY9dy6KP3g/RpH
tjyptCQlPbqoKKpkEbrcBMgRuePA8P+QqhNQzz4x9gAB4nw/NN9wIzcIFMJx6KrwSKRp6nADENXuh19ULt98Ns9ocl6tEeL8f9m++BWe3QWo0QT13DhhJfK3Yk0okyJcuxU6HDfU9LpSYDEKHNCJWo0HJp5/A29oKaWqq0OHEBM/ze/vSU4sjkgCo0pYIyvPj19h15CmoOWgKEIEhWsGLp3CmTyeTvZW2DoEjCc+e9oEhZFRpS5KQt60NnobE6MMaz+RSFjLeDADY1rJb4GhGFmqN4DHCrFci+8LzMWnbVphuukngyBJL8OIplt/hvt7eMQ/xtLgt6HH1AKALPkJGw7IMSk1acJ4MMGBg9VjR6w5/6F8s9G/fDtvnn6Nz+eNgJBI0dDvh53joFFIYdfHddiyS3q96H0e9cxRu//Z2oUPZj3X1auyaNx9NV1yBlhtvQsMFF6Bq8RJYV68WOrT9BFtqBIfZJQJZVtboGyWJblc37F47WIYNrfAZiuWTT9F6993wdXbGMDpC9kdJWyIoz46tAJiBJvMTv6Mr5PRpIQWXbsV7pa3FbcH1665HpWslAJ7aI5CkY/tqLWqOOx6tt/9tzIkgsr9UWS4AoLIr/AFRQhhqCBkjk0Gij98p6fEo+B0+lqQtz/PjvqDieR41x5+APQcfAndt+OcNwXMMs9oMtUw9rmMTIiZlJi3Ay6GRZAAYuQUKz/Nw19bC29ERo+gAWX4+TDfegLSLLgIjlaJqYGhasUkblxWn0RL8e8nV5QobyO9YV69G87XXgXc6Bz3ua29H87XXxV3iNjPVBal+O75vXSd0KCPi/X5RnqsGv8OzNdlQSIa/KdP93HPoe/MtOLdui1VohAyJkrZEUNpiKcpObkP2uTMisr9gb7ng0iuxCE62bezth9s3+tRtodRYavBF/RfwqjcBYEQ33IEkP+WkcvA8D97nA2exCB1OwsvVBCogGm31AkcysgxVBkyyKfD354b6N5KxG+uNV099PaoOOxw1J54EnuPGfDxfZyf8Fgv8FsuYetfVWwO/j9QagZDwlJkDn4syLrB6YqR/46233IqaY4+D5YMPYxIbAMjMZqRfeikyrvgzgL1FEGLrZxtq+xJHKwh4vx/tDzw49IrMgcfaH3gQvD9+rn84RTVUOW9hh/MToUMZkfV/q1C95Ch0v/CC0KHEVLjtjQx/+APSLryQWl0RwVFPWyIoprsaUiUH6aTIJG2DSxyCS6/SlGkR2W+8M+kU0CqksLt9aOh2hk6O402oGs1tRLZBCa2CPoJIcpHl5KDwzTehKCsFw9J90Ymakl6BLV158HIZQocyoj+U/gEffpuN6t4OTHe2ovGKJ6CePx/pF18kdGgJpdhQjFvn34pCQ+GwPS/3JcvOht9uB3w+eJuaIM8ffpnjkK83mTBp6xZ46uvBKsJfAn1iyYlYkLUA/b4IDpkhJImVmwM36T39GYBy5Gp6xaRJYORycDZrjKLb394hZOIqLggNWDTEz4BF55at8LW1Db8Bz8PX1gbnlq3QLJgfu8BGMDOzHO80Ak5+hLjjgH3dOnibm+HvE1eRQbi/53QOR+IFZUyIsLr3BP6bURqR3amkKmRrstHiaEGdpU40SVuGYVBs1ODnJguqOx1xm7Tdt+8jVdmSZKWcVC50CEljSdEhePpzKfpTVEKHMqrgctqijlrY160Dz3N0wj9Gapka50w+J+ztGZkMhSteh7yoaExJ132xSiWUkyaN7TUMi0xN5riOR4gYBVcg9FlSIFOO3B4h9eyzkPancwOt02LAsWEDwEqgnjMbjDRwaRystBXT7IU+Vx/63H0AgAJ9/FQWhtv+Jp76ji7MrwA2A5DY0GTpQa4hPq9Hs+69B/pjj4G8uFjoUGIq+PkTTxXlhIyEyoCIYHiOQ9undejepQGnzovYfsU6jCxYDVDTFb99bYN3NjmPkZYQk6TH+/3oefU1OLdvFzqUhBX8XGvu64fT4xM4mqF5OS/6+u1o7A302stefBjMd/wNKaefLnBk4qCsqBh3wpYQEhu5qSqoZBJ4XYFVE8Gb+ENh1eqYJWwBoHP542i44AL0vfcegEBP3eqOYHsE8SRtg9dNmZrMuOrVLTUaI7pdLJi1BjB+AwDgh4ZKgaMZHqtSQbd4MRRF8VNZHQvB3/VwKsp5noe7phbe9tj12Cbk9yhpSwTjb6pCb6UMHdv1QEZJxPYr1mFkxRkDw8jieFLpUMN6CElWXU89jfb770fbXXeD93qFDichpWrkSFXLAPixq71H6HCG9GvXrzhk5SIo859CiloG85QypJ17LvRHHSV0aAmpydaEj6s/xvct30f1ODzPo/n6G9D19NPgHOF/b/o4H25YdwP+ve3fcPlcUYyQkOTBsgxKTVpwnkBircnWBK9f+O9F3u+HvKQEkrQ0aA87HADQbnXD4fFDyjIoSI+f5GW0Ba+b4q36UD13DqSZmSMOrJZmZkI9d04MoxqdmskCAPzYvlvgSMi+PH4Pmu3NAMJL2rbe/jfUHHccLO+/F+3QCBkWJW2JcFgG6cdOQ8qifLC61IjtNniyMdLSq2RUHOeVtl7OiyZbE4BAT9syMyVtSXJLPfccyPLzkfrHswHqbztu2uyPoK24Ayt3vSt0KEMKDqUCJ0eZyCaNR8PaxrW47dvb8M7ud8J+Te8bb6Dh8svh2rkz7Nd46+th/ewzdD319Jiq+prtzVhdvxqv7ngVcknsqgEJSXRlJi14nx4yRgk/70ejvXHYbW1r16Lh0svQ9dRTUY2JkUiQ/cD9KPv2G8jMJgB7WyPkp6shk4jnuztY/RxvSVtGIoH5tlsHfhji+5VhYL7tVjASSWwDG4VRmQsAqO6NvyIinufRfPPN6H3zTXD94urN3mBtAMdz0Mq0SFemj7q9sqICjEIBv80Wg+gIGRr1tCWCkeaWwfSvtyO+X9G2RzAFK23tYQ1wibUmWxN8vA88JwPv06NUREvOiDhJU1NR8tmnoR55ZHzS1Hr0eTnU9NUJHcqQTi49Gb9V5eD5qp2YMgmwf/cdlJMmQZoR38PT4lVFWgXmmudiUmr4fWbt67+G4+tv4FiwEMqKirBew+p0MP31r/BbLWBksrCPpZVpcfO8m+HwOsAy4knoEDJRgXkLDJTIhBd1qLXUotgwdC9Nf08vHN9+C87Vj4wrr4x6bPsODq0SYWsEYG+xSzwNIQvSL10KPL4c7Q88OGgomTQzE+bbboV+6VK4du0C53BCPXuWgJHuVagvRF0X0OJsEDqU/bh27ID1o49h++JLGE4+WehwYiqYHyjUF4Z1rZxy+mlIPfusmLZsIeT36EqSJJ0SQwkOzT0UJSmRa7mQCArTNWAYwOryodvhQYY2vnr87dsaIUOrRKqGvvxI8ts3YRuPN1MSwWHmU/Dz2mkwTgkvGSeEpi4WvDcNM3tr0XjffVBMmYzi92gp3XjMy5yH/x7z3zG9JuWM06FesADaww8L+zXS9HSkX3ThGKMD0lXpOG/KeWN+HSFiVz6wwsrnygAUdSOuiNMsWgjznXdAPXNm1OLh3G74LR
bITKZBjwcrbcWWtA21Rxgofok3+qVLoVu8GM4tW+Hr7ITUaIR67hwwEglcu3ah/rzzAZ5HweuvQVku/EDYKcYyrOsCrL5moUPZj9RohPH668E5HGCVSqHDiakp6VNw16K7oJKGN+CWVYunRQqJX5S0JYLx/vQVJNmFYNMLI7p02Kg24snFT0Zsf4lCKZMgN1WFxp5+VHfY4y9pO3Bnk3NTP1siPq7KSrTd/XekX3kFdIcfLnQ4CWV6Vj54XwdqOuO3X3ewMitLI4O8sBDKiskCRyQuusWLhQ6BEDKK4ABamy0VUsXIK+Jk2dlIO+ecqMZj//prNP/lGuiOPQa5//pX6PG9SVtNVI8fT/ZtYTZc9XM8YCQSaBbM3+9xeUEBFOVlAMdDZjYLENn+5mWXA5WAl+2Ey+uBUhY/xSoykwkZl18mdBiCyNZm4/RyGhRLEgut6yKCqb98GXYdehyc/3tZ6FCSRnFGsK9t/CU3gnfwOU8GJW2J6Fg++QT9P/2Ezn8tB8/zQoeTUIIXzrVdDnBcfP3ZeTkvrvryajQybwGMF3mnnoiSVf9D1v33CR1awuv39cPhjc53Ge/3w/HDRvjtY+8Bv6FlAyq7K+Hxe6IQGSHJKzdVBaWMhat3Hp46bCXuWnSXoPG4dwUGRO1XaTsw0FdM56rNtmb4eB9UUhVMatPoL4gzrFKJvKeeQv4Lz0NiMAgdDgBgZlYReE4KhvVjW0uN0OGQCXBs2oSGSy9D2z33Ch0KESlK2hJB8C4H/P1+gGcgm7L/HdNI6HX1ot3RHpV9x6vgUq7qjvgbRhaqtPWYQtUWhIiF8aqrkPLHs5H37LPUImGM8tLUUGasB296CRuafhM6nEEabY34pvlrSFM2QiNTINsQWGZIf8cTc8+GezD/9fl4d3f4w+e4/n7Yv/sOjk2bRt3WU1ODhgsvRNXhR4DnuDHFdss3t+DMT87Enr49Y3odIWLHsgxKB4aR2e2pkLIjL/j02+2wf/01bGvWRCUe47KrUbp+PdIuvjj0mN3tQ5vVBWDvgF8xaLAF+q4W6AsStle3RKcDq9q75N3+3XfwWyyCxSOVSCDjAlW/W1t2CRbH7zm3boVj0ybwPp/QocQcz/P4oOoDbGvfBh8X/vvn3R44vv0W9m++iWJ0hAwvMT+VScJjrI0oP6UNpafbIS2I/DLSF355AYe+dSj+b/v/RXzf8ax4oCItHittQz1t3VRpS8SH1WiQddddoenUJHwyCQt1ym7I9L9hY9OvQoczyN7PNSNKTDpK1kZIiiIFwNgGiva98y4aL7kU3U8/M+q2vu4eyLKzA1Ohx9CeyeK2oMfVAwAo0sffsB5C4l35wE37Pe2jT2J3btqExsv/jM7ly6MWj8xsGrScvmagNYJRp4BBFf6AwkR3aO6h+Oasb/CPQ/8hdCgRYfn0UzRe/mc0XnEluP5+weJIkWUDACq74qfStvOJJ9Bw/gXoff11oUOJuW5XN+747g5cuOpC+Hl/2K9TzZoJ89/+htzHl0cvOEJGQElbIozuPWAYQJZfMqYLpnDl6HIAADbP6CeFySSUtO2Mr0pbH+fDKaWnwWedDs5jRBklbYnIeerrqU3CGKTKAp/plV3VAkcy2N62L0Ys9LSj+rjj0fr3vwscVeILTi8P/vmGQ71gPqSZmZDl5426rWbhApR+tQZ5Lzw/priCSWST2gS1jIaTEDJWZeZA0nZd64e49ZtbsbNn57Dbqg44INAjfNr0MVfEj5cY+9kGpShT4nYI2VgpSsvAajSQFxSAkQmXfM/TlMDvMqPPER/nezzPQ5adDYnBAO1h4Q/uTBb93n4cmH0gZptnQyEJf/aLRKtF2p/OhXLKlChGR8jwaBAZEUZXoI8UMqIz3fOIvCOw6dxNYU+GTBalA0u5GnqccPv8UEglAkcUIGWlODb3IjzeXAqdUgqjLr6GpBESS53/9wS6nnkGWffdi5STTxY6nISQoy1Ahx1otNcJHcoge9u+ZGCyqw2emhrIMjOFDSoJBJO2Y6m0VZSVoXTtV2OqdmYVY/suCiaRg/ERQsYmeNO+3rUJu2sqMdc8FxVpFUNuK83IQMmq/0U8Bp7nUX/OuZAXFsJ43XWDVsAE+9mWiKg1QjJSTipH0bvvQJaTE5XioHCdWnwhvt48E76iNMFi2BfDMMi+/37wf/87GKn40kB5+jw8c9Toq3EIiTdUaUsE0f3eV2jbakC/LSUq+1dIFKJL2AKB5VxahRQcDzR0O4UOZ5A97YHqhTKTlpYQE1FjlArA50P/1q1Ch5IwytMC06y7PU0CRzLYvu0RdEuOQt5zzyH98suFDSoJFOoLAQBd/V1hr5hhGCbq3y3Bv+9gfISQsSkfqLS1d83EspnXYLpxesxj8NTUoH/7dlg/+QSsZnBFbVVHsNJWXEnbm9bfhEc2PwKLW7gesJEmz8sLJWx5nof92+9ivsJp7wrI+GpbJ8aE7URxLhfsX3+NnhUrhA6FiBAlbYkgrD82onePBl4vDaSKJIZhQku6quOoRUKjtRHbW+sA8NTPlohe+oUXIu/555F1L02hDdeszMCqDBffDo6PzTLZcOzbHqGkNAfaQw6GZuECgaNKfFq5FhmqDAB7E6Vj4bcP//3nqqxEzYknoe2BB8a8X6q0JWRiclNVUMpYuPpm4KjsP6I8NbwVd5HsSyrPz0f+Sy/BfPttkGgHJ21D7RFEdK7a5+rDqrpVeHXHq5CxydnHt+PRR9F46aXoeuLJmB43OMyuy+5Cr9Md02P/Hu/1wtfVJWgMQnP5XON+ra+rG42X/xntDzwoaJ9kIk6UtCWCSD/7VKQvqYBy0VFRO8brla/jwlUX4n+1kV9aFc+CJwjVcXRX9x9b/oG32/8MWeoPKDNRop6IGyOTQXvwQUKHkVAW5JWC5yUA60Vtb7PQ4QAAel29sHgCVUlSvwl5qeJb3RFNwWrWsbRI8PX2oubEk7DnoIPBuYe+QO7/+Re49+yBp6pqzDEFY6EhZISMD8syoZv3u9tHLy7w1NWh+uhjUL306IhVSTIyGTQLFyD17LMHPe7zc6jrDrZHEE9PWykrxR0L78AVM65I2l7dsuzAQLDfV1ZHm1YhhaHoZWgn3YU11Vtieuzfc2zahD2HHIrGZcsEjUNIp350Kg576zBUdleO+bWynGyo5syB4fjjwY1wY5iQaKDaeCII/Z/vhj7Kx2iwNmBr+1ZMz5iOY4uOjfLR4kc8Vtr6OB/As+DcGSg1i6d6gZDRcC4X+lauROo559BytRFkaNVgfBmArB0bm3aiJG30YVPRtrfKNgXT1DLYVr4FRUUF1LNmCRxZcigyFGFL+5YxDSOTpKTA19cL3u2Gu7ISqpkz99tGt/QoSI0ZYMbYz9bH+dBgawCApBnWQ4gQyk06/NpsweamSsj0v+HQnEMhkwxd4Sk1m+FpagL8fvg6OiAzm6MWV2NvP7x+HiqZBNkG8dyE08q1OHPSmUKHEVVp554L9Zw5UFYM3T85mlRyHg7Wgx/bd+P06cLdsHft2AHwPKSpqYLFICSP34NmezM4noNRb
Rzz6xmGQeHrr0UhMkJGR1eIJGkFL6rGs7QykQUrbeOpf9K/j3gCk+/8FH4/HxqWRojY8TyP+vPOh+uXX8BzHNIvvFDokOKals2CHe34uX0PzkH0VmmEa+8QMiMWelrR9vdHoSgrRfHHHwsbWJIYT6UtwzDIe/JJyHJzIU0bevCLNDUVuiOPHHM8zfZm+DgflBIlMjU0bI6Q8Sob6Gv7bvtNWNnuxod/+BDFKcVDbsuqVCh45WUoSkogSUmZ8LFta9fCU10N3dKlkOfnD3queqCfbbFRA5al2QvJZt+ELe/1wrVrN1TTpkb9uPP0F+KTnzqhNc2N+rFGknHZZTCcdBJ4r0/QOITSYG0Ax3PQyrRIV6YLHQ4hY0LtEUjMeX/9Fp5v3wFvaYvqcYI952qt4VfpJIOSUNLWHvOG+8Op73HC62ehksmRkyKe6gVCRsIwDFLOPAPSzMz9Lh7J/szKQHVtdV98fKaHhpB5jDCbUqA57FCoFy0SNqgkErzxOpZKWwBQHXDAsAnbiQj+fRfoC8AydPpMyHiVmbQAGLA+E4DRz9PVc+ZEJGELAH1vrUTHo/+E9dNP93su1M9WZMUFG1o2YHvHdji98TXAOFo4lwtNy/6C+j/9Cc5t26J+vFnmKeA9RtR1CdvTFgBkZjPkuTlChyGI4OdMkaFowkNLfT094Ln4ma9Akh9V2pKY63nyEfSs3YPUQ4uR+ez+J02REqzSabI1wev3Drv0KtkUpKvBMIDV5UOX3QOjbmxLQKMhNI3XRNULhOwr5bTTYDjuuJj3WUtERYZCVHcBbf0NQocCYPAQsvTFi5B/1WkCR5RcgjdeG6wN8HN+SFjJhPfpqa+HY9MmqGfOhKKsbEyvDVb8UmsEQiamfKDS1tWfDomuMaYr4nRLFoP3eqFdvHi/58SatH1g4wOos9bhmaOewYHZBwodTtQxEgl4BIpaYtGbNDjULp7a1olR8HMmmB8YD57nUXvaaXDvqETxxx+N+TyCkPGiUgESc7y9B4yEg6IouoM8zGozVFIV/LwfjfbGqB4rnihlEuQODMSpiYMThDd2voGHf74SstQNNISMkN9hWJYStmGaagycHNv8LQJHEhCs2uDcxtBgHRI52ZpsyFk5PJwHLY6x/Z1bP1+N5ptvhnPr1kGP27/+Bm133ImOR/855niCSfpgMpkQMj65qSooZSx8rgwAo1fTcx4Pel57HS233gbeN7Gl3Smnn478F56Hsrx8v+eCA3xLTOL5TvZyXjTZmgAAxYahW1QkG0YmQ+7y5Sh47TVoDz006sfLTZVCnr4WzdL/wjPB39/xarj0MrTcfju8LfFx/iSESNx4ZRgGEr0BAODavTsCURESHkrakpjLnOfEpNPbkHLqKVE9DsMwobtpY11emeiCVQLVcdDXtrK7Ep2eKjASByU2CBmB4/vv0XDJpeD6+4UOJS4tyJ0EAPCzfbC7hf9sOyrvRHj7ZoFxm1CYIvyKhmQjYSXI1wfahoy1Es/+1VewfvQx7Ou/HvS41JgB9aKFUM+fP+Z43H43pIx0QlU6hBCAZRmUmrTg3IFhQKP1rWakUnQ+/jgs778P9549UYmJ5/m9q8JEVGnbbGuGj/dBJVXBpDYJHU7MsCrVoH62/r4++Do7o3Ks/FQd5BlrIDFsxbaW6qgcYyTe5mY4vv0Wlvc/AKNUxvz48SJSN16z7r4L5T9sgOH44yMRFiFhofYIJLZ8HqC3DgwDIGtK1A9XaChEZU+l+IaRZWixbldnXFTa7jush5K2hAyN83jQcvvf4GttRc9LLyHjyiuFDinuTDZnwmeZC79Xj4ZeG6ZkClsNNS/lNPyrNReHoAd18+dDPXcO8l98UdCYks29B90LjUyDXF3umF6nP+F4SLMyofvdEmj9McdAf8wx44rlwUMexD0H3RM3veIJSWTlJh12dAWStrWWWvA8P2yfSYZlkXrOOWBkMkgMhnEdj/f7Yf/mG2gWLgQ7ROKq2+GBpd8LhgGKMsRTaRtMZIm5V7e3vR0Nl1wChpWg4NVXxv07NhyZVAoZZ4KPbcHmll1YmD8povsfjdRkQv5/X4Rr166o9HtPBDzPR6Q9AgDICyf2ekLGg5K2JLZ6awHeD8i1gC7605eDd9PGMn06GQSXdsVD/6RQ30d3BiVtCRkGK5cj8/bb4PhhI1L/9Cehw4lLUgmLXN+F2NNpR6dVAkT/K2REewaqsmb5usF7POB9fmEDSkLTMqaN63XaQw6B9pBDIhwNIGPF0RufkGgrNWvB/Rhoj2D1WNHr7kWacviEkun/XTeh4/X/+COarrgS0sxMlK79ar8EcfXA53mgdcPE+2cniuD1UZFevG1feI8HfosFDCuBr7sn4klbAEiR5aCLb0FlZ+wrbRmZDJpFi6AR8aDUblc3bF4bGDChFTyEJBJK2pKYcqz5FN3r06CdlIG0CU5uDEfwJESMlbYAUNMl7BLiPlcf+tx9AACJ34SCNLWg8RASz3RLlkC3ZInQYcS1YqMGezrsqO6w47Byo2BxNFgbsL11NwA/vEcuRck1p1JbizjHuVxgZDIwEvEkZAiJV+UmHcDLIfGnwi/pRZ2lbsSk7UT5e3shzcqCet7cISt6g+3ESkXUGgHYW1gh5gGL8rw85L/wAiQaDWQ5OVE5RrYmH132zai31UVl/2Rkwd/zHG0OFJKJt7Oyrvocti+/hOHkk6E9+KAJ74+Q0YhzHQQRTP/PP8LRqkR/ryomxwuehAQHxohFsNK2sccJt4DVX6HWCF4DitPTIJXQRw4h4fL39QkdQtwpzlCDkfZhe/sOQeN47pfnsMZ+A+QZa1Fq1kOenw/lpNgueRQDq8eKZ39+Fvf/cP+YX8v7/ej/7Tc4Nm0CAPS+vgK75s1Hx/LlY97XqtpV+OMnf8QLv7ww5tcSQvZXbg4MpvW6wutrCwQqIvt/+XVcN8h0S5ag9Ks1yLzzriGfD65ME1M/W2Cf4Uwi79WtLC8flLD1traC57iI7b8sNTDkrcsd28HYjh9+QPcLL8LTKJ6B3EOJxBCyfTk2/gDrJ5/A8c03EdkfIaOhDAqJKd2pFyLzkqNhOOOsmByvQF8AALC4Leh19cbkmPHAqFVAp5CC44H6bqdgcextjUD9bAkJl99uR8stt6L6+BMocfs7PuUOaMsewg/WJwWNg+M5gJPTZ1uUMWDwf9v/D2/uehN2z9ja/Vj/twp1p52OjocfAQC4du0E73SCVY19xcfOnp34tftXtDpax/xaQsj+Am0IWPjcgRYJ4QwMrvnDyag74wz0//jjuI7JMAwk2qH71YaStiL7PA+uRJzocKZk0v/zz6g95VS0P/BgxHqYz8wsBwA4+baI7C9cfSvfRsc//oG+lW/H9LjxJlJDyIL0Rx+NjGXLoD/+uIjsj5DRUHsEElOKuUdCMffImB1PJVUhS5OFVkcr6qx1SFWmxuzYQmIYBsUmLX5q7ENNpz1U0RBrwQpnGkJGSPhYuRz9v/wCf08P7N98C8OJJwgdUtyYlVWON+ol8Pj4EQfXRNtNs+/C658shMFtQ9orT8Ey
fSoMJ50kSCzJTCfX4exJZ8OkNoHD2Kqe1PPmgdVoIDWbwfv9yH7wQWRcdhlYvX7McZw56UxMy5iGLE3WmF9LCNkfyzIoNWmxyzlQaRtGGzNFxST4enrg6+4Z07E4hwOsZuThYmKstO1z9aHXHShoCRa5EMDT0Ah/Xx/6f/oJfH8/GPXEW7styq8ANgOQ2NBk6UGuITYDwTQHHQR/Xy90SxaPvnESi9QQsiDNwoXQLFwYkX0REg5K2pKkd8HUCwBAdBdbJRka/NTYF+rTJYTglyQlbQkJHyOXI/uB+wGeh2rmTKHDiSuL8ifB/tI9ACSwu33QKYUZDFXVYQfAYr63B/ZXX4GnqIiStlFy+8Lbx/U6mdmE8k0bB/WwVZSVjWtf2dpsZGuzx/VaQsjQyk06VO4Kvz1C1r33gdWox3yzruHiS8A5Hci67z6oZszY73mX14+m3kDLhRLjyMndZBL8MzerzVDLaOZEkOGE48EqFVAvXAQ2AglbADBrU8D4DeAlFvzQUInTp8emD2rKaaci5bRTY3KseHbnojtR3VeNkpQSoUMhZFwoaUtixt/dgv4Pn4F82jzI58eucuzcyefG7FjxpHjgxDNYPSCEUHsEjxFlZkraEhKuoS4sCZCqViBDq0aX3Y3aLgcOyE0RJI6qgUnj+rwspJ53XlSmTZOJo6FjhMSvUrMW3C9GAAw4ngPHc2CZ4Tv3DdfaYCR+qxX9O3YAXi+kmZlDblPT6QDPAylqGdI08jEfI1FFesl4Mvn9UFhfdzek6ekT2qeayYQDFvzUvidmSVsSkKnJRKZm6H//48V7vXDt3AkAUE2fHtF9E/J71NOWxIx7w2o0PrISDVffJHQoohBc4iVUpa2X86LRNtD43mNEUYZ4qhcIiSRfby8c338vdBhxQ+gbUh9Xf4wndl8BefoaGKZMRubtt8G47GpBYhEDL+dFraUWv3X9Nq7X834/2v/5GBqvvAo9K1aA949tOGensxMv/PICvm76elzHJ4QMrdykA+/TI7v3MXx66qcjJmzHS6LXo/zbb5D79FOQmc1DbrNvawShWu4IIdjCTOxDyEZj+eRTVC05CvavJ/YdYFTmAgCq+moiEdaIeJ6H/ZtvwbndUT+WWPW+8SbqzjgTnU88IXQoRAQoaUtihu+ug8LghSIrtv1VvZwXO7p3YG3D2pgeV2jFA0nbmk57xBrpj0WTrQl+3g+ekyHPkAWFlCqeCBkrT10dao47Hk3L/gJvW2wHWMQrVcovUBc+gZXVTwty/D29e2DxN4KROqjtSwz80PIDTvrgJPztu7+N+bWWzz7Drpmz0PPcc7CvXYv2e+5F1eIlsK5eHfY+KnsqsXzbcjy+7fExH58QMrzAvAUGtV1e+Pzh9azueeUV1J39R9i+/DLs40gMBugOP3zY54NJ21IR9bMFAB/ng0qqQqGhUOhQ4pr96/Xg+/vH9L0xlAJdoKK51dEQibBG5K6sRONll6HqyMXgfb6oHy+e/dz5M57Y/gS+a/4uovtVzZoF1mCARE8rrUj0UXsEEjOaTC+Kj+0EP/+0mB63u78bZ31yFqSMFJv+tAkyVpgeiLFWkK4GywA2lw+ddjdMOmVMj7+3n20GSo1jH/xCCAFk+fmQFxSAc9jht1ggG2Z5p5ik6xhIXE1ocgrzuRKsToIrHeW8TdCBaGIQTCg0WBvg5/yQsOHdALSuXo2WG24EfnfT0tfejuZrrwMeXw790qWj7oeWEBMSHbmpKihlLFxeDg09zlCxwUg8dXXo//FHOLdu228J+3gFV6SVmMS1IuzmeTfjprk3wceJO6k3muz77oN69myknHnmhPYzxViC9d2AxdcSociG5+3ogDQzE8ppU8FIxZ3u2dS2Cc/8/AxOKD4BB+VEri2FcuoUlG/4HgxLNZAk+ui3jMROdxUAgDGObxDIeJnVZuRoc3CA8QBY3JaYHltISpkEuamBBvo1ArRIqEirwAzVJfB0H0L9bAkZJ4ZlkfPvx1H03ntQTpokdDhxYZqxFABg80f/wmcotX11AICcTgk0F54eqGQRYDWDWGRrsiFn5fBwHrQ6WsN6De/3o/2BB/dL2AaeDDzW/sCDYbVKCCZtaQkxIZHFsgxKTVpItJX4f19fjkc3Pzrqawx/+AOyH34IqeeOPq/C/vXXaLr2Oti+GnmlXXXH3vYIYsMwDGQScRSzjBcjlyP17LMHJef89rG3Z1qYOxU++yS4rBXwc9E9Z9AdfjhK136F7AceiOpxEkFFWgVOKzsNB2YfGNH9MixLCVsSM+K+9UJiq2t34L8Z5TE9LMMwWHXaqpgeM16UGDVo6HGiutOOhcUTa6A/VlnaLHh7F8Bn7UEZLSEmZNxkJpPQIcSVBbkVwA7Az/bB5nZAp4hddZSX86LJHujVXeRSADIZZFlZVGkbRRJWgnx9Pqr6qlBrqUWuLnfU1zi3bIVvpHYiPA9fWxucW7ZCs2D+iPuiSltCoqfcpMNOqwc19l+g7xy9il41Y0bYgzqt/1sF2+efQ2o2QXfkEUNuw3E8arrEm7QlY8PzPDoffxzWz/6Hwtdfg9RoDPu1M7MKwbVeArePQ1OvEwXp0T13YRgGEj2tdDw452AcnHNwVI/BcxwlcElU0W8XiQm+34Hq1+xo/DoNnGr0Cy4SGXv72gozjCzUJ4yStoRMGM/zsHz8Mbpf/K/QoQiqwpwJ3he42NnSvCumx97bq1sO24wjUbF1C3KW/yumMYhRMGFaZ60La3tfZ2fEtgsek/o+EhJ5pWYt/P1FmCy5An9bOPa+1SNJO+9PSL/yChhOOGHYbVos/XB5OcglLHJTVRE9fjz7tvlbnPLhKXhs62NCh5JQOKsV1k8+hbehAfb168f0WpZlQkOZozlIlXO5orZvMpi7thZ1Z/8RNSedJHQoJMlR0pbEhOfXDfBYpXB2KMCYCgWLw8+NbWp0oivZZxhZrL2x4z30+ncD8FP1AiER4Ny0GS033YyOf/0L7ppaocMRjIRloOADvX23tu6O6bGDVZec24gykw6MXE6V0DEQbE0Q/PMfTbjVT6NtZ3Fb0OPqGRQDISRyyk068D49bN0HYFJaeC2AvO3tsH72GZzbto+4nXLKFJiuvRaqAw4YdpuqgdYIhRlqSCXiuSyu6q1CVV8VWu3htZwhARKDAfkvvoCs++9Hyumnj/n1JUYtIHHgl9bGKEQX0HTV1ag58SQ4t26N2jESRb+vH7t6dsHli04iW5qRgf6ffoKnqhq+rq6oHIMQgNojkBiRlc1E/gPXwt/VLsjygc1tm3HHd3fArDbj5WNfjvnxhVJsDN7RjW2lrcVtwQOb74K6ENC1PQKNgj5qCJko9fx50B19NJSTJ0OemyN0OIJKleegna9GZVdVTI8brLrkPBnU9iWGxlppq547B9LMTPja24fua8swkJrNUM+dM+J+gsczqU3QyMQ1pIiQWCg36wAEKg/9HA8JO3qrmd4330T3U0/DcPLJUM+eNaHjh4aQiay44ISSE1CSUgKdXCd0KAlHnp8PeX5+6Gfe5wN4Hoxs9N7ANuX/oCt
/E6tbj8S1mBvx2Lj+fji3bQPvckGaHtu2ePHot67fcNHnFyFXm4v/nfa/iO9fotMh9//+DUV5OST0502iiDIpJCbYlAxoTr1CsOPr5Xo025th98a+4lRIwaRtU68TLq8fSll4U7cnyu61o1A9C9Vd3Sgz0pcYIZHAMAxylv+L+qcCyNUWoN0GNNnrY3rcOksdAEBjNWDWM/ejc+sByPjLX+jvJMqCVa7BP//RMBIJzLfdiuZrrwMYZnDiduDvynzbrWAkI38nBo9XpKd+toREQ26qCkoZC4+kAU9s/S8OLZiFWaaRE7Hq2bPhmDoV8sLCIZ/neR49/30J6nnzoJw2dcTP5+AydbElbTNUGTgk9xChw0h4nNuNlhtvBKNSIfuhh0YtTCpJzcd2O9Dr6otKPKxKhbJ1a+HYtGnYfx9iUmsdGCQaxfZGuiVLorZvQoLEsw6EiFq+PnBH1OK2oNfVK3A0sWPUKqBTSsHxQH23M2bHzdHmYJ7yFjjrr6J+toRE0L4XnzzPg/d4BIxGOJPSigEAPZ7mmB63pi9wAZDXLoF803ewfPwJJWxjIHjB1dnfCbsnvJuv+qVLkfP4ckjN5kGPS81m5Dy+HPqlS0fdR7AdA/WzJSQ6WJZBqUkLmWEbnt/xL3zV8NWor9EecgiK3n0HGVf8ecjnPTU16HjkEdSfcw44x8jnvtUdNHuBjJ/rt99gW7sOtlWfw7179HZNx5cshW3nPfC0nBe1mCQpKWF9v4lB8MYrtTciiY4qbUlMWJ+4CRJDClTHnA/WmBfz46ukKmRpstDqaEWtpRapytSYxyAEhmFQbNTip8Y+1HTaMSkzdsug9nTYAICWEBMSBZ7GRrT9/R7IcnOQdffdQocTc7OzJmFFPeBCGzieA8vE5h50zUASr1dXAtPt82hacIzo5DpkqDLQ1d+FOmsdpmVMC+t1+qVLoVu8GM4tW+Hr7ITUaIR67pxRK2yDgu0Rgu0ZCCGRV2bSYVddoL90uNX0I+H9fuiWLgUYBhLtyG1NxNgeweax4dUdr6LIUIRjCo+hG48ToJ49G9kPPQSp0QhlRcWo208ypwG8HF12Nyz9XhhUo7dUIOMXi+9wnuNgW7UKzu0/wnTD9WCVyqgdi4gXJW1J1PEch9ZnPwbnYVBUPBNKAZK2QOADu9XRijprHWabZwsSgxBKjBr81NgX1Umlv+f2u0PDHah6gZDI87a2wvHtt2AUChivvjrswUvJYn5eKfgNEjCsF1U9TShPzx/9RRPU6+qFzWsBABhypiP9vCOjfkyyV6G+EF39Xai11IadtAUCrRI0C+aP65hUpUNI9JWZteB2DSRtw+xbDQysNnE6wWoGJ2aV5eXI/ffjo77e4vSiy+4GsLedmBhU91XjqZ+eglltxrFFxwodTsIznHD8oJ95nw+MdOgUi04pg1mvQLvVjZpOO2blR66IyLl5M3peXwHDiSdAt3hxxPabyIKrZaJ645Vh0P7Qw/B1dEC/9Cio582L3rGIaFGJCIk6vqcFaqMLcp0P8gMOFCyOsfbESxbB6oGaGA4jO+3D02E1/Q2sspGStoREgWb+fJhu+SuKPnhfdAlbAEhVq8D6Av2yNzbujMkxQ0PIvCmYZKJe3bEWvOgKXoRFm4/zod5WP+jYhJDIKzfpwHkC32ONtkZ4/d5RX2NbswZ7Dj4EzTfeNO7jVncFiguyDEpRDcyNSSJLpHydnag980xYPv102G305h+gyn8Gn1R9HtFjW1d9DtuqVbCtXRvR/SYqj9+DZnughVY0b7wyDAPDH/6A1HPPhSQlJWrHIeImnm8oIhjW0YC8Q3oBQz6gE64tQbAnXawu+OJFyUD1QHVXbJK2Xs6LJnsjWKkfqYp0pKjlMTkuIWKTfuGFQocgKC2bDRs68EvHHgDR79+Wr8tHheRy/NTUhbm+Kvh68iBNS4v6cUlA8KKrz90Xk+M5vA7Mz5yPFnsLMjWZMTkmIWJUZtaC9+nBc3L4WQ8a7Y0oNhSP+BpJWhr83d1w7dgx6HFPYyMkBgMkev2oxw2uCBNTawRg7w1IWkEQeb0rV8K9oxIdj/4TusWLh1wqr1R1QyqtxY6eHUPsYfxSTj8NrFYLzUHCFUjFkwZrAzieg0amQYYqI6rHMt1wfVT3TwglbUn0dQ00Zs8oEzSMUKXtGJZeJYPiYKVthx08z0e9d1WzrRl+3g+ek6EsPSeqxyKEBHibmwGZDDKTSehQYmaK9iisrymGImNKTI6XrkqHrWsGjC3VOODVv6HqaSUmbd0Sdn9UMjGnl5+OMyadAZVUFZPjGRQGPHPUMzE5FiFilpeqhlImAec2QqJqRq2ldtSkrXLqVBSsWAHllMmDHm9/4EHYv/kGWffdi5STTx5xH8G2YSUiao0A0IDFaMq48kpwdgdSzz5r2N6mBfpC1HUDrY6GiB5bOXkylJMnj76hSIT62eqLqG8zSXjUHoFEX9eewH8FTtoGlwGFu/QqWRSkq8EygM3tQ+dA765oCi0h9mSg3Dx6pQMhZGIsH3+C6uNPQPsDDwodSkwtyj4E3r756OyJzeeMn+NR0+WA3uMEk5sH5aRJlLCNIbVMHbOELSEkdliWQalJC84TqIYLp40ZK5dDPXvWoMQYz3HwdXQAPl9YyavqjoEhZCJr40UDFqOHYVmY/3oz5AUFocd4nh+0zRRjCQDA4muJaWxiE6ooj9HNCZ7n4WlogL+vLybHI+JCSVsSdU1Pr0H1Z0bYm4W9uDWrzVBJVfDzfjTaGwWNJZYUUgny0tQA9p6gRlPwDj7nNlE/W0JiQFFWCt7rhb+7G1x/v9DhxEywOqqmKzZDFl/++R34FbtQk5mHstWfo+D112JyXCIMMd3cJURoZfv0tR3vijiGZVH07jsoWf05FOXlo25f0ym+9ghezotGW+AaqEhPSdtoc+3YgdpTT4OnqSn02PycCgCAl+2Ax+eLyHF6VqxA/2+/7ZcgFrNY925uvuZaVC89GtbPV8fkeERcKGlLos7dYoXHKgOTVjD6xlHEMIxoh5EVZ8QuubFvpS0lbQmJPmVFBQrffBP5r7wMViWeSsTiDA0kqlo0eNbC7nFG9VhezovHf74f6vwXUGAEJCwz7HRoEj3P/PQMLlp1ETa3bY76sS5dfSkOf+twfN/yfdSPRYjYlZm14NyBpG24sye8HR3oevoZdPzzn4Mel+fnj7oc2uPjUN8T+N4Q07lqs60ZPs4HlVQFs8YsdDhJjed5tN13P9yVlej4x6Ohx2dmFYHnpGBYP7a1VE/4ON7WVrTfcy/qzjgT/t7eCe8vWQSv9WPVu1leWgLIZPB1dsbkeERcKGlLoi7/pZeR9/eroDzkBKFDEfEwssAJaSwqbWv6BiptPUZRnQgTIiTV9Gmi69mVk6qGKvc1yDLfxaamnVE9lsPjQIFyDvyubEzKyI3qscjwdvbsxJb2LdjZE92/bwCot9aj29UNg8IQ9WMRInbl46i05ewOdC5fjp5XXgXv8YypyrChxwE/x0OrkMKkU4wn5IQU/LMt0BeAZSgNEE0MwyDnX/+C4Q8nIe
u+e0OPSyUSyLjA/IEtLbsnfByuvx+6o5ZAc+CBNBx1AM/zqLXGtndz+kUXYdLmTTAuuzomxyPiQmUiJOpkk+dBNnme0GEA2LsUSLTDyGJQaVs9kLRVM1kwasVzIkxIPOB9PvS8+hq0hx0GRXFyL32UsAzUXAVsdhsae6NbaZuiTEEZrkHV7j04f+staN4yHdmPPEzVtjF2xqQzcGT+kZhlmhX1Y31yyieot9ajNKU06sciROzKzHt72lrcFvS6epGqTB3xNfLCAhhOPhmKiklwbNqMtjvvhP4PJ8F07bWjHq8q2M/WqBHVDc9YVx+KncxsQvbDD+/3eIosB118C3Z0Vk34GIriYuT+3/9Ra4R9WNwWOL1OMGBQoI/NSl+Jnua4kOhJyltsTz75JAoLC6FUKrFgwQJs2rRp2G2fe+45HHLIIUhNTUVqaiqWLFky4vYksS0tXIrlhy/H5QdcLnQoMRXs/RiclBstfa4+2Lx9gWOm0LROQmKt/eFH0PHww2i75x5RnMDPU1+D/saL4XVmR/1YVZ12FFnboG1rhHPbNkrYCuDA7ANxYsmJyNVFv9pZK9diasZUKCR085GQaMtLVUMpVYLzpgAIb0Ucw7LIuv8+KCdPQe/rr8Hb0gJPY9OorwP2ng+LqZ8tgFD1IQ0hE4Z19Wo0XXsdcpWB77B6W13E9k3XXHulKFOw+dzN+PiUj+k7nCSFpEvavvXWW7j++utx1113Ydu2bZgxYwaOPvpodHR0DLn9unXr8Mc//hFr167Fhg0bkJeXh6VLl6K5uTnGkScnx1vL0fv3C+D+5j2hQwEAlKSUYHHBYuTp8oQOJaaClbZNvf1wef1RO06on63XgEmm9KgdhxAytLTzz4M0MxP6448TOpSYiNUNKYfHgeoOG2r1WWAfXg7zX2+O6vEIIURMWJZBqUkLv7MAxdrpYb3Guno1qhYvQcMFF8C+dh0AwLlhA6yrRx8EVN0xkLQVWRsvqrQVjq+3Fy233Arb55/j6N8sAIAud3g3GYbjbWmB32aLRHhJRyaRxazKNsj+7XdovPIqdP7nPzE9Lkl+SZe0feyxx3DZZZfhoosuwpQpU/D0009DrVbjxRdfHHL7119/HVdddRVmzpyJiooKPP/88+A4DmvWrIlx5MnJ8tGHaHtjE6wfvyN0KKKWoZVDr5SC54H67ugtIw5WRnBu6mdLiBDkeXko/WI1Us84QxRVF8EbUns6u6N6nIs/vxwo+Bt8qY0oOv4o6I89NqrHI0PjeR7fN3+P1ytfh9Mbve+y9/e8j/t+uC8mA88IIQFlJh1cLX/EUal/x2zz7BG3ta5ejeZrr4OvrW3Q4/6eHjRfe92oidu9lbaaiQWdYILn6VRpG3vS1FTk/PNRpJx9FjSnnwkAcPKtE9pnx6P/xO4DD0LvypWRCJFMkL+nG/a1a2Ffv17oUEiSSaqkrcfjwdatW7FkyZLQYyzLYsmSJdiwYUNY+3A6nfB6vUijRt4RoTS4oMlyQTUtvLvmsfB98/d4/pfnQ3ebxYBhmFByI5oVaaFKWxpCRohgGJks9P+TvUVCis4JTdl92CW/ERzPRe049dY6MBI3MrUZkEuT6tQpoTAMg9u+vQ0PbXooqgNF1zetx1u73sKunl1ROwYhZLAy88BNuI6Rz1N5vx/tDzwIDPX9NvBY+wMPgvcPvbKM53lUdwZ72ornXNXr92KmaSaKDEUxr0AkAbojjkDW3XdjUeGUwAMSG1qsPePaF8/z8DY3A14vFKVlEYwy8T26+VHc/u3tqOyujOlx1QsWwnTLX5H5tztielyS/JLqyqOrqwt+vx9ms3nQ42azGW2/uxM7nL/+9a/Izs4elPj9PbfbDavVOuh/ZAg8j7T8FuQf1gPt0hOFjibkpd9ewuPbHsf2ju1ChxJTxQPVBDVRTNrWDAwh4zxGlJl1UTsOIWR0/b/8irozz0L/zz8LHUrUzMzJBSNxAqwHe7ontsxwOL2uXjh8VjA8j9PqG+Dcvn3YZACJvmCFWI2lJmrHoGo0QmKv3BQ4b9zdbofH7xl2O+eWrftV2A7C8/C1tcG5ZeuQT3fY3LC7fZCwDArSxVNpK5PI8O8j/42PTv4Iapla6HBELVOXCvh0OPU7Djuff3pc+2AYBoVvvYni/30G1YwDIhxhYvuq8St8VP0RbJ7Yto6QmU1Iv/BCqKZPi+lxSfJLqqTtRD300EN488038f7770OpVA673YMPPgiDwRD6X16euPqjhs3ZDbj6ADBAeonQ0YQclHMQji8+HlnaLKFDiamSUKWtI2rHOLf0WjgbLoLMPQ3ZhuH/DRFCoq/3tdfg+uUXdPzzMaFDiZoUlQqsLzBxfGPTzqgcI7iCIL1Lj6X/exUN518wdIUXiYlCQyGAvX8vkebjfGiwNQw6FiEk+srMWkDiQKPmdixcsRBezjvkdr7OzrD2N9x2wX62BWlqWjlBBHNkzQE4+2sOmc+9BveePePej6KoCIxEEsHIEt/1c67HspnLUJZKFcgkOSTV6OOMjAxIJBK0t7cPery9vR2ZmZkjvvbRRx/FQw89hC+//BIHHDDy3apbb70V119/fehnq9VKidsh8K07AB5gUvIAmUrocEIumHqB0CEIoiQGlbZ9NjX8jkkozTWIop8mIfHMdPNNYORyGK/5i9ChRJVOkg0rOvBLexWApRHff7CVjsyZCvuMUpj1SjDSpDp9SihF+kD1a7TaI7TYW+DjfFBIFMjSiOvmLiFCyk1VQ8kGErdezosmW9OQ1e5SozGs/Q23XbBNWLGIWiMAQL+vH0qJks7P44R2zlX4aJsXxbMmY0rZ2JKLwdZX9Hc5tCUFS7CkYPhV09HEuVzo374dvq5uGE48QZAYSPJJqtuLcrkcc+bMGTRELDhUbNGiRcO+7pFHHsG9996LVatWYe7cuaMeR6FQQK/XD/of2V/fBx9g1zuZaN0srpOieLVvpW20+lxWDVQvlIrsRJiQeCRNT0fWvfeEfYGbqMyqfADRWy4fTA7W63Kheuz/kP/cs1E5DglPtCttg/st0BeAZZLqNJmQuCZhGZQYdXDW/Rl/n/HOsO1J1HPnQJqZCQyXsGIYSDMzoZ47Z8inQ/1sTeJpjQAAt3x9Cxa9sQj/q/2f0KEQACUmHZ6acSrWTD5izK9179yJ6iVHJfVKqkTl3rMHDRddjLb77gPPRW/WAhGXpDsbvf766/Hcc8/h5ZdfRmVlJa688ko4HA5cdNFFAIDzzz8ft956a2j7hx9+GHfccQdefPFFFBYWoq2tDW1tbbDbo1eNKBae2mrwfhaMJlXoUPbj43xosDbAx/mEDiVm8tPVYBnA7vah0+aO+P5/6/4Nn7e8AolmF0rNlLQlJN54mpqFDiEqigaSeO39jVHZ/+6eagAA7zGKamhNvApW2jZYG+DnIt9bOJikL9QXRnzfhJCRlZt14Nw5aOkZ/hKVkUhgvm3gWu73iduBn8233TrskvFgpa3YPs/rrfVweB0wyA1Ch0IA5KfLwSqbscMSGJbOezxovfOusFol2L76Ct7mZrirq6MdZ
sL5ufNnrG9cjw5nhyDHV1ZUQF5cDO3BB4NzRK8lIRGXpFvfd9ZZZ6GzsxN33nkn2traMHPmTKxatSo0nKyhoQEsu/dE4KmnnoLH48Hpp58+aD933XUX7r777liGnnRMj72F1J2bwcjjq7cpz/M4fOXhsLgt+Ojkj0QzaEQhlSA/TY26bieqOx0w6SP797K5dTPq/B9AZjgApcZzIrpvQsj48TyPjn88ip6XX0be009De8jBQocUUdNNpVjdAdj8rVHZf/XAgMV0WTZUcuobJ7RsbTZkrAxuvxutjlbk6nIjuv9Q0pb62RISc2UDN/33dIxcPKNfuhR4fDnaH3hw0FAyqdkM8223Bp4fRnBVmNiStitPXIlGWyO1fYkTBl0/NEX/hx5OAo/vCvQufxx9K1fC8d13KPnfZ2Dk8mFfm37xxVBWVEBioAT877216y18VP0Rls1chj/P+HPMj8/IZCj57NOYH5ckt6RL2gLAsmXLsGzZsiGfW7du3aCf6+rqoh+QSDEKJeQzDhE6jP0wDINsTTYsbgtqLbWiSdoCgf5dgaStHYtK0iO675KUUvgt8+Fz5KPMrIvovgkh48cwDOD3AX4/HBs2JF3Sdn5uBfArwEl6YXPboVNE7kLcy3nR0d8ChYfH0/99BrXfr0XBq6+AVcVPn3axkbASFOgLUNVXhTprXcSTtsH2CGI6NyAkXpSZdGBk3dhk+QL/3PIVbph7w7Db6pcuhW7xYji3bIWvsxNSoxHquXNGHMpkd/vQanEB2DvrQSzkEjlKUuJnMLTYzcgsBO9Ngd+bgl2dHZhy+WVwbt0C41+uGTFhCwCsSgXd4sUxijSxBL/D6cYrSSZJ1x6BkHBEuydevNo7jCzyyzUKVLPhbDkVrH0+8lIpoUFIPMn4yzXIe/55mG++SehQIq7CmAneH/hs29i0O6L7brI1gYMfuR1SyL0eeNvaKGEbB4KtC6IxjCy4z2AbBkJI7JSbtWBYN+zKNfig6oNRt2ckEmgWzIfhhOOhWTB/xIQtANQOnP9maOVIUY+cGCMkmuRSKXLt96O//gp0WaSQpKSg8M03oT34IKFDS1g8z+/9Do+DG69+m03oEEiSoKQtiQrvjo1ov2wJLI/F59Ty4MVYcCq4WBSHhpFFvmdzVadt4BgaSCX00UJIPJFoNUl7IcCyDBR8JgBgW8uuiO47+B2xJ92EXf/4L3L/RUM/4kHwYizS3+FWjxU9rh4AVKVDiBByU9WQ8yYAQJ+7D72u3ojuX6z9bP9X+z/c8d0dWNuwVuhQyD6KB4ppgr+XzD49mn09PWi7515wLteg13Q8/jh633wTfosldoEmiB5XD2weGxgwyNflCxYH7/Gg5qQ/YPf8BfB1dQkWB0keSdkegQjPtWkter5phnJXOwzXCx3N/oIXfNGo0olnxRkDlbZdkU3aunwubGj8FWC8KDWJ60SYkETDOZ3o++BDyEuK4e/sCmtJabxLk+Wgja/Grp7IDuWotQa+I/weE/IOmAR1QfwN1hSjaK2WCSaBTSoTNDJxLZ0mJB5IWAYlGWmo86aAlfWhzlqHVGXkPndDSVuRnatubN2ID6o+QKYmE0fkHyF0OGRA8OZBVcfgikye59F4xZVw/fwzuP5+ZD/4AHi/H/ZvvkH3s88Bfj9Uc+dST9vfCV7XZ2uzoZQKN1OHkcsBngd4Hq4dO6A99FDBYiHJgZK2JCqkUitSy+yQFk8TOpQhibY9wsBJalNvP1xeP5SyyCRpdvbsxJst10FTkoJS0wsR2SchJPJ4nw/VxxwLX8fgqbrSzMxRh7fEs1xdAdqsQKOtIaL73dNTAwDgPEa6IRVHgqtlIn3jlYaQESK8crMONV1GsLI+1FpqMcs0K2L7FmulLbV9iVOqPdCU/ANrLUYA74QeZhgG5ptuROuddyH9sstgXb16v6F7jZdeltDnbdEQT/1ssx95GFKjEdKMDKFDIUmA1jCTqFBp+5A5x4qMU+Pzbm6BvgBAdJZexbN0jRx6pRQ8D9R1R66vbfBLkvNkoMxEQ8gIiVe2r77aL2ELAL72djRfex2sq1cLENXEVaQFhqv0eJsiut/jci6Fs/YyXLLRDqz+DJzbHdH9k/EpNBQiTZmGPF0evH5vxPabq8vFaWWn4bDcwyK2T0LI2JSZteA8RgCRb4FS3RE49xXbELJ4SmaRvYrS0sHKe+BEy37PqefNQ/HHH8FdtQfN1143KGELJP55WzQEPy/i4eaEcvJkStiSiKGkLYmOroFhMOmlwsYxDJVUhSxNFgBxVdsyDBOqtg2euEZCbV/gDj7npmo0QuIV7/ej/YEHh3mSBwC0P/AgeL8/hlFFxmEF8+FsuBj+1gsjut+2XglSe9Jx0i/fouX2vwH79JsjwtHJdVh/1nq8fOzLkElkEdvvHPMc3H3g3Th/6vkR2ychZGzKTLpQ0jbYoiYSfH4OtV3BpK14zlUtbsveXt0DQxxJfDgwvyLwfyQ2tFh79t+AYQLnbQPnaIMk+HlbNAQ/L+JhCBkhkURJWxJxPMfB21AV+C7JKBc6nGEFT1xEN4wsI3CiWhPBYWS7BpYQw2tEYYY6YvslhESOc8vW/So1BuF5+Nra4NyyNXZBRcj0rCz4HeXotWrQ6/BEbL9VA5+Tuw88BoYTTgArp2njhBASTeVmLTj3QNI2gi1Qmnr74fFzUEhZ5KSoIrbfeBf8MzSrzVDL6Bw9nmTqUgG/HgCwoXHnfs8n83lbNASv6ePl5oT1s8/Qcsut6P/lV6FDIQmOkrYk4vwt1ahaqcLu9zLB64Wb3DgasQ4jKzEFh5FFrtK2qi+QtDUqcqGQJu4wI0KSma+zM6LbxRONQopsQ2DoRKQGLVb3VeOr9hfRZ26A9bJrkf3QMFXKRFAcz0VkP37Oj109u+DyuUbfmBASNbmpasg4EwCgydYELxeZFijBfrbFRi1YVjyrJqg1QnzTMIGVnz+27t7vuWQ+b4s0j9+DJnugRVa8/K5bP18NywcfwLFhg9ChkARHSVsScd5d2wGGh0TJglHrhQ5nWMEP9EguvUoEwUrb6ghV2vo4Hzr7A72YSlKKI7JPQkjkSY3GiG4Xb0ymFsgzVuPzmvUR2d/PnT+jjVkFWcpm0U0aTwRf1n+JY949Bjd/fXNE9tdsb8bpH5+OQ986NGKJYELI2ElYBsVpOeA5Ofy8H422xojsd+8QMnH1sw0NWIyT6kMyWIYiFwBQPVAAs69kP2+LpEZbIzieg0amgVEVH38e+uOPQ/qf/wzNooVCh0ISnFToAEjyUS0+ExXbjoevcY/QoYxIrO0RgierNZ0O8DwPZoI9GpvtzeDgA8/JMCUzLxIhEkKiQD13DqSZmfC1tw/dH41hIDWboZ47J/bBRYBEUwmF9Cts6ZQBOGXC+8vRFMDbcyD0fVqUiqj/YaKQS+RotjdHbLlvt6sberkemZpMsAzVNBAipElmPep6jZComlFnqUOxYeJFAcFZDmKbvRAazkR9PuNSgb4Q9d1Ai6Nhv+eS/bwtkvZtjTDRa9tI0S9dCv3SpUKHQZIAnZWSqGBU
GsjKZwodxoiCJy9N9sgtvUoE+elqSFgGdrcPHbaJT0IP3sHnPBkoz4zfympCxI6RSGC+7daBH353Qjvwc+j5BDQtfTY8vfOB/sgMwNSiFP6WY/HiylXoXXIYfL29EdkviYyZppl46ZiX8NxRz0Vkf7NMs/Dt2d/i1WNfjcj+CCHjV2bWgvMEJq9HamDw3kpbkSVtB/78ivSUtI1HU42BcxaLr2W/58I9b2Mk1JrO7XcjU5MZkRs8hMQbStoS0TKrzXjrhLfw3dnfQcZGbvp0vFNIJchLDQxgiESLhOCdTc5tRJlJN+H9EUKiR790KXIeXw6p2TzocanZjJzHl8Pf04OGCy8C198vUITjd1Th4XC3nYq+roqI7K+q044sRzfAMGAYBpKUlIjsl0SGXq7HHPMcpKvSI7ZPhmFoUA8hcaDMpAPniewwMjEmbX2cDw22QAUnVdrGp3k5gXMWL9sBj8+33/OjnbdRJWfAccXH4YvTv8B9B98ndCiD8H4/XDt2wL0nvlcgk/hG7RFIxLVdcDhYlQJpNz0MaclMocMZFsMwmJI+RegwBFFi1KKu24nqTgcOLMmY0L52dlcDADiPEcUi6xNGSCLSL10K3eLFganEnZ2QGo1Qz50Dv8WC6qOPAWezwfLhh0g9+2yhQx2T4OdPQ7cTXj8HmWT896V9nA8bmrahIcWAFXe9jLsXZsTNcjtCCEl25WYt/M4C+K2zMcc0d8L763F40Ov0gmGAogzxnKs225vh43xQSpQwa8yjv4DE3IzMQvCcFAzrw/aWGizIL99vm+HO26jCdn/x1t6o68kn0fWfp2A4+WQaaEvGjZK2JKL4fgd6N7UBPIPUG+kCN14VGzVYsxOoiUCl7e6eQNI2RZYDtZw+UghJBIxEAs2C+YMek6alIe+p/8Cx4QeknHWWQJGNX6ZeCbXCDxfa8VtbK2bm5Ix7X422RnzSdSu0ZXIUZa2EvKAggpGSSPmh9Qd81/wd5pjn4PC8wye0rz999iekKFJwx8I7KLlBiMByU9WQeibB2VyGGamHT3h/VR2B892cFBVUcvEkuoKr4Qr0BXGXzCIBcqkUMs4EH9uCzS27hkzaAkOft5H4p5o5E6xGA0ZG18hk/Oi3h0QU31UD80wrPE4VpMXThRlgC4gAAQAASURBVA5nVD93/owPqz5Evj4fF0y9QOhwYia4NKym0zHhfTUPNM6nqbSEJD713LlQz91b1cQPDL5IhCpTlmWgzn8REmk1Pq/RYmbOH8e9r1DbF08GyszU9iVebWzdiJd+ewn9vv4JJW2tHit+6vwJAPCw/OEIRUcIGS8Jy6DUpMVvLVbsbrdNuDpWjK0RAMCsMePcyeciQzWxVXUkulJk2ejiW7Cjs0roUBJSd383zvj4DBSnFOPZo56NqxsUmgMPRPmmjVQVTSYkfn6jSVJg7fVIm+RA5nE5YNj4//Vqtjdj5e6VWNOwRuhQYqp44KR1oj1tbR4b+v02AMAUY8mE4yKExA+e49B+733ofuYZoUMJW5o8UF27q6dmQvup7qsBeB5/+dSOvA9fg99qjUR4JMKCPRon2vMymKQ3qUzQyMSzdJqQeFZu1gHwY0vzLljclgntq7pDnEnbirQK3DL/Flw6/VKhQyEjyNYEVvPU2+qEDSRB1Vpq0dnfiSZbU1wlbAGAkUopYUsmjCptSWR1DzTZzhh6aUe8mZY+DZdOvxQVaZEZXJMoSgZ6Pzb39cPl9UMpG9+XiU6uwzTvf7ChYQ8mT6G7+IQkE8e336J3xQqAYaBbvBiKsjKhQxpVrrYArVagcYIXPr92VsHgAA7f3QNP1Qtg/nJlZAIkERVc4RFMuo5XMOlbaCicWECEkIgpNWmhynsZb7buxpSGe3BK2Snj3lewSKHUJK6kLUkMh2QvwcavVJAbxTlrZaKmZkzFiuNWwO6deNu/aOJ5PiFWrpH4Q0lbElGenT+DdbGQpJUiET6S8vR5uHb2tUKHEXNpGjkMKhks/V7UdjkwOUs/7n3VdLjAe4woNdESYkKSifbQQ5GxbBnkhYUJkbAFgIr0Emy2Ar3e5gntp7q3Bj4J8Pahi3DFpGlgVaoIRUgiKZhk7ejvgMPrGHeVbJ21DgBNVycknpSbdeB+TAfDy2D1TGy1Q/VAO7ASkQ3M/bXrV+Tr86GXj/88n0Tfotxp8NksaIJC6FASkkqqwnRj/LZldFVWov3+BwCpFAUv/VfocEgCiq/6cZLwWlZsw54PMmGr8ggdChkBwzChSesT6WtrdXnRZnUBoOoFQpKRcdnVMJxwvNBhhG1WVmCVhxvt4Hhu3Ptp62+EQ8Wg4Yg/wHTD9ZEKj0SYXq5HujIdwMSqbUOVttSbnZC4UW7Wwt1xLNxV9+JPk88f935cXj8ae50AgBIRnata3Bb88dM/4qA3DoLT6xQ6HDKC4DVZp80Nq8srcDQk0litFs4tW+DcuhWcyyV0OCQBUdKWRBTv4wDwkE+eJXQoYevq78IPrT+gpm9iPRATTUkE+to+tOExKLPeRkZaBwwqWaRCI4TEIc7hQOPVy+Dcvl3oUIa1MK8MPM8CrAe7u5rGtY9eVy9cXKBX9zQT9eqOd8Fq21rr+PvaBhO+1B6BkPiRm6qGQqKE2wc09Iw/6VjX7QDPAwaVDOkaeQQjjG9d/V0wqUwwq81Qy9RCh0NGoFPKkJ5RC3n6Omyo3y10OAnnPz/+B2/sfGPCva+jRZabi6yHHkTxhx+AUVA1NRk7StqSiCr6+hdM2rwBigVHCx1K2J79+VlctvoyfFD9gdChxNTeStvxJ22/b1sPWcpWZKXxkQqLEBKnOp94EvY1a9B8ww3gPfG5mkKnVELiD/TX/qFp57j2EVwqX9KgwVQN3YyKd8Hq2PEOI/NxPjTYGgBQewRC4omEZUKruPa028a9n+qOva0RxNRPsiSlBGvOXINPT/1U6FBIGKTpa6EwrcL3TVuEDiWhePwePPPzM3hg4wNw+91ChzMkhmGQcvLJUBQXi+oziEQOJW1JxLG6VDCyxLmTPdELvkS1t9J2/O0RpijPhLvzKExJnxSpsAghccr4l2XQHn44ch9/HIw8fj/jtWw2AODXjj3jen1NXy0kfh73vGFB7oUnw9s8sf64JLqCidbxtkdosbfAy3mhkCiQpcmKYGSEkIkqN+ugzFqJ+3+6BPXW+nHto6ojUJwQPO8VG4WEKvsSQYFqNryWmbA7xNV3eaIabY3geA5qqRpGlVHocAiJCkraEtGb6AVfoirZp9KW58dXKeuyTIWnazGmZeVGMjRCSBxi1WrkPf0UVNPjd9gDAGSq8gAANX1143r9r51VSHEA3VolWJ0O0uzsCEZHIi30HT5QIT1Wwdfl6/PBMnRaTEg8KTVpwSrb0ONtGHcbs2AbMDH1syWJ5+jcc+BqORtOK634GItg0VWRoSiuq1h5jwfWL75Ax2P/As+Nf+YCESc6OyUR03PXhWg+eQ7srzwgdChjErzga7I1wcuJp/l7fpoGEpaBw+NHh218y0mC1QulIq1eIETMPE3NaLru/8FvG/+y1WgoTgl
8pre7Gsf1+p3d1ejWM/jbhaeg7Juv4/oigABF+sDfd721flzD50IXfHq6UCYk3pSbdeDcgeq58fatDiVtRXaueunqS3Hp55eKbmZHoorEgGgxCt54jfue9AyDlptuRvezz8JTK67VvWTiKGlLIsaxvRLWnU54mhJrKalJbYJKqoKP96HJNr7BNYlILmWRnxYYTFDdMfa+tltbf0aLZysYqQVlZnGdCBMidjzPo/maa2BbtQpt99wrdDiDHGAqAwDY/S3jen2zPbAEt0BXBFapjFhcJDqytdmQsTK4/W60OlrH/Ppg0jbuL/gIEaFysxacZyBpO47VExzHh5JgpSKqtPVxPmxt34qNbRuhlNL3WCIIFMD4UWuth8fnEzqchBH6Dh9odxivGJkMhhNPRMrZZ4GRSoUOhyQYStqSiEmb7IJppgWaAw8WOpQxYRk29EEv1hYJ1V1jv6v78q9vQpX3MnTGLaKaxksICQxVyLr/PqhmzYLpxhuEDmeQBXkVAABO0guLa2w3pPycHzZvNwBgsrE04rGRyJOwEuTr8gGM7zs8VKUT5xd8hIhRbqoaEp8JALCrp3rMr2+1utDv9UMmYZCXqop0eHGrxd4CH+eDUqJEpiZT6HBIGDINCmjL74Wi8BFsb6FKzHAFv8MTYZBo1r33IOvuuyEvKBA6FJJgKGlLIsPngUbdiPQKBxSzDxM6mjELJW3H2RMvURUHh5GNo9K2ZuDOZqY6n5YPEyJCysmTUbDidcjMZqFDGaQswwzerwHPs9jaPLaLfAkrQZHzX7jthRwsffs1eDs6ohQliaRD8w7FcUXHQSfXjfm1F0+7GMtmLsNM48zIB0YImRAJyyBXG0hw1Nvqxvz64PltYboGUol4LnuD1YcF+gLq1Z0gZBIJZEgDAGxp2SlwNImB5/mEqbQlZCKoNptERm8dwPsBuRbQJd705eDdueAHv1gUZwz0TxpHpW17fwMAoDQl/u9sEkKiY98bNs6tW2H/9lsYr7lG0Bs5DMMgz3kbKpt4eOaOLaHM8zw6Gjsws6Me6KgHq34kSlGSSLp+zvXjfu2huYfi0NxDIxgNISSSKtKL0eYBnD4rel29SFWmhv1asfazTZg+n2SQFGk2uvgW/NZZJXQoCaHH1QObxwYGDAr0iVO96m1thcRgAKtWCx0KSRB0641EhLdyI5xdMvg1xUACVl0GT2rEVmkbnKQ71kpbi9sCF2cFAEw3l0U8LkJIYvF2dKDh0svQ/dTTsHzwodDhYFJGHgAparrG9tnWaXejw8vi7kUXI/3W2yDRaqITICGEkLBUZGaA86YAGPt5enBgbolJXJ/loQGLCbBknOyVpQ4kHhts9QJHkhiCnwfZ2uyE6d3ccPElqDriSNi/+07oUEgCoaQtiQjL51+i/ksj2r4XOpLxEXulbYulH/0ef9ivC35Jcl49pmQaoxEaISSByEwmmK67FtrDDoP+mKOFDmef1i9jW0Xw+Jb/gM1/G00z9DBdcF40QiNR4uN8aLQ1juk1Vb1VWN+4Hm2OtihFRQiZqHKzDpw7cK451r7VYq20pSXjiaksLXA92uke23eZWCXi77ksNxeQSOBNsMHtRFiUtCURwbAspBpAXpAndCjjEhxi0ufuQ5+rT9hgYihNI0eKWgaeB2rH0CJhT08NAIDzmFAmomm8hJDhpV1wAXKf+g9YlfDDXgx6C5RZK7HB+sSYXre14wfI9D/DlOqNUmQkGpxeJ+a/Ph/HvXcc7J7wq6s/q/0My75ahud+fi6K0RFCJqLMpAXnyQCwd55CuKo7A+e2YkvaUnuExDTDXA4AcPKtAkeSGII3cRKpotx4zV8wadNGpF90odChkARCSVsSEen3/hdlWyuR8dBrQocyLmqZOjRdVUwtEhiG2aevbfgXur+07wEASHxGZBkSYzkKIST6GHbvaUXfe+/D/q0wy79yU5WQpWyDTbIVfi78VQSFklMwa+NMHGLXgvN4ohghiSS1TA29XA+FRIFWR/gXuwaFAZNSJ2FS2qQoRkcImYi8NDVYX6A/eWVX+L0+Lf1edNrcAIBio3jaI1jcFvS4egAARfrESWYR4MD8isD/kdjQYu0RNpgEELo5kUCVttKMDLAa8XwekcigQWQkova9YE80RfoiuHwu9Ln7hA4lpkqMWmxr6ENNZ/iVtrt6AhPZMxS5gg4cIoTEJ9vatWi97TYwKhWK338P8sLCmB5/QV4p3J1HgXOb0GV3wawP7wTZ1p2Pm9b9CMVXW+A7/vCYx03G752T3kGaMm1Mk9IvmHoBLph6QRSjIoRMlIRlkK3ORweAmjG0R6gZaI2QqVdCp5RFJ7g4FExkmdQmqGU06CiRZOpSAb8ekFixoXEnTpt6oNAhxbXy1HJYPVaUp5ULHQohUUVJW0IG/PvIfydME/NICvV+7Ay/0rbZ0QAAKNAXRyUmQkhi0x50EDQHHQRFeTlkBbGf6KtVKGHyn4AmWz/qusJP2rY0d+HXjGLMljggy0vMdj9ilaHKEDoEQkiUlKUVocMDdLta4OW8kLGjJ2FDrRHEOoSMqmwTkprJhBNW/Ny2h5K2o7hm9jVChzAutrVrYXnvPWgOPBCpf/yj0OGQBJC4ZZEkbvR//hpqD6lA+6WLhQ5lQsSYsAWAkoElY+FW2vo4Hyy+wPLTqcaSqMVFCElcjFyOvKf+A9PNNwlWjR/sYVgTZr/uLS2/olGxA3ccdhryV60CI5FEMzwiMB/nG1PrDEKIcKaZC+DuPBIV0kvB83xYrxHrELJgn0/qZ5uYjIrADeM9vTUCR0KixVNfD9sXX8K+/muhQyEJgpK2ZMJcv2yDq5OBu8UidChkHIKVtjWd9rBOhJvtzeDhB8/JMDOrMMrREUISFSOXhxK2PMeh69nn4Ovujtnxs9O9kGh34Lum78Pa/u2dH0GVuwKpmT9Aq6CFSImm3lqPG9ffiBvW3RDW9t+3fI8FKxbg+nXXRzkyQshETcrUw9O1FLauGZBL5GG9prpDpEnbgfYIiTSciexVMNCftWVgVSMZmtPrhNvvFjqMcdEeeiiMN1yPjKuuFDoUkiAoaUsmTFvAIOegHqQtnS50KBPi8Dpw9ZqrceL7J8LLiWdyeH6aGhKWgcPjR7t19C+/mr7AsivOk4Fysz7a4RFCkkDnv5aj87HH0HjZ5eB9vpgc06/cAXXeK9hufT+s7av6agGeh0lFbRESEQsWn9d9jvVN68Hx3Kjb11pq4fa7wYD6shMS78pMe1t5+bnwKm2rRFpp+6fJf8KNc2/E/Mz5QodCxmFKRmAVo8XXInAk8W3FzhWY//p8PLzpYaFDGTNFcTEyLrsMqgMOEDoUkiAoaUsmTOZthj7PBe2Bid13RyVVYXPbZtRZ69BkaxI6nJiRS1kUpAUGFYTT17bZYgHn1QNeE/LSaMABIWR0hlNPgdRoROp554GRxqaKdZqpFABg51rD2r7V2YB/Pu/HX1d8CU9dXRQjI9GQrc2GjJXB7Xej1TH633lo6jQtISYk7uWlqaGQu+CT78Inu0dfUuz1c2jodgIQX0/buZlzcc
HUC1CWWiZ0KGQc5udOBgB42Q54YnSTOxE125vB8RwMCoPQoRASdbT+j0xc1+7AfzMS++SAZVjce9C9SFOmIVOTKXQ4MVVs1KCmy4GaTjsOKh15mEumZCEcVbdhUqYGEpYqlAgho1MUFaHk81Vg1bG70bModzLwM8BJetHnsiNFOXy1lZfzwmdvQ14XALRAkpoaszhJZEhYCfJ1+ai2VKPWUoscbc6I24eG9dASYkLinoRlkJXVgG71i3j+1yn4Q8VhI25f3+2Ej+OhkUuQqRfnzAqSmGZkFsLTcA3c/elos3iQn07pmqHcufBOXD3zarBMYtYgci4X+n/+GZzDAd0RRwgdDolziflbTuIG3++AZVsb+ntk4FMTfyjV0YVHY17mPKikKqFDiang0rHqMIaRBZeblZvpziYhJHz7Jmy5/n70vPIqeG70ZezjVZphBu8PHPOHxp0jbttsa4ZL6ce1lyrguOMBSAz0+ZaIggnY4CCekQS3oQnrhCSGEkMxOHcG5DCNum1oCJlJK9gwTCHUWeqwqm5V6KYUSTxyqRSF2kkAp0B11+grIMWKYRhkqDKQpkwTOpRxcW7ciIbzL0DHQ4nX3oHEHiVtyYR4ft2Alh8MqP8qAzCMXNVC4lexMbB0LJz2CHvaA9sE+4sRQshY8ByHxsv/jPYHHkDn8sejdhyGYaBEYNXE9pbdI267s7sKPMOgSWdG8cnHRy0mEl3BVgfB1gfDsXqs6HYFhuIV6AuiHBUhJBJmZ02Fo+ZG5PkuHnXbapH2s13ftB43rb8JT/74pNChkAkItvQIDtMjyUc1cyakZjMUkyeD94pnlg4ZH0rakgnh3f1Q5ymhzteCYRP/16nH1YP397yPN3e+KXQoMRU8qa0ZpdLW4rZgjeNqqPKfR5FRXNXIhJDIYFgWhtNOhcRggPbww6N6rHR5LgBgd+/IVUfbW/cAAKR+M9I04U0mJ/GncGDq9mhVZsEqW5PKBK1cXEkdQhJVuVkHANjdPnoiq7ojcD5bYhRXP1u9XI8DjAdgavpUoUMhE6DVN0Nh/gBfNr8jdChx6bfu3/CXr/6C//76X6FDGTeJwYCy9euQu/xfYGQyocMhcY6apJAJUR58Igq+OFHoMCKm3dGOO7+/E6mKVJxdcbbQ4cRM8UDStrmvH/0eP1RyyZDb1Vpq4WctYOU8Kqg9AiFknFJOPhm6ww+HJCUlqsfJ0xagxQI02upG3G5XdzWO3sKBlXPwWyzUHiFBhdsegYaQEZJ4giu8qjttcHk9UMqGv8Em1krbU8pOwSllpwgdBpkglboX8rQfUNvfI3QocamyuxLrGtfBy3lx0bSLhA6HkKhL/NJIQiIouEyy192LPlefsMHEUJpGjhR14C5fbdfw1bZ6tgCO2qvgaTsdBeniql4ghETWvglbb1sbbOvWRfwYFemBXuu93uYRt2u01eKc9Rwu+nQrfJ2dEY+DxEYwCdvR3wGHd/jvMhpCRkjiyUtTQ5XxNWTFd+KhH/417HY8z4eWlZdQKy+SgBblzoK763B4excJHUpcSrae9JzLJXQIJM5R0pZMjM8jdAQRpZapkakJ9EAcrSdestk7jGz4ZWdNPT5wrnzkq2ZBLqWPD0LIxHnb21F3zjlo+ss1cGzYENF9z86eBADwMO3gRhh65nA24+tpDLrKSiEvLIxoDCR29HJ9aCjJSN/hwQu+YDsFQkj8k7AMMjRaMBI3dvbUDLtdp80Nm9sHlgEK0tXDbpdsfJwPXj/1xkwGBxVMhqfzGPR0TIbNRX+nvxdaLZPg3+F+iwU1p5yK3QsWUuKWjIiyLmRCqg+cjrrDJsNbuUnoUCIm3J54yaY4I1A5O1Jf2z0DlQulVLlACIkQaUYGVAfMgDwnB/KCyA6Fmp9bCp5nAdaDys7GIbfpc/XBIXfghaMlcP/jSTBS6hyVyILVsyN9h1N7BEISU/DfbLO9fthtqgaKD/LT1FBIh273lYx+6/4N816fhwtXXSh0KGSCdEoZjDoFgNHnjYhR8Ps90b/DWb0evs5O8G433Dt3Ch0OiWOUtCXj5muqhsfKor8dkGSXCB1OxASTtqKrtDWNXmn7v8bXIEvdgPyMWEVFCEl2jESC7EceRsEbKyDLzo7ovrUKJST+wAfWD01DnxDv6Q1UbHFeA6ZmGSN6fBJ7oe/wYfra+jk/6q2BhA+1RyAksUwzlgEALL42eLmhKxCrO4NDyMRVYFBrqYWf90PK0o3HZFBg9EOirsLGYc5dxMrj96DZHmh5lejf4QzDIPff/0bp+vVQzZwpdDgkjlHSloybxNuGoqM7kHs0C9aQLnQ4ERNOlU4yClXadg2dtPVxPuxyvwtl5ofISaOPDkJI5LByOaSpqaGf+3/+Gd7mkfvQhksvCSSCf+usGvL5up5eKK2pYDxmZOqVETkmEU5oGNkwN15b7C3wcl4oJApkabJiGBkhZKJmZOWD52Tg4UezbejviGqRrgqjti/JxaX9H9QFz2Nd8yqhQ4krjbZG+Hk/1FI1jKrEv9Gunj0LMrNJ6DBInKNbcWTcmN5qKFN9UJaUCx1KRAWXWoi10ram0wGe58EwzKDnm2xNAOMHz8kwOyex72wSQuKXY9MmNF5xJWQmEwpWvA5pWtqE9mdW5aPHVosum3vI5zXcFDz4Eo8Ufz1ch+6AatrUCR2PCGuueS4unX4pZhpnDvm8WWPGiuNWoLO/EyxDNyAJSSQVmQZwHiMkyhZU99UMuTw6uGJMbJW2weuWRK8+JAEF+kLUdwOtzuFbgYhR6OaEoXC/a1VCkhUlbcn4de0J/De9TNg4IqzYUAwAaLQ2wst5IWNlAkcUG/lpakhZBk6PH21WF7IMqkHP/9IRqFLjPBkoM+mFCJEQIgLy/HxIUgyQZWeDVSgmvL9TCi7Hlg8OBDtp6IqMmqZuHOrshYz3Q5ZNlZeJbmrGVEzNGD7xLpfIMd04PYYREUIiJS9NDXiNgLIFP7XtweKCI/fbJtgDtMSkiXV4ggquECzSU9I2GUzJKMHX3UCft0XoUOJKrXXg9zyJbk5YPvkUzo0bkX7pJRGf7UCSA5UYkHGzrNsEa4MSPnmO0KFElEltgkqqgo/3Dbv0KhnJJCzyB6bsDtX0flvrLgCAEplQycUz2IEQEluyzEwUvvYacp9+Cqxm4hfdo/Xr3tXnxWkn3Ictf/v3hKt6CSGERI+EZZAqC1x3/Na1f8sbp8eH5r5+AEBxhngqbX2cDw22BgCJP5yJBMzNrgAAeNkOeHw+gaOJH6EhZEnUBqTvrbfQ9/bbcGxKnsHuJLIoaUvGreurRjR/nwa3deKVUPGEZVgU6AN3ucTWIiF4gjtUcmN3T2BYj1GRF9OYCCHiI8vOBiuXh352bNwEfpwXLcElsk29/XB5/YOe83JefO26GrLi52A4IHmqNsSuq78LG1s3otXeut9zr1e+jjd3volOZ6cAkRFCJipPWwgAaLDV7fdcsOggXSNHqka+3/PJqsXeAh/ng1KiRKYmU+hwSATMyi4Cz0nBsD782FondDhxI3htnkw3J/QnnYj0Sy+Bc
soUoUMhcYqStmRceI6DelIOVNlyyA9YJHQ4ERdcWiS2YWQlxoFhZENU2jbbA3fwC5LoziYhJP71vvEGGi68EK133gWe58f8+gytHLr816EuvR9ra7cPeq7B0giOtYFVtmJ6tjlSIROB3ffDfbh09aX4qvGr/Z57/pfncf/G+9HubBcgMkLIRE3OKAEAdLv3Xw0n1n62weuVfH0+9epOEnKpFDIuMKBqc3OlwNHEB57nk7INSOoZZ8B0441QTaWZCmRo1NOWjAvDssh6+Quhw4ga0Q4jMw5faWvxNQMMMNVYEuuwCCEiJjUaAYYBq1IBPA+McfAEwzBQKh1wSWzY3robx5bP3fukLx2nfDgXkHmRdZQVgC6ywRNBlKaUoqqvClJm8Gmun/PjxOITUWupTaqllYSIyeysMrzTBnhhQ5+rDynKlNBz1R0DSVuR9bOlIWTJySDNRjdasKOrWuhQ4kKPqwc2jw0MmNCqWELEgJK2hAyhPLUck9MmI1MtriVGxcNU2lrcFvgYGwBgXs6kmMdFCBEv3ZIlKHrnbSgmTx73pODpqj9h3a5uKLMHD6Cq6+zHCXt+gs7bD9htkQiXxIFls5Zh2axl+z0uYSW4fu71AkRECImU6dkmcJsMYGUWVPfVYk7mrNBz1cEhZCKttKWbUcklW5OPbscW1IusiGg4LMPiqhlXocfVA6VUKXQ4EcVzHDzV1WBUashzk2teEJk4Wj9BxoW3dgA+t9BhRM2SgiVYeeJKXDnzSqFDiangSW5zXz/6PXt7P/7aGRj2wHn1mJZtEiQ2Qoh4KadMCSVseZ5H/y+/jOn1czJngXPlobFncE/bqjYLVkw6CpUzD4O8uDhi8RJCCImOvDQ14A2ci25v3T3ouVB7BJNIk7ZJ1OeTAGWpgfOSTnejwJHEh1RlKq6ceSVuX3i70KFEXPsDD6LmxJPQu2KF0KGQOERJWzIurVeejd1zpqPvH38ROhQSQakaOVLVMgBATdfeFglbmncBAKR+M/RKmSCxEUII7/ej7a67UXfW2bB+vjrs1wX7df++9cvqtnfw6Xxg50UXDhp8RpIDz/OD+iC32FvQ1d81rt7IhJD4IGEZZPpPgrP+chjZOaHH/RyPmq5ApW2pyCptqT1CcpqRWQYAcPJtAkdCok05bSoYlQq8xyN0KCQOUdKWjIuntRt+twSsPl3oUKLKz/nh9XuFDiOmgtW2+7ZI2DFQaZsqo+UahBABsWygry0Av9US9sty02SQpW5AjX8F/NzeatsqzydQmj+DMUVcn/NisGzNMhz05kHY0b0j9NijWx7FESuPwIqdVMlCSCKbnjEDfmcxmnr2Ptbc2w+Pj4NCyiI7RSVccDFm9VjR4wr8QVB7hOSyKK8i8H8kVrTZeoUNJg781vUbGq2Ng87jkoX+uOMwadNGZP4t+aqIycRR0paMS94RFhQu7YT68KVChxI1d39/N+a/Ph+f1HwidCgxVTxERVqDrQ4AkKulpu+EEOEwDIPMu+9CwWuvIvWMM8J+XVGGFgrzx2BSvsGOjsAywz5XH7K6bdD081iQS726k43da4fNY0OttTb0WHAJMQ0wISSxlZkDQyP3tO/tRR48by3K0EDCjq//eSJSS9V496R38fgRj0MjE9cAtmSXpU8D/IHf9e8bdgocjfD++s1fcdz7x2FL+xahQ4k4Vi4HI6PVrGRolLQlY+fohsTfC1WaF9KimUJHEzVyiRwezhNaciQWQ1Xa+t2Z8DvzMSWDEhuEEGExEgnUs2eHfubcbniamkZ8jUauhMSfAQDY1BRo97KtdTdueM+P/y73I79mT/QCJoIIVpzVWeoABFbONFgbBj1HCElMxRlKyAybscnyCrxcYKVEVYc4+9lKWSnKU8txZP6RQodCokDNZIH3q7CnS9wtEjieg1qqhpyV03c4ER2p0AGQBNQ10PTfkAfI1cLGEkUXTb0I5085H1maLKFDianigaTtvpW2zvYlcPYehMVHLxQqLEII2Y/fbkfTVVfD09CAwhWvQ5adPey2ekk2+tAxMFjxaGxprMRcLvCcprwsNgGTmAn2dgxW17Y4WuDhPJCzctF9rxOSbCZlGqDI/Ag21otGaxOKU4r2DiETWT9bktwOM9yKlZvaIMkR93kKy7BYeeJK+Dk/WCY56w6d27ej89//hjQtHTn/fFTocEgcoaQtGTPHt2vQ/5sW6lk5SN6ULZClFedFXbA9Qk2nAxzHw+Xzo6m3HwBQKrLqBUJIfOO9Xvi6u8HZbPC2tY2YtM1U56Gv/8dQEm9nby1evUKKAveh+NhkjFXIJEaCSdvgaplQawRDASSsRKiwCCERUJCuBWedCz/HoNPqQ3EK9knaiqtFwJs734TL58LigsXI0+UJHQ6JsHJjGoD2QQOixSyZv78ZloVzww+QpKSA5zgwbHImp8nYUdKWjJn9+83o+UWPVIMkqZO2YpWfpoaUZdDv9aPN6kKL1QIwPqSp1UjXKoQOjxBCQqSpqch/4Xn4e3qgnDJlxG1LUoqxsx/ocAV62jba6wAApoxSMIx4+h+KRXD5ZL21HhzPhdok0LJKQhKfhGVQiPOwo90Kq10PAKgeaOsltgKDN3e+iWpLNcpSyyhpm4SC7T6qOxyjbEkSnXLyZGTefRdUs2YDdF5K9kHpezJmqplzYJhlgnrRgUKHEnUv/voibl5/M5psI/dLTCYyCYv89EA6vqbTgTd3vgXtpDugzf5Y4MgIIWR/sszMQQlbX1cXOLd7v+2mmwJLC+1cCwCgx9MMAJicURKDKEmsZWuzIWNlcPvdaHW0hgaSBStwCSGJrdwcSGbtbrehx+FBj8MDACjOEFfS9rji43BM4TEoSaHvsmRkNvBQ5b6EJs098Ph8QocjmPt/uB9nfHwGvqz/UuhQooaRy5F69tlQTiqnYgIyCFXakjHTX3Yn9JfdKXQYMbGqdhUqeypxbNGxyNXlCh1OzJQYtajpdKCmy446SyMYhkeGOkXosAghZESexkY0XHwJFJPKkbt8ORjp3tOchXkVwE8AL+lDp70PF3zeAoWXx5QraQVBMpKyUuTr8lFtqUadpY4qbQlJMmVmHcC6sL11JxZ0pgEAclJUUMmTd/n0UC4/4HKhQyBRVJqRDommCgzrw4+ttZifJ87etpU9ldjZsxM+XryJayJelLQlZASF+kJU9lSGeuKJRbCvbXWHHanOs2DfMwdHLqkQOCpCCBmZt6UVvrbAhGVfTw9kJlPoueI0E+BXAxIn3vhlNRbs4qDvB8xppuF2RxJckaEI1ZZq1FpqQ9/jVGlLSHLQaDugm3Q3Nrs1OKpzJYC9S8kJSRZyqRRpzvPQ3C1Fr10pdDiC4Hk+1Je+SJ/c3+Gc2w3H99/DXVWFjMsuEzocEicoaUvGhLN0gre0QJIzBZDIhA4n6goNhQAguqRtcPJuTZcDzX394H16zMgRT6UxISQxaRbMR+5TT0FRXjYoYQsADMNAiUy4UIPPa9Zg6/EsCtv0uGsy3ZBKVsHv8F+6fkFXf1fgMaq0JSQpLMwrB3YAHOvAL62tAMQ3hKzF
3gIePLI0WWAZ6nqYrKYaDkNDYxuausVZZdrr7oXVYwUDBgX6AqHDiSq+vx9NV14FAEg59VRI09MFjojEA/p0J2Pi+OBF7F56NhqOmyt0KDERrMgJ3t0Ti+BJ7842G+q7nQDEN9iBEJKYtAcfNChh6+vpCf3/dHng5lODZyu2lbFYe/BksCpVzGMksRFM0K5rXAcAMKqM0Mrpu4yQZFBmSgfvTQEArK/9DcDeogOxePbnZ3HMu8fgqZ+eEjoUEkWhYppOu8CRCCN4HZ6tzYZSmtzVxpKUFGiPOAKG004FP8R8BiJOlLQlY+Kt3QUAkBj0AkcSG8ELvmAvPLEIDnHo9tRClv0ytKZvkKlP7i9JQkjycWzchOqlR6PvnXcAAPm6wsATbOBEOFud3BUbYhe88er0BW4+BitvCSGJT8IGVk8AQKuzAYD4krbBZFayVx+KnSnVC6lhCzZ3fyp0KIIQW0/6vKf+g+z774csO1voUEicoKQtGZO0mSqUn9oK0zlHCB1KTARPgnrdvehz9QkbTAylauRI08jBKlsh0+2A2lBFUywJIQnHseF7cHY7LJ9+Cp7jMCk9MF17Sj2HwuocTNOJc6CHWPw+SZvsvfAIERujIrB6gpUH2p+UmMTVHoF6dYuDRmOFKvsdtLIfCR2KIIK/53TjlYhVUiZtn3zySRQWFkKpVGLBggXYtGnTiNu//fbbqKiogFKpxPTp0/HZZ5/FKNLEwns9cGzcCHurEp62XvBej9AhRZ1apoZZbQYAvLHzDWxu2ww/5xc4qtgoTFdCotkNAJCxKnh84uyjRAhJXMZrr0XmXXci7+mnwbAs5udMBmfPx58/keCRlfUoX7MJblp+lrT0cj3uXnQ3bpp7E26dfyvmZ84XzXc4IWKQp8sHAEg0VVDra2FQSgSOKDb8nB9rG9aixxVo/5OnzRM4IhJNi/IGeu9LbLh19fN4aeuXorku8/h8+Lo+kMtps7hF8759Xh82v/IOvvr3S9j8/hfweUX0vt//Al89+aqo3vdoGJ7neaGDiKS33noL559/Pp5++mksWLAAy5cvx9tvv41du3bBZNp/QvT333+PQw89FA8++CBOOOEErFixAg8//DC2bduGadOmhXVMq9UKg8EAi8UCvT452wZYX3wA7U++Cp9j72NSDWC++jzoL75NuMCi7Mv6L/HXr/8KD7c3QW1Wm3HL/FuwpGCJgJFF1z++eRsv73ocjMwSeozxp+C8smtw0yFnCBgZIYSM30v/uhulr69Eun3vqU+XjkXdn87EedfeJWBkJBq+rP8SD216CO3O9tBjYvgOJ0QM/vHN23hlzz8Byd6LEzGcq9Lnmvj845u38XL1vWCYvecuYvhd/8c3b+PVPf8GL+kLPSaG973++beQ+ti9kO1zk7lHnQLfVdfhsEvPEjCy6Fr//FuQ/mc50px9oceS/X2Hm0dMuqTtggULMG/ePDzxxBMAAI7jkJeXh7/85S+45ZZb9tv+rLPOgsPhwCeffBJ6bOHChZg5cyaefvrpsI6Z7Elb64sPoPmRVwZ+2neJfOBXJ+fm85Mycftl/Ze4ft314DH4nwgz8Gfw2OGPJeXJUeDE4B4AwL4dEYKfFBeU3JnUX5SEkOT06uN/x5yn3gQw+JuMG/h565VnU+I2iYj1O5wQMRDruSp9romPWH/Xxfq+1z//FoyP3g1g6HPVzhvvTsoEpljftyiTth6PB2q1Gu+88w5OPvnk0OMXXHAB+vr68OGHH+73mvz8fFx//fW47rrrQo/ddddd+OCDD/DTTz+FddxkTtryXg+qFs6Az8Fj8D+h0BaQahiU/vATGJk81uFFjZ/z4+h3jx50F3tfDBiY1WZcMv0SdDg7cELxCShOKQYAVHZX4ov6L8Z0PAkrwdUzrw79/HH1x6i11GJx/mJMzZgKINCE/aPqsfcyumLGFZBLAn83X9Z/iR3dO7AoexHmZc4DALQ72vHWrrcAAD7Oj//+8jp4xo2hWtjyPMD6U7DlgrWQS6VjjoUQQoTgdrux+aDZSLNzQ36TcQB6dCzmf7sNCoUi1uGRCAv3O3zVaasgYcWxnJqQZOHx+TD3lSPAsX1hnau+Xvk6uvu7cXLpycjXB9op/Nr1K75q+GpMx5VL5LhixhWhn9/f8z4abY1YWrgUFWmB5evVfdX4tGbsw6Kunnl16LNoVd0q7O7ZjUNyD8Es0ywAQIu9BSt3rcSbu96Ew+sYch/0uZZ8wvldB6/ANO2xkLJyzNDuTWJW96+D1deCfOVCpMsC16gWXzNq+tePOY4DtGdAwsgAAPWuDejx1iJbMQtm+WQAgMPfhd3O1WPe7xTNSVCwgeGBze7t6PBUwiSfDLPsALze8mdAYhn+fftTcG7205Am0e865/fhwNsvRXq/ZZisC9CtSsH39z8HViJFzuZ10LY3oe2AhbDklwIAFNZeFH79KXwKFaqPOi302uxt30DXUo/2qXPRVxT4vJI5rChe+xE4iQx7jt2bEM38aQMMjdXorJiJntLAynOJqx+lX74LMAx2HX9uaFvTr5uRWrcL3aXT0FUxEwDA+rwoWxXILew+9mzwkkDOwFi5HWnVv6G3aDI6ps4ZeNMcyj99HbnrPoXK5x72HL1XnYKFG7+BVJZc+Ydw84hJ9a67urrg9/thNpsHPW42m7Fz584hX9PW1jbk9m1tbcMex+12D+qBZ7VaJxB1fHN+9spAS4ThhlAx8DkC22n+cGkMI4uubR3bhr3YAwAePNqcbXij8g3UWGtwgPGAUNK2qq8Kz/3y3JiOJ2flg5K2n9d9jvVN65Gryw0lbZvsTWPeLwBcMv2SUNL266av8X7V+1DL1KGkbZera/B+2RH+thmAl/ZhxU/rcOEcupNPCEkMn6x8EVPs3LDPswAybBw+WfkiTjvvytgFRqIi3O/wbR3bQt+FhJDEsOKndeAlfWGfq76/533s6t2FuZlzQ0nbnT07x3xOrZPpBiVtP6v9DD+0/oDilOJQ0rbOWjeuc/UrZ14JCQLJp7UNa/FZ7WdIVaaGkrbtzna88OsLI+6DPteSTzi/62Dc+M35AXhOjm83zw49p8r7ElLtLmzaI4XPEqjRk2groc57f8xxfL91FsAHriWVWesgS9mGzTVueHsCj7HKRmiKxr7fDT+Wg/elAgAUpm8gT/8GnoZu+OydUBdYhn0dwwCQ9uH5zWvgd5aM+bjxanpnFU7qH+F9A8jo78O3767BL8ZS3L1hNcraK/FJJ4vVhYHfkkJLK55a+y56FDpcL50Zeu0tm7/C7OYf8WWbHx+VBBLwWY4uvPjFu3BKFbhOOTe07XXb1mFuwyZ809yPtxtVAIBUlxUrvngXfjC4Rr0wtO2VP63H/NrvsLnegldbdQAAldeF9754FwBwg3IuvJLA8S767Rss2LMW75Yciuc7UgAALM/h0y/fG/HPhQWQ7uzD9k/WYt4pR43+B5mEkippGysPPvgg/v73vwsdRkz4Whoiul2i6HR2hrXdlPQpODDnQORoc0KPFRmK8KfJfxrT8STM4LuEh+cdjjxdHkpS9n4
RZWuyx7xfAJCye/+ZL8xaCI1Mg6npU0OPpSvTQ/vd2LQTe2xbRt1ng3X4mxqEEBJvrK3hfUeFux2Jb+F+h4e7HSEkfoR7Dhrc7vji4zHPOQ9ZmqzQc6UppWM+p1ZIBq/CWJy/GKUppSjSF4Uey9Pljetcnd1nNvjBOQcjTZkWSgQDQIYqAwdlH4TvWr4bdV/0uZY8wv1d13HTkKUuwgEHFYYeq3EdAqu/GPkVM5AmDTxu8UlQ6z52zHEcsKgYLBO4nmxwH4genxnZpbNhkgX26/BrsMc19v1OmVcG+UClbYtnATq8WpgKpqLFakFdOK/PA+ZmFI75uPFKsb4qrO0OSgXmHlQIifRQ7G4rxvSZs5BTUAgAUFr02O09Hj6FEhft8/ugVByM3c05mDR9Ni4qDjwud6Rhd//x4KTSQdvq1Qdid4MRxZPn4KLywONSlxO77ccDDAZtm65bhN05Kcgtn4GLJgceZ70e7LYcDwA476CiUKVtVuoC7DarYS6ZioumDeyD41D98zSU1P466vu2tYg3/5BUSduMjAxIJBK0tw+urmhvb0dmZuaQr8nMzBzT9gBw66234vrrrw/9bLVakZeXnFM7pdn5Ed0uURjVxrC2O7X81P3uZk/LmIZpGeENsRvO6eWn7/dYcUox/jr/rxPa73HFx+G44uMGPZapyQzt9yXJl/jnr6MnbfP1w//7IISQeKPPCu87KtztSHwL9zs83O0IIfEj3HPQ4HYXTbtov+dmmmZipmnmhOI4u+Ls/R4rTy2f8Ln6iSUn4sSSEwc9lqfLwyXTLwkraUufa8kj3N/1yw+4ZIgVkFOH2HIqgGMmGNVQ+wWAwyO235e2fol/jp7Dw2kHTMGFc4aLJ/Fs9rUAn4y+3eKDpmDeiVOBE4d57386BABw2r6PDbft2YsAAKeEs+0ZgZzHH8LZ9tRZYW+7mbscuPWaofezD122ePMP7OibJA65XI45c+ZgzZo1occ4jsOaNWuwaNGiIV+zaNGiQdsDwBdffDHs9gCgUCig1+sH/S9ZqY87H1INAAzX+piHVBPYLpnMNs2GWW0ONfb/PQYMMtWZmG2aPeTzieqcGYeD8adguE7XPA8wvhScM+PwmMZFCCETccKZF6NLx2K4BgkcgC4dixPOvDiWYZEoEet3OCFiINZzVfpcEx+x/q6L9X3POuEI9KhTRjxX7VanYNYJR8QyrKgT6/sei6RK2gLA9ddfj+eeew4vv/wyKisrceWVV8LhcOCiiwJ3Wc8//3zceuutoe2vvfZarFq1Cv/85z+xc+dO3H333diyZQuWLVsm1FuIK4xMDvPV5w389PtPzsDP5qvPS6ohZEBgMNgt828BgP1OjoI//3X+X5Ou0b9cKsV5ZYE7Xb//ogz+fF75NTSEjBCSUBQKBer+dCYYYL+TwuBk2ro/nUlDyJKEWL/DCREDsZ6r0uea+Ij1d12s71sqk8J31XUjnqv6r7ou6YZxifV9j0XSJW3POussPProo7jzzjsxc+ZM/Pjjj1i1alVo2FhDQwNaW1tD2x944IFYsWIFnn32WcyYMQPvvPMOPvjgA0ybNrHl7clEf/FtyLn5fEg1g08QpBoGOTefD/3FtwkUWXQtKViCxw5/DCa1adDjZrUZjx3+GJYUJOcgrpsOOQMXlNwJlksZ9DjrT8EFJXfipkPOGPqFhBASx8679i5svfJs9OgGn/r06FhsvfJsnHftXQJFRqJBrN/hhIiBWM9V6XNNfMT6uy7W933YpWeh88a70adOGfR4rzoFnTfejcMuPUuYwKJMrO87XAzPD1d4TsJltVphMBhgsViSulUC7/XA+dkr8LU0QJqdD/Vx5yddhe1Q/Jwf2zq2odPZCaPaiNmm2aK4i+3x+bDip3VosLYhX5+Jc2YcnnR3NAkh4uN2u/HJyhdhbW2APisfJ5x5MVXYJjGxfocTIgZiPVelzzXxEevvuljft8/rw/ZP1sLW0gZddiZmnXCEKCpNxfa+w80jUtI2AsSStCWEEEIIIYQQQgghhIxfuHnEpGuPQAghhBBCCCGEEEIIIYmMkraEEEIIIYQQQgghhBASRyhpSwghhBBCCCGEEEIIIXGEkraEEEIIIYQQQgghhBASRyhpSwghhBBCCCGEEEIIIXGEkraEEEIIIYQQQgghhBASRyhpSwghhBBCCCGEEEIIIXGEkraEEEIIIYQQQgghhBASRyhpSwghhBBCCCGEEEIIIXGEkraEEEIIIYQQQgghhBASRyhpSwghhBBCCCGEEEIIIXGEkraEEEIIIYQQQgghhBASR6RCB5AMeJ4HAFitVoEjIYQQQgghhBBCCCGExKtg/jCYTxwOJW0jwGazAQDy8vIEjoQQQgghhBBCCCGEEBLvbDYbDAbDsM8z/GhpXTIqjuPQ0tICnU4HhmGEDieqrFYr8vLy0NjYCL1eL3Q4MUPvm963GND7pvctFmJ97/S+6X2LAb1vet9iQO9bXO8bEO97p/dN7ztZ8TwPm82G7OxssOzwnWup0jYCWJZFbm6u0GHElF6vT/p/REOh9y0u9L7Fhd63+Ij1vdP7Fhd63+JC71tc6H2Lj1jfO71vcRHL+x6pwjaIBpERQgghhBBCCCGEEEJIHKGkLSGEEEIIIYQQQgghhMQRStqSMVEoFLjrrrugUCiEDiWm6H3T+xYDet/0vsVCrO+d3je9bzGg903vWwzofYvrfQPife/0vul9ix0NIiOEEEIIIYQQQgghhJA4QpW2hBBCCCGEEEIIIYQQ8v/Zu+/wKMq1j+Pf2U3vHRJKCiSUgFTBgFKkowgIKkUFLIhyBBX1HI+vXY9dFBGxUlQsKNhoUkLvXZASQkJLAoSQnmyS3ef9Y8lCIPQkm+zcn+vKlezM7ub+MWF2595nnqlGpGkrhBBCCCGEEEIIIYQQ1Yg0bYUQQgghhBBCCCGEEKIakaatEEKUY/ny5RQUFNi7DCGEEEIIcQlJSUmUlJTYuwxRRWRb649chknomTRthTjPjh07eP3115kyZQrp6ell1mVnZ/PAAw/YqbLK9eWXXzJixAimTZsGwI8//kiTJk2IioripZdesnN1Va9nz54kJyfbu4xKc+LEiTK3t2/fzogRI+jYsSODBw9m+fLl9inMDkwmE4mJiZhMJnuXUmmaN2/Oa6+9xpEjR+xdSrVw/Phx0tLS7F1GlTCbzRw/fpyTJ0/au5Qq8c8///DYY4/RqlUrQkNDCQ0NpVWrVjz22GP8888/9i7PLhITE7n11lvtXUalSU1N5dtvv2X+/PkUFRWVWZeXl8err75qp8oq1+LFi3nppZdYtmwZACtXrqRPnz7ceuuttvdyetGoUSMSEhLsXUaVSUlJ4aWXXmL48OE8/fTT7N27194lVYqFCxfy999/A2CxWHjttdeoU6cOrq6u1K1bl7feesshm3n9+vXjm2++0d3gEZPJxNNPP02nTp14++23AXj99dfx8vLC29ubYcOGkZ2dbecqK8eOHTu4//77iYqKwt3dHU9PT5o3b84LL7zgsJkB0tPTeeeddxg4cCBxcXHExc
UxcOBA3n33Xd28b70SmnLEPZ2odHv27OG2227j4MGD9i6lQv3111/069eP6OhocnJyyMvLY/bs2XTt2hWwHuiHhYVhNpvtXGnF+vDDD/m///s/evXqxbp16xg7diwTJ07kySefxGw28/777/Puu+8yevRoe5da4Vq3bl3u8u3bt9O4cWPc3NwA2Lp1a1WWVemMRiOpqamEhISwdu1aunTpQocOHWjXrh3bt28nPj6epUuX0qlTJ3uXWqGmT59Oo0aNiIuLo7CwkLFjxzJjxgyUUhgMBh588EE++ugjXF1d7V1qhTIYDAQEBJCZmUn37t15+OGH6d+/P05OTvYurVJlZGQwevRoNm7cyG233cbkyZN55JFH+Prrr9E0jfbt2/PLL78QGhpq71Ir3Lx583j77bfZuHEjxcXFAHh7e9OvXz/eeOMN6tevb+cKK96CBQsYMGAArVu3plevXtSqVQuwvnYvXryYLVu28Ntvv9GrVy87V1q1duzYQevWrR3uvQvApk2b6NmzJxaLheLiYurUqcOvv/5KbGws4Ljv27799ltGjRrFDTfcwP79+/n444958sknGTx4MBaLhW+//ZbvvvuOwYMH27vUCnXnnXeWu/y3337j1ltvxdvbG4A5c+ZUZVmVzsPDg0OHDhEcHMw///xDhw4dCA4OplWrVvz9998cPnyYdevWccMNN9i71ArVuHFjvvjiC2655RbefPNN3n//fZ5//nmaNGnCvn37ePPNN3nyySf597//be9SK5TBYMBoNOLp6cnQoUN56KGHaNOmjb3LqnRPPfUUP/74I0OHDmX+/Pl07dqVP//8k//9738YDAZefPFF+vTpw6RJk+xdaoVatGgRAwcOpG/fvri7uzNnzhweeOABPD09+eWXX1BKsXr1amrXrm3vUivUpk2b6NWrFx4eHnTv3r3Me7alS5eSn5/PokWLaNu2rZ0rtT9p2opr4qgHAB06dKBr16688cYbKKV49913ee2115g9eza9e/d22Df/TZo04YUXXmDYsGFs27aNdu3aMXXqVB588EEAvvrqKz799FM2b95s50ornrOzM927d+emm26yLVNK8dprrzFmzBhCQkIAHG60scFgIC0tjZCQEHr27Em9evX46quvbOufeOIJ/v77b5YuXWrHKiteVFQU33//Pe3bt+eZZ57h559/5oMPPrAdADz77LP079+fd955x96lViiDwcDRo0fZuHEjX3/9NQsWLMDf35/777+fBx98kCZNmti7xErx4IMPsnHjRh555BF+/vln/Pz8SEpKYsqUKRgMBsaPH0+TJk2YMWOGvUutUN988w1jx45l9OjRuLm58dVXXzFy5EjCw8P54Ycf2L17N2vXriU6OtrepVaoFi1a0L9//4uOrHz55ZeZM2cOO3furOLKKtflDmCPHTvGe++953DvXQB69OhBvXr1+PLLL8nLy+Pf//43P/30E4sXL6ZVq1YO+76tVatWjBo1inHjxrF06VLbhzFPPvkkAO+//z5z585l9erVdq60YhkMBjp16kRkZGSZ5TNnzuSOO+7Az88PwOFGGp/7nm3AgAFYLBbmzJmDk5MTFouF4cOHk5ubyx9//GHvUiuUm5sb+/fvp379+jRv3pwXX3yRu+66y7Z+3rx5PPHEEw43ytpgMLBr1y7++usvvv76a3bv3k3z5s156KGHGD58OP7+/vYusVLUr1+fr7/+mu7du3Pw4EGio6OZM2cO/fv3B6xnFzz88MMOdyZkq1ateOSRRxgzZgxgzTlu3Dj27NlDcXExffr0oV69eg63X7vpppto0aIFU6dORdO0MuuUUowZM4adO3eybt06O1VYjSghyvHkk09e8uvee+9VBoPB3mVWOB8fH3XgwIEyy7777jvl6emp/vjjD5WWluaQud3d3dWhQ4dst11dXdWuXbtstxMSEpSfn589Sqt0q1evVg0aNFAvvviiMpvNtuVOTk5q9+7ddqyscmmapo4fP66UUio0NFStW7euzPpdu3apoKAge5RWqVxdXW1/6zExMWrBggVl1q9YsULVr1/fHqVVqnO3t1JKpaSkqP/9738qOjpaGQwGFRcXp7766is7Vlg5QkND1Zo1a5RSSqWlpSlN09Rff/1lW7969WpVp04de5VXaRo3bqx++OEH2+1NmzapunXrKovFopRS6p577lEDBw60V3mVxs3NTe3du/ei6/fu3avc3NyqsKKqoWmaCgsLUxEREeV+hYWFOeR7F6WU8vf3V/v27Suz7M0331T+/v5q48aNDvu+zdPTUx08eNB229nZWe3YscN2e8+ePSowMNAepVWq77//XtWtW1d9/fXXZZbr6T1bvXr11MqVK8us37p1qwoNDbVHaZXq3PentWrVUlu3bi2zfv/+/crd3d0epVWq89+zbdiwQY0ePVr5+voqd3d3NXToULV06VI7Vlg5zj8edXZ2LnM8mpSUpDw8POxRWqVyc3NTSUlJttsWi0U5OzurlJQUpZRSK1euVMHBwXaqrvK4ubmpPXv2XHT9nj17HPI927WQOW1FuT766CNWrFjBtm3byv1y1LmTXF1dyczMLLNs2LBhfPnll9xzzz3MnTvXPoVVMg8PD/Ly8my3g4OD8fLyKnMfR530v2PHjmzZsoX9+/fToUMHEhMT7V1SlcnJySE7Oxs3N7cLpgNwc3MjPz/fTpVVntq1a9u2cV5eHkFBQWXWBwcHc+rUKXuUVqnO/wQ7NDSU5557jv3797N06VIaNGjAuHHj7FRd5cnKyqJOnToA1KpVCycnpzJTIYSFhV2wz3cEhw4don379rbbbdu2JS0tjdTUVMB6CmJ8fLy9yqs0ERERzJs376Lr582bR3h4eBVWVDXCw8OZOHEiSUlJ5X5d6t/EERQWFpa5/Z///If//ve/9OzZk7Vr19qpqsrl7OxcZv5eV1fXMu/bXF1dHXI+zCFDhrBq1Sq++uorBg0axOnTp+1dUpXQNM32Om4wGPD19S2z3s/PzyH/LQYOHMgbb7yB2Wymf//+TJkypcwcth9//DEtW7a0X4FVpF27dnz22WekpKQwZcoUjhw5Qo8ePexdVoWrX7++bVTlpk2b0DSNjRs32tZv2LDB9p7OkdSpU4d9+/bZbicmJmKxWAgMDASgbt265Obm2qu8SlO7du0y2/d8GzdutE2ZoHeOPZmduGYNGzbkySef5N577y13/fbt2x1ybp2WLVsSHx9/QbYhQ4aglGLEiBF2qqxyNW7cmJ07d9pOkT7/YkV79+4lIiLCDpVVDV9fX77//numTZvGzTffzCuvvHJBk8sRxcTEANZTUDZv3kyrVq1s63bv3k1YWJi9Sqs0w4cP5/nnn2f+/Pncd999vPrqq8yaNQsvLy/y8/N5+eWX6dixo73LrHDqEjMhdenShS5dujjkhQ6io6P5888/GTt2LAsWLMDNzY2//vqLZs2aAdZ5xM4/zdYRREREsHnzZtt+e+vWrRgMBtub34CAANs8t47k1VdfZdiwYSxfvrzc+dEWLlzIrFmz7FxlxWvTpg1btmzh7
rvvLne9pmkOebEegGbNmrF27doL5vJ8+umnsVgsDB061E6VVa6GDRuyd+9eGjVqBFinwCidzxWsB/1169a1V3mVKiIigpUrV/LKK6/QokULvvjiC4d/z6aUIiYmBk3TyM3NZefOnWX+5g8cOOBw810C/O9//6N79+40btyYuLg4Zs+ezeLFi4mJieHAgQNkZGSwaNEie5dZZTw8PBg5ciQjR45k//799i6nwo0ZM4aRI0fy5ZdfsmXLFt577z3++9//snfvXgwGA59++ikTJkywd5kV7v777+ehhx7i+eefx9XVlQ8++IA77rgDFxcXwNp3ccT3qk8//TSjR49my5YtdOvW7YL3bF988QXvvfeenausHqRpK8rVtm1btmzZctGmraMeADz66KOsXLmy3HVDhw5FKcUXX3xRxVVVvrfffhtPT8+Lrj98+DCPPPJIFVZkH6NGjeLmm29m+PDhDjuyuNT5o+zOvxBTUlKSQ1547qWXXmLXrl1ERUXRtm1bVq1aRa1atahTpw4pKSkEBgayePFie5dZ4UaMGIG7u/sl7+Pj41NF1VSdZ555hhEjRvDhhx9y5MgRvv32W8aPH8+GDRswGAzMmTOHDz74wN5lVrixY8fy0EMPsWnTJtzc3Pjyyy+57777MBqNgHW0SumHNo7krrvuok6dOkyaNIn333+ftLQ0wDqaIy4ujuXLlxMXF2fnKiveq6++eskzI5o2bUpSUlIVVlR17r//flasWGGbC/Bczz77LEoppk6daofKKtd///vfMvNanr//3rx580Wb+I7AYDDwyiuv0KNHD+6//36Hm7P4fOfPZdmwYcMyt9evX8/AgQOrsqQq4evry9q1a/nqq6/4448/iIiIwGKxUFRUxNChQ3n00Ucd8sOJzp072xp2F+OIr+FPPPEEISEhrFu3jgceeIChQ4fa5jLOz8/nySef5Pnnn7d3mRXuv//9L3l5ebz22muYTCZ69erFRx99ZFtfp04dPv30UztWWDnGjh1LUFAQEydOZMqUKbb9uNFopE2bNkyfPt2hX8euhlyITJQrLS0Nk8nkkKcRCnE5FouFnJwcfHx8HH70hl4tXLiQP/74g4MHD2KxWAgNDaVjx44MGzbskh9giJpnzZo1rF+/nri4ODp06MA///zDW2+9RX5+Pv369XPYMyg+/fRTvv32W9sBwAsvvICbmxsACQkJmM1mGjdubOcqhRDi+uTm5pKYmEiTJk0u2+gSQghR/RQXF5Oeng5AUFAQzs7Odq6oepGmrRBCCCGEEEIIIYQQQlQjciEyIYQQQgjh8Pbs2UNUVJS9y6hyes0N+s0uufVFcuuL5NYXR869Y8cOXn/9daZMmWIbaVsqOzubBx54wE6VVS/StBVCCCGEEA6vqKiIQ4cO2buMKqfX3KDf7JJbXyS3vkhufXHU3H/99Rft2rXjhx9+4O2336Zx48ZlrrlSUFDAjBkz7Fhh9SEXIhNCCCGEEDXeU089dcn1J0+erKJKqpZec4N+s0vu8kluxyK5yye5HYtec7/88ss8/fTTvPHGGyilePfdd7njjjuYPXs2vXv3tnd51YrMaSuEEEIIIWo8o9FIy5Yt8fHxKXd9bm4uW7dudbgrzes1N+g3u+SW3OeS3JLbEUhufeX29fVl69atNGjQwLZs1qxZjB49mh9++IEbb7yRsLAwh8t9LWSkrRDlMJvNTJ8+naVLl3LixAksFkuZ9cuWLbNTZZVLr7lBv9n1mluvZHsLR9awYUOefPJJ7r333nLXb9++nTZt2lRxVZVPr7lBv9klt+Q+l+R2LJJbcp/LUXO7urqSmZlZZtmwYcMwGAzcc889vP/++/YprBqSpq24JL0e4I8fP57p06dz22230axZMzRNs3dJVUKvuUG/2fWaW/Ztsr3P5ajbW2+527Zty5YtWy564KNpGo54gplec4N+s0tuyX0uye1YJLfkPpej5m7ZsiXx8fEXNKSHDBmCUooRI0bYqbLqR6ZHEJf0r3/9y3aAHxoaesEB/sSJE+1UWeUKCgpi5syZ9O3b196lVCm95gb9Ztdrbtm3yfY+l6Nub73lTktLw2QyER4ebu9SqpRec4N+s0tuya0Hklty64Fec8+dO5eVK1de9L3orFmz+OKLL8pcnEyvpGkrLkmvB/hhYWEsX76cmJgYe5dSpfSaG/SbXa+5Zd8m21sP9JpbCCGEEEIIR2CwdwGienNxcaFhw4b2LqPKTZgwgY8++sghT0W4FL3mBv1m12tu2bfJ9tYDveYWQgghhBDCEchIW3FJ77//PgcPHmTy5Mm6mfsQYODAgcTHxxMQEEBsbCzOzs5l1s+ZM8dOlVUuveYG/WbXa27Zt8n21gO95hZCCCGEEMIRyIXIxCWtXr2a+Ph4FixYoKsDfD8/PwYOHGjvMqqcXnODfrPrNbfs2/RFr9tbr7mFEEIIIYRwBDLSVlzSqFGjLrl+2rRpVVSJEEJUHNm36Ytet7decwshhBBCCOEIpGkrxCWcPHmSffv2AdCoUSOCg4PtXFHV0Gtu0G92vebWK9neQgghhBBCCFG9yfQI4oro7QA/Ly+Pxx9/nJkzZ2KxWAAwGo3cf//9fPzxx3h4eNi5wsqh19yg3+x6zV1K9m2yvfVAb7nNZjPTp09n6dKlnDhxwva3XmrZsmV2qqxy6TU36De75Jbc55LcjkVyS+5zSW79kqatuCS9HuA/9dRTrFixgj/++IOOHTsC1rkBx40bx4QJE/j000/tXGHl0Gtu0G92veaWfZtsbz1sb73mHj9+PNOnT+e2226jWbNmurkIm15zg36zS27JrQeSW3LrgeTWV+6rooS4hNGjR6uoqCg1f/58lZWVpbKystS8efNUgwYN1JgxY+xdXqUJDAxU8fHxFyxftmyZCgoKqvqCqohecyul3+x6zS37trJkezsmveYODAxU8+bNs3cZVU6vuZXSb3bJrS+SW18kt75IbnExMtJWXNIvv/zCzz//TJcuXWzL+vbti7u7O3fffbfDjsrKz8+nVq1aFywPCQkhPz/fDhVVDb3mBv1m12tu2beVJdvbMbe3XnO7uLjQsGFDe5dR5fSaG/SbXXLri+TWF8mtL5JbXIzB3gWI6k2vB/hxcXG89NJLFBYW2pYVFBTwyiuvEBcXZ8fKKpdec4N+s+s1t+zbZHuD429vveaeMGECH330EUpn19rVa27Qb3bJLbn1QHJLbj2Q3PrKfTU0Jf864hK6detGYGAgM2fOxM3NDbAe4I8YMYKMjAyWLFli5worx65du+jVqxcmk4kWLVoAsGPHDtzc3Fi0aBGxsbF2rrBy6DU36De7XnPLvk22tx62t15zDxw4kPj4eAICAoiNjcXZ2bnM+jlz5tipssql19yg3+ySW3KfS3I7Fsktuc8lufVLpkcQl/TRRx/Rq1cv6tatW+4BvqNq1qwZCQkJfPfdd+zduxeAoUOHMnz4cNzd3e1cXeXRa27Qb3a95pZ9m2xvPWxvveb28/Nj4MCB9i6jyuk1N+g3u+TWF8mtL5JbXyS3uBgZaSsuKz8/v8wB
fpMmTRz+AF8I4fhk36Yvet3ees0thBBCCCFETSdNWyHO+P333+nTpw/Ozs78/vvvl7zvHXfcUUVVVT695gb9Ztdrbr2S7S306uTJk+zbtw+ARo0aERwcbOeKqoZec4N+s0tuya0Hklty64Hk1lfuKyFNW3EBvR7gGwwG0tLSCAkJwWC4+DX6NE3DbDZXYWWVS6+5Qb/Z9Zpb9m2yvS/Gkba3XnOfKy8vj8cff5yZM2disVgAMBqN3H///Xz88cd4eHjYucLKodfcoN/skltyS27J7Wgkt+TWQ+6rooQ4j6Zp6vjx47afL/ZlMBjsXKkQQlw52bfpi163t15zn2v06NEqKipKzZ8/X2VlZamsrCw1b9481aBBAzVmzBh7l1dp9JpbKf1ml9ySW3JLbkcjuSW3HnJfDWnaClGOGTNmqMLCwguWm0wmNWPGDDtUVDX0mlsp/WbXa269ku0t9CAwMFDFx8dfsHzZsmUqKCio6guqInrNrZR+s0vusiS3Y5LcZUluxyS5y3L03Ffj4udJCgHMnDkTk8l0wfKioiJmzpxph4qqxqhRo8jKyrpgeU5ODqNGjbJDRVVDr7lBv9n1mlv2bWXJ9nZMes2dn59PrVq1LlgeEhJCfn6+HSqqGnrNDfrNLrnLktyOSXKXJbkdk+Quy9FzXw1p2opL0usBvlIKTdMuWH706FF8fX3tUFHV0Gtu0G92veaWfVtZsr0dk15zx8XF8dJLL1FYWGhbVlBQwCuvvEJcXJwdK6tces0N+s0uuSW35JbcjkZyS2495L4aTvYuQFRvejvAb9WqFZqmoWka3bp1w8np7H8Rs9lMUlISvXv3tmOFlUOvuUG/2fWau5Ts22R7g+Nu71J6zf3RRx/Rq1cv6tatS4sWLQDYsWMHbm5uLFq0yM7VVR695gb9ZpfckltyS25HI7kltx5yXw1p2opy6fUAf8CAAQBs376dXr164eXlZVvn4uJCREQEgwYNslN1lUevuUG/2fWaW/Ztsr1LOfL21mvuUs2aNSMhIYHvvvuOvXv3AjB06FCGDx+Ou7u7naurPHrNDfrNLrklt+SW3I5GcktuPeS+GppSStm7CFH9vPLKK7bvEyZMuOgBvouLi71KrFQzZsxgyJAhuLq62ruUKqXX3KDf7HrLLfs22d6lHHl76zW3EEIIIYQQjkSatuKS9HaAX+rIkSNomkbdunUB2LhxI7NmzaJp06aMHj3aztVVHr3mBv1m12tu2bfJ9tYDPeX+/fff6dOnD87Ozvz++++XvO8dd9xRRVVVPr3mBv1ml9yS+2Ikd80nuSX3xUhu/ZKmrbgkvR7g33LLLYwePZr77ruPtLQ0YmJibEP3H3/8cV588UV7l1gp9Job9Jtdr7ll3ybbWw/bW0+5DQYDaWlphISEYDBc/Dq7mqZhNpursLLKpdfcoN/skltyl0dyOwbJLbnLI7n17eL/QkIAw4YNIz4+HoC0tDS6d+/Oxo0bef7553n11VftXF3l2bVrF+3atQPgp59+onnz5qxdu5bvvvuO6dOn27e4SqTX3KDf7HrNLfs22d562N56ym2xWAgJCbH9fLEvR3vzr9fcoN/skltyS27JLbkdg+TWV+5rJU1bcUl6PcAvLi62nU66ZMkS27D8xo0bk5qaas/SKpVec4N+s+s1t+zbZHvrYXvrNffMmTMxmUwXLC8qKmLmzJl2qKhq6DU36De75C5LcjsmyV2W5HZMkrssR899VZQQl+Dp6amSkpKUUkr169dPvfXWW0oppQ4dOqTc3NzsWFnlateunfr3v/+tVq5cqdzc3NT27duVUkqtW7dO1alTx87VVR695lZKv9n1mlv2bbK9lXL87a3X3AaDQR0/fvyC5enp6cpgMNihoqqh19xK6Te75C5LcjsmyV2W5HZMkrssR899NWSkrbik2NhYpk6dyqpVq1i8eDG9e/cGICUlhcDAQDtXV3nefvttPvvsM7p06cLQoUNp0aIFYJ00u3TUkiPSa27Qb3a95pZ9m2xvcPztrdfcSik0Tbtg+dGjR/H19bVDRVVDr7lBv9kld1mS2zFJ7rIkt2OS3GU5eu6r4WTvAkT19vbbbzNw4EDeffddRowYoZsD/C5dupCenk52djb+/v625aNHj8bDw8OOlVUuveYG/WbXa27Zt8n2Bsff3nrL3apVKzRNQ9M0unXrhpPT2be5ZrOZpKQkW+Pakeg1N+g3u+SW3KUkt+R2FJJbcpdy5NzXQpq24pL0eoAP1k99tmzZQmJiIsOGDcPb2xsXFxfJ7cD0ml2PuWXfJtsbHH976y33gAEDANi+fTu9evXCy8vLts7FxYWIiAgGDRpkp+oqj15zg36zS27JXUpyS25HIbkldylHzn0tNKWUsncRonorKSlh+fLlZQ7wU1JS8PHxKfOfy5EcOnSI3r17c/jwYUwmE/v37ycqKorx48djMpmYOnWqvUusFHrNDfrNrtfcIPs22d6Ov71Bn7lnzJjBkCFDbBfd0wu95gb9ZpfcklsPJLfk1gPJra/cV0PmtBWXdOjQIZo3b07//v0ZO3YsJ0+eBKynXD799NN2rq7yjB8/nrZt23L69Gnc3d1tywcOHMjSpUvtWFnl0mtu0G92veaWfZtsb3D87a3X3LfeeqstK8DGjRt54okn+Pzzz+1YVeXTa27Qb3bJbSW5JbcjktxWklty615VX/lM1Cz9+/dX9957rzKZTMrLy0slJiYqpZSKj49XDRs2tHN1lScgIEDt3btXKaXK5E5KSlLu7u72LK1S6TW3UvrNrtfcsm+T7a2U429vvea++eab1cyZM5VSSqWmpipvb28VFxengoKC1CuvvGLn6iqPXnMrpd/skltyS27J7Wgkt+TWQ+6rISNtxSWtWrWK//u//8PFxaXM8oiICI4dO2anqiqfxWLBbDZfsPzo0aN4e3vboaKqodfcoN/ses0t+7ayZHs7Jr3m3rVrl+1Caz/99BPNmzdn7dq1fPfdd0yfPt2+xVUiveYG/WaX3JJbck+3b3GVSHJLbsk93b7FVRPStBWXpNcD/J49e/Lhhx/abmuaRm5uLi+99BJ9+/a1X2GVTK+5Qb/Z9Zpb9m1Wsr0de3vrNXdxcbFtbrQlS5Zwxx13ANC4cWNSU1PtWVql0mtu0G92yS25QXI7KsktuUFyC2naisvQ6wH++++/z5o1a2jatCmFhYUMGzbMNjLp7bfftnd5lUavuUG/2fWaW/Ztsr31sL31mjs2NpapU6eyatUqFi9eTO/evQFISUkhMDDQztVVHr3mBv1ml9ySGyS3o5Lckhskt0DmtBWXduTIEdW0aVPVpEkT5eTkpG666SYVGBioGjVqpI4fP27v8ipVcXGx+uabb9QzzzyjHn30UfXFF1+o/Px8e5dV6fSaWyn9Ztdjbtm3yfbWw/bWa+74+Hjl5+enDAaDGjVqlG35c889pwYOHGjHyiqXXnMrpd/skltyKyW5HZXkltxKSW6hlKaUUvZuHIvqraSkhB9++IGdO3eSm5tL69atGT58eJkrjwshRE0j+zZ90ev21mt
us9lMdnY2/v7+tmXJycl4eHgQEhJix8oql15zg36zS27JLbklt6OR3JJbD7mvlJO9CxDVn5OTE/fee6+9y6hSM2fOvOT6+++/v4oqqVp6zQ36za7X3CD7tvLI9nY8es2tlGLLli0kJiYybNgwvL29cXFxwcPDw96lVSq95gb9ZpfckltyOy7JLbklt5CRtuKS9HqAf+6nPGCdIDs/P9+288jIyLBTZZVLr7lBv9n1mlv2bVayva0cdXvrNfehQ4fo3bs3hw8fxmQysX//fqKiohg/fjwmk4mpU6fau8RKodfcoN/skltyS27J7Wgkt+TWQ+6rYocpGUQN4ufnV+bL09NTaZqmXF1dlb+/v73Lq1L79+9X3bp1UwsXLrR3KVVKr7mV0m92PeSWfdtZsr0dd3vrNXf//v3Vvffeq0wmk/Ly8lKJiYlKKeu8aQ0bNrRzdZVHr7mV0m92yS25lZLcjkpyS26lJLdQSpq24qrp4QD/YjZt2qQaNWpk7zKqnF5zK6Xf7HrMLfs22d56oIfcAQEBau/evUopVeYAICkpSbm7u9uztEql19xK6Te75JbcSkluRyW5JbdSklsoZbD3SF9R80RHR/PWW28xfvx4e5dS5ZycnEhJSbF3GVVOr7lBv9n1mFv2bbK99UAPuS0WC2az+YLlR48exdvb2w4VVQ295gb9ZpfcZUluxyS5y5Lcjklyl+Xoua+GXIhMXBNHP8D//fffy9xWSpGamsrkyZPp2LGjnaqqfHrNDfrNrtfcFyP7Nn1x9O19MY6eu2fPnnz44Yd8/vnnAGiaRm5uLi+99BJ9+/a1c3WVR6+5Qb/ZJbfkltyS29FIbsmth9xXQy5EJi7pUgf49erVY8GCBXaqrHIZDGUHoWuaRnBwMLfeeivvv/8+oaGhdqqscuk1N+g3u15zy77NSra3Y29vveY+evQovXr1QilFQkICbdu2JSEhgaCgIFauXElISIi9S6wUes0N+s0uuSW35JbcjkZyS2495L4a0rQVl6TXA3whhGOTfZu+6HV76zU3QElJCT/88AM7d+4kNzeX1q1bM3z4cNzd3e1dWqXSa27Qb3bJLbklt+OS3JJbcgtp2gpxCenp6bi4uODj42PvUqqUXnODfrPrNbdeyfYWQgghhBBCiOpN5rQVV0RPB/iZmZk8//zz/Pjjj5w+fRqA4OBgRo0axQsvvICHh4edK6wces0N+s2u19znkn2bbG890FvumTNnXnL9/fffX0WVVC295gb9Zpfc5ZPcjkVyl09yOxbJXT5HzX01ZKStuCg9HuBnZGQQFxfHsWPHGD58OE2aNAHgn3/+YdasWTRu3JjVq1ezc+dO1q9fz7hx4+xcccXQa27Qb3a95gbZt8n2dvztDfrNDeDv71/mdnFxMfn5+bi4uODh4UFGRoadKqtces0N+s0uua0kt+R2RJLbSnJLbt1TQpTj1KlTKiYmRnl6eqrRo0eriRMnqokTJ6qHH35YeXp6qjZt2qiCggK1YcMG9dFHH9m73Aozfvx41axZM5WWlnbButTUVNW8eXM1ePBg5ePjo6ZPn26HCiuHXnMrpd/ses0t+zbZ3nrY3nrNfSn79+9X3bp1UwsXLrR3KVVKr7mV0m92yS259UByS249kNz6yn0x0rQV5dLrAX54ePgldw4LFixQmqapl19+uQqrqnx6za2UfrPrNbfs28on29uxtrdec1/Opk2bVKNGjexdRpXTa26l9JtdcuuL5NYXya0vkltI01aUS68H+C4uLurIkSMXXX/kyBFlNBqrsKKqodfcSuk3u15zy76tfLK9HWt76zX35Wzbtk15e3vbu4wqp9fcSuk3u+TWF8mtL5JbXyS3kAuRiXKlpqYSGxt70fXNmjXDYDDw0ksvVWFVlS8oKIjk5GTq1q1b7vqkpCRCQkKquKrKp9fcoN/ses0t+zbZ3udy1O2t19ylfv/99zK3lVKkpqYyefJkOnbsaKeqKp9ec4N+s0tuK8ktuR2R5LaS3JJb9+zULBbVXFhYmFq1atVF169cuVKFhoZWYUVVY9SoUapTp07KZDJdsK6wsFB17txZjRo1yg6VVS695lZKv9n1mlv2bbK9z+Wo21uvuUtpmlbmy2AwqFq1aqmhQ4eqlJQUe5dXafSaWyn9ZpfckltyS25HI7kltx5yXw1NKaXs3TgW1c8DDzxAYmIiixcvxsXFpcw6k8lEr169iIqK4uuvv7ZThZXj6NGjtG3bFldXV8aOHUvjxo1RSrFnzx6mTJmCyWRi06ZN1K9f396lVii95gb9Ztdrbtm3yfYu5cjbW6+5hRBCCCGEcCTStBXl0usBPlhPE37sscf466+/KP3voWkaPXr0YPLkyTRs2NDOFVYOveYG/WbXY27Zt8n21sP21mvu86Wnp+Pi4oKPj4+9S6lSes0N+s0uuSW3Hkhuya0Hkltfua9IVQ/tFTXHwYMHVe/evZXBYCgzXL1Xr14qISHB3uVVuoyMDLVhwwa1YcMGderUKXuXU2X0mlsp/WbXW27Zt8n21sP21mvu06dPq8cee0wFBgYqg8FgO83uP//5j8rLy7N3eZVGr7mV0m92yS25JbfkdjSSW3LrIffVkpG24rJOnz5NQkICAA0bNiQgIMDOFQkhxPWTfZu+6HV76yl3RkYGcXFxHDt2jOHDh9OkSRMA/vnnH2bNmkXjxo1ZvXo1O3fuZP369YwbN87OFVcMveYG/WaX3JIbJLfkltyOQHLrK/c1sXfXWAghhBBCiOs1fvx41axZM5WWlnbButTUVNW8eXM1ePBg5ePjo6ZPn26HCiuHXnMrpd/skltyl5LckttRSG7JXcqRc18LadoKIYQQQogaLzw8XC1cuPCi6xcsWKA0TVMvv/xyFVZV+fSaWyn9Zpfc5ZPcktsRSO7ySW7JrVcyPYIQQgghhKjxXF1dSUxMpG7duuWuP3r0KBEREZSUlFRxZZVLr7lBv9klt+Q+l+SW3I5Ackvuczlq7mthsHcBQgghhBBCXK+goCCSk5Mvuj4pKYmQkJCqK6iK6DU36De75C6f5HYskrt8ktuxSO7yOWruayFNWyGEEEIIUeP16tWL559/nqKiogvWmUwmXnjhBXr37m2HyiqXXnODfrNLbsldSnJLbkchuSV3KUfOfS1kegQhhBBCCFHjHT16lLZt2+Lq6srYsWNp3LgxSin27NnDlClTMJlMbNq0ifr169u71Aql19yg3+ySW3JLbsktuR2D5NZX7mtih3l0hRBCCCGEqHAHDx5UvXv3VgaDQWmapjRNUwaDQfXq1UslJCTYu7xKo9fcSuk3u+SW3JJbcjsayS259ZD7aslIWyGEEEII4VBOnz5NQkICAA0bNiQgIMDOFVUNveYG/WaX3JJbDyS35NYDya2v3FdKmrZCCCGEEEIIIYQQQghRjciFyIQQQgghhBBCCCGEEKIakaatEEIIIYQQQgghhBBCVCPStBVCCCGEEEIIIYQQQohqRJq2QgghhBBCCCGEEEIIUY1I01YIIYQQQgg70DSNX3/91d5lCCGEEEKIakiatk
IIIYQQwmEdOXKEBx54gLCwMFxcXAgPD2f8+PGcOnWqymp4+eWXadmy5QXLU1NT6dOnT5XVIYQQQgghag5p2gohhBBCCId08OBB2rZtS0JCAt9//z0HDhxg6tSpLF26lLi4ODIyMuxaX+3atXF1dbVrDUIIIYQQonqSpq0QQgghhHBIY8eOxcXFhb/++ovOnTtTv359+vTpw5IlSzh27BjPP/88UP40BX5+fkyfPt12+8iRI9x99934+fkREBBA//79SU5Otq1fvnw57dq1w9PTEz8/Pzp27MihQ4eYPn06r7zyCjt27EDTNDRNsz3v+b/377//5tZbb8Xd3Z3AwEBGjx5Nbm6ubf3IkSMZMGAA7733HqGhoQQGBjJ27FiKi4tt95kyZQrR0dG4ublRq1YtBg8eXGH/nkIIIYQQoupI01YIIYQQQjicjIwMFi1axGOPPYa7u3uZdbVr12b48OH8+OOPKKUu+1zFxcX06tULb29vVq1axZo1a/Dy8qJ3794UFRVRUlLCgAED6Ny5Mzt37mTdunWMHj0aTdO45557mDBhArGxsaSmppKamso999xzwe/Iy8ujV69e+Pv7s2nTJmbPns2SJUv417/+VeZ+8fHxJCYmEh8fz4wZM5g+fbqtCbx582bGjRvHq6++yr59+1i4cCGdOnW69n9EIYQQQghhN072LkAIIYQQQoiKlpCQgFKKJk2alLu+SZMmnD59mpMnT172uX788UcsFgtffvklmqYBMG3aNPz8/Fi+fDlt27YlKyuL22+/nQYNGtiev5SXlxdOTk7Url37or9j1qxZFBYWMnPmTDw9PQGYPHky/fr14+2336ZWrVoA+Pv7M3nyZIxGI40bN+a2225j6dKlPPzwwxw+fBhPT09uv/12vL29CQ8Pp1WrVlf2DyaEEEIIIaoVGWkrhBBCCCEc1uVG0rq4uFz2OXbs2MGBAwfw9vbGy8sLLy8vAgICKCwsJDExkYCAAEaOHEmvXr3o168fH330EampqVdV5549e2jRooWtYQvQsWNHLBYL+/btsy2LjY3FaDTaboeGhnLixAkAevToQXh4OFFRUdx3331899135OfnX1UdQgghhBCiepCmrRBCCCGEcDgNGzZE0zT27NlT7vo9e/YQHByMn58fmqZd0Nw9d57Y3Nxc2rRpw/bt28t87d+/n2HDhgHWkbfr1q2jQ4cO/Pjjj8TExLB+/foKz+Xs7FzmtqZpWCwWALy9vdm6dSvff/89oaGhvPjii7Ro0YLMzMwKr0MIIYQQQlQuadoKIYQQQgiHExgYSI8ePZgyZQoFBQVl1qWlpfHdd98xcuRIAIKDg8uMjE1ISCgzQrV169YkJCQQEhJCw4YNy3z5+vra7teqVSuee+451q5dS7NmzZg1axZgHc1rNpsvWW+TJk3YsWMHeXl5tmVr1qzBYDDQqFGjK87t5ORE9+7deeedd9i5cyfJycksW7bsih8vhBBCCCGqB2naCiGEEEIIhzR58mRMJhO9evVi5cqVHDlyhIULF9KjRw9iYmJ48cUXAbj11luZPHky27ZtY/PmzYwZM6bMiNbhw4cTFBRE//79WbVqFUlJSSxfvpxx48Zx9OhRkpKSeO6551i3bh2HDh3ir7/+IiEhwTavbUREBElJSWzfvp309HRMJtMFtQ4fPhw3NzdGjBjBrl27iI+P5/HHH+e+++6zzWd7OX/++SeTJk1i+/btHDp0iJkzZ2KxWK6q6SuEEEIIIaoHadoKIYQQQgiHFB0dzaZNm4iKiuLuu+8mPDycPn36EBMTw5o1a/Dy8gLg/fffp169etxyyy0MGzaMp59+Gg8PD9vzeHh4sHLlSurXr8+dd95JkyZNePDBByksLMTHxwcPDw/27t3LoEGDiImJYfTo0YwdO5ZHHnkEgEGDBtG7d2+6du1KcHAw33///QW1enh4sGjRIjIyMrjxxhsZPHgw3bp1Y/LkyVec18/Pjzlz5nDrrbfSpEkTpk6dyvfff09sbOx1/ksKIYQQQoiqpqnLXZ1BCCGEEEIIB/HSSy/xwQcfsHjxYm666SZ7lyOEEEIIIUS5pGkrhBBCCCF0Zdq0aWRlZTFu3DgMBjnxTAghhBBCVD/StBVCCCGEEEIIIYQQQohqRIYWCCGEEEIIIYQQQgghRDUiTVshhBBCCCGEEEIIIYSoRqRpK4QQQgghhBBCCCGEENWING2FEEIIIYQQQgghhBCiGpGmrRBCCCGEEEIIIYQQQlQj0rQVQgghhBBCCCGEEEKIakSatkIIIYQQQgghhBBCCFGNSNNWCCGEEEIIIYQQQgghqhFp2gohhBBCCCGEEEIIIUQ1Ik1bIYQQQgghhBBCCCGEqEakaSuEEEIIIYQQQgghhBDViDRthRBCCCGEEEIIIYQQohqRpq0QQgghhBBCCCGEEEJUI9K0FUIIIYQQQgghhBBCiGpEmrZCCCGEEEIIIYQQQghRjUjTVgghhBDCwSUnJ6NpGtOnT7d3KdXe8uXL0TSN5cuX27uUi9I0jZdffvmqH1fT/w5qwrYRQgghhKgo0rQVQgghhKjhpk+fjqZp5X795z//sXd5/O9//+PXX3+1dxkV6tx/89WrV1+wXilFvXr10DSN22+/3Q4VXrvS5qimaXz77bfl3qdjx45omkazZs2u6XfMmjWLDz/88DqqFEIIIYRwbE72LkAIIYQQQlSMV199lcjIyDLLmjVrRnh4OAUFBTg7O9ulrv/9738MHjyYAQMG2OX3VyY3NzdmzZrFzTffXGb5ihUrOHr0KK6urnaq7PqVZrv33nvLLE9OTmbt2rW4ubld83PPmjWLXbt28cQTT1zxYzp16kRBQQEuLi7X/HuFEEIIIWoKadoKIYQQQjiIPn360LZt23LXXU+DTVxc3759mT17NpMmTcLJ6exb61mzZtGmTRvS09PtWN316du3L7///jvp6ekEBQXZls+aNYtatWoRHR3N6dOnK72OwsJCXFxcMBgM8ncshBBCCN2Q6RGEEEIIIRxceXOZjhw5Ei8vL44dO8aAAQPw8vIiODiYp59+GrPZXObxFouFDz/8kNjYWNzc3KhVqxaPPPLIFTXsNE0jLy+PGTNm2E65HzlypK2GiIiICx7z8ssvo2naBc/zr3/9i19//ZVmzZrh6upKbGwsCxcuvODxx44d44EHHqBWrVq2+3399dcX3O/o0aMMGDAAT09PQkJCePLJJzGZTJfNdK6hQ4dy6tQpFi9ebFtWVFTEzz//zLBhw8p9TF5eHhMmTKBevXq4urrSqFEj3nvvPZRSZe5nMpl48sknCQ4OxtvbmzvuuIOjR4+W+5xXmvlq9O/fH1dXV2bPnl1m+axZs7j77rsxGo3lPu7bb7+lTZs2uLu7ExAQwJAhQzhy5IhtfZcuXZg3bx6HDh2y/U2U/h2UTs3www8/8H//93/UqVMHDw8PsrOzLzqn7YYNG+jbty/+/v54enpyww038NFHH9nWp6WlMWrUKOrWrYurqyuhoaH079+f5OTk6/r3EUIIIYSoTDLSVgghhBDCQWRlZV0ws
vPcEZLnM5vN9OrVi/bt2/Pee++xZMkS3n//fRo0aMCjjz5qu98jjzzC9OnTGTVqFOPGjSMpKYnJkyezbds21qxZc8lpF7755hseeugh2rVrx+jRowFo0KDBNeVbvXo1c+bM4bHHHsPb25tJkyYxaNAgDh8+TGBgIADHjx/npptusjV5g4ODWbBgAQ8++CDZ2dm20/ELCgro1q0bhw8fZty4cYSFhfHNN9+wbNmyq6opIiKCuLg4vv/+e/r06QPAggULyMrKYsiQIUyaNKnM/ZVS3HHHHcTHx/Pggw/SsmVLFi1axDPPPMOxY8eYOHGi7b4PPfQQ3377LcOGDaNDhw4sW7aM22677YIarjTz1fLw8KB///58//33tr+HHTt2sHv3br788kt27tx5wWPeeOMNXnjhBe6++24eeughTp48yccff0ynTp3Ytm0bfn5+PP/882RlZXH06FFbXi8vrzLP89prr+Hi4sLTTz+NyWS66JQIixcv5vbbbyc0NJTx48dTu3Zt9uzZw59//sn48eMBGDRoELt37+bxxx8nIiKCEydOsHjxYg4fPlzuhwZCCCGEENWCEkIIIYQQNdq0adMUUO6XUkolJSUpQE2bNs32mBEjRihAvfrqq2Weq1WrVqpNmza226tWrVKA+u6778rcb+HCheUuL4+np6caMWLEBctHjBihwsPDL1j+0ksvqfPfpgLKxcVFHThwwLZsx44dClAff/yxbdmDDz6oQkNDVXp6epnHDxkyRPn6+qr8/HyllFIffvihAtRPP/1ku09eXp5q2LChAlR8fPwlM5X+m2/atElNnjxZeXt72577rrvuUl27dlVKKRUeHq5uu+022+N+/fVXBajXX3+9zPMNHjxYaZpmy7d9+3YFqMcee6zM/YYNG6YA9dJLL1115vL+DsoTHx+vADV79mz1559/Kk3T1OHDh5VSSj3zzDMqKipKKaVU586dVWxsrO1xycnJymg0qjfeeKPM8/3999/KycmpzPLbbrut3G1f+rujoqJsdZ+/rnTblJSUqMjISBUeHq5Onz5d5r4Wi0UppdTp06cVoN59991LZhZCCCGEqG5kegQhhBBCCAfxySefsHjx4jJflzNmzJgyt2+55RYOHjxouz179mx8fX3p0aMH6enptq82bdrg5eVFfHx8hee4mO7du5cZpXvDDTfg4+Njq1cpxS+//EK/fv1QSpWpt1evXmRlZbF161YA5s+fT2hoKIMHD7Y9n4eHh2008NW4++67KSgo4M8//yQnJ4c///zzolMjzJ8/H6PRyLhx48osnzBhAkopFixYYLsfcMH9zh81ezWZr0XPnj0JCAjghx9+QCnFDz/8wNChQ8u975w5c7BYLNx9991l6qhduzbR0dFX9bcyYsQI3N3dL3mfbdu2kZSUxBNPPIGfn1+ZdaXTa7i7u+Pi4sLy5curZP5dIYQQQoiKItMjCCGEEEI4iHbt2l30QmTlcXNzIzg4uMwyf3//Ms2thIQEsrKyCAkJKfc5Tpw4AVinZigoKLAtd3FxISAg4GrKv6z69etfsOzcek+ePElmZiaff/45n3/++SXrPXToEA0bNrxg7txGjRpddV3BwcF0796dWbNmkZ+fj9lsLtMMPtehQ4cICwvD29u7zPImTZrY1pd+NxgMF0wlcX59V5P5Wjg7O3PXXXcxa9Ys2rVrx5EjRy7akE5ISEApRXR09EWf60pFRkZe9j6JiYkANGvW7KL3cXV15e2332bChAnUqlWLm266idtvv53777+f2rVrX3E9QgghhBBVTZq2QgghhBA6dbELSZ3LYrEQEhLCd999V+760qbv+PHjmTFjhm15586dL7hg1PnOb5iWOv9CaJerV525gJfFYgHg3nvvZcSIEeXe94YbbrhkTddq2LBhPPzww6SlpdGnT58LRn5WlqrIPGzYMKZOncrLL79MixYtaNq06UVr0TSNBQsWlLutzp+39lIuN8r2ajzxxBP069ePX3/9lUWLFvHCCy/w5ptvsmzZMlq1alVhv0cIIYQQoiJJ01YIIYQQQlxUgwYNWLJkCR07drxkI+3ZZ5/l3nvvtd329/e3/Xyx5qy/vz+ZmZkXLC8dbXq1goOD8fb2xmw2071790veNzw8nF27dqGUKlPfvn37rul3Dxw4kEceeYT169fz448/XvL3LlmyhJycnDKjbffu3WtbX/rdYrGQmJhYZnTt+fVdTeZrdfPNN1O/fn2WL1/O22+/fdH7NWjQAKUUkZGRxMTEXPI5L/Y3cTVKRyHv2rXrstkbNGjAhAkTmDBhAgkJCbRs2ZL333+fb7/99rrrEEIIIYSoDDKnrRBCCCGEuKi7774bs9nMa6+9dsG6kpISW9O1adOmdO/e3fbVpk0b2/08PT3Lbc42aNCArKwsdu7caVuWmprK3Llzr6lWo9HIoEGD+OWXX9i1a9cF60+ePGn7uW/fvqSkpPDzzz/bluXn5190ioHL8fLy4tNPP+Xll1+mX79+F71f3759MZvNTJ48uczyiRMnomkaffr0AbB9nzRpUpn7ffjhh2VuX03ma6VpGpMmTeKll17ivvvuu+j97rzzToxGI6+88opt9HMppRSnTp2y3fb09CQrK+u66mrdujWRkZF8+OGHF/x9lf7+/Px8CgsLy6xr0KAB3t7emEym6/r9QgghhBCVSUbaCiGEEEKIi+rcuTOPPPIIb775Jtu3b6dnz544OzuTkJDA7Nmz+eijjy46f2upNm3asGTJEj744APCwsKIjIykffv2DBkyhH//+98MHDiQcePGkZ+fz6effkpMTMw1XzzrrbfeIj4+nvbt2/Pwww/TtGlTMjIy2Lp1K0uWLCEjIwOAhx9+mMmTJ3P//fezZcsWQkND+eabb/Dw8Lim3wtcdHqCc/Xr14+uXbvy/PPPk5ycTIsWLfjrr7/47bffeOKJJ2yjR1u2bMnQoUOZMmUKWVlZdOjQgaVLl3LgwIFrznw9+vfvT//+/S95nwYNGvD666/z3HPPkZyczIABA/D29iYpKYm5c+cyevRonn76acD6N/Hjjz/y1FNPceONN+Ll5XXJZnd5DAYDn376Kf369aNly5aMGjWK0NBQ9u7dy+7du1m0aBH79++nW7du3H333TRt2hQnJyfmzp3L8ePHGTJkyDX/ewghhBBCVDZp2gohhBBCiEuaOnUqbdq04bPPPuO///0vTk5OREREcO+999KxY8fLPv6DDz5g9OjR/N///R8FBQWMGDGC9u3bExgYyNy5c3nqqad49tlniYyM5M033yQhIeGam7a1atVi48aNvPrqq8yZM4cpU6YQGBhIbGxsmVP7PTw8WLp0KY8//jgff/wxHh4eDB8+nD59+tC7d+9r+t1XwmAw8Pvvv/Piiy/y448/Mm3aNCIiInj33XeZMGFCmft+/fXXBAcH89133/Hrr79y6623Mm/ePOrVq3dNmavCf/7zH2JiYpg4cSKvvPIKAPXq1aNnz57ccccdtvs99thjbN++nWnTpjFx4kTCw8OvumkL0KtXL+Lj43nllVd4//33
oStg7idyZkZbuUhK0+BBLG+jqaDgyJDiZAq6HJZKGwuolZg2dx3qjzGBMzxrmBEsdCaCKYGiFvvWeC7Ws010J+qw5rpvuStpJOR9jcucTfeqtL3hay1UpLbi51K1e6LTaPkTLJ/QlbgL+/gur9sG8NGLzsX9OOovoiTFYTBq2BpJAkn8Xhj4ikrcCrqKsuuTW5PVe/7m59gFfbZ/sxqsFATnkDFjuTZFeRJAl9ggOragmHKKZuslVp8fEUC56EMafB8U94VVNHkiQmJU3ijBFnkFumCOVniqRtnyb5iccZ9uMyQmd5tkJQ4BkSg5UqQJeSeOpn/J5lYLW4ISr/Q03iBcpK18mwuFA0Ggn2rkDe9T2yVQvzH3H/hSWJoltvo+K112j++29RaetmTPn51HzxJWVPPY2ka2t+U/+eRHuhwN2kxQRj0CnJq7zKzi3JNS01lDeVu2dhpq7E9TH8GLVV32+KC9TE3uDxoLXfzOurzkedVmN7FssurWdB+gLumHQHk5ImOTeQJEHGXGVbrT71d3JWgtUMMRkQ3ffuSZbKSnKOO56Cq67G2uCZ4ii3Y2qGgt/cM5Yst+nkTr7Sp9XfqqxTangqWk8kpwcwImkr8Cqp4aloJA31pnoqmiu6PrCpGvavU7bVKqxeUPLof9l/0cXUr1nb6zHcweAoZYXXaLZSVO1ZzRlrU1P3VYvTb1Be//gA6j1kBhYxGE5/s8PNvXHrVmp//hnZ6B3X0azWybDaEiXom2gMoiKnP5MRqfyNlzYX9H6QIdMhKEr5vGgoc1Nk/oWaQLEY44DWxSiLGX78N8Y6Lbs+TyT32nvc/kAsaTSEn3ACEaeeihQUbFt4rWyutLXvC3qPFBhI3I03EHXeeUgBAbb3hfuywFPotBqGJ6i6tp11Z9svzBQ/+BA5J59CS66TCdyaQlg8Hl6Y0Kec5vsSzSYL+VVK0txvKm3zNimvqhndQciyzNzP5nLqN6dyoP6AFwNTyHCXGVlma9JW1Xn1d7J+Ul7dKY0A1Hz7HS179yJbXTM208XGok9JIXDMGMyVlW6KzoNU5sKzY+Hdk5XKdFfJXg4lO5QK94mudye7gpi7eA6RtBV4FYPWQHKI0k7b7Sp+1s/Kql7cKIjuvYFY49atNG7ciLXZt+LcWo1EWqyiDZbtIV1bgMoPPyRrxkwqXn+j64OGTFc0bywtsPk19wZQ3nWLdPlrr1F43fWUv/Y/916zHasLVvNV1lf8U5pDeX0L4EeT4QGAbDL5OgSBkxyaMByARmsxlt5WyWr1cOMOWPRLr/XL/RlZlm0T4braKKDVuOaPd6H0H1qaIsEqg7UbPXMXSLznbpIfeZjAEcMJ1geTEJwACIkEd6CPjyf2yitJuOP2Du+LSluBJxnZKpGw80Dn5FX7jrjmHTto2bWL5h1OSiWEJUFLnWJGtt+3RRN9lX0VDcgyhAfqiA0N6PmE/oBaaZs62e7uyuZKSptKya7KJiowyouBKaiLFWpRR01LDX+W/kmjyUkTrPSjQNJC+R6o2u/uMPseVfuUVzVZ7QbM5eUU3XYbOScsdEt17LCffmToZ58SkJLihug8TOQQCI4GYx389pbr4617Vnkdf5FSAOFDhIGq5xBJW4HXUVfxu33g271UeXWhyhYg8b77SHrkEYIOO8ylcdyBp3VtQVlttDY00Pj7710fJElt1bab/wctbkoiF2yFFyfCF5cpFWDtkGWZwJGj0MXFEXGC54yGPtn9Cfeuv5fvslcCMCgyiBCD8Fvs61R//TXZ8+ZT+uyzvg5F4CTjBw1FtuqQJTNF9UW9H8ggFle6os5UR5A+CI2koahcae8cGSXDrw8BEHbh7WT8upykBx/wSjxCIsHzqN9b9XstELgTVdfWXqWt+rC9r2YfMVdewaDFzxMyfZpzF9BoYHir2azQKrfL3lLlWSAjPtSnnhtuo66kNbknweCJdg9RP9eSQ5MJ1Hm/hVuVS8tqrbT917f/4vwfzmd31W7nBgqKhJTWxHTOCjdG2Ee56Du4/g9Im+m2IS01NQRPnEjgoWPRhrluGN2v/oY0Gph2vbK98WUwt/R+rMKtio6tRgdTrnJPfC4gKm09h8hmCLzO0IihrC1c2/UDn9nYphM0wrWkbdDYMQSNdVJk3kPYdG09WGkbNns2qe+8Q/DECd0fOPIEGHO6ojur74U78MFYzIppnGwFSdNJy0qSJOJvupG4G65H0nhurUi9WVhalBbiYf6iE+bnSFotprw8Gjdv8XUoAicZGhuGbIpF1taSXXGAlHAXqxyaqpRXH1cL9CXCA8JZ/q/l1Lc0MPGhNQAcVvEDNFZA7HCkiZei1+rRJyd7NA5zWRloNByTegyZkZmiksJFZIuF5u3bMYwa1Ukm5v5p97O3ei8jowaAo7bA64xKUoxqdhd3rrRV/64L6wsJOHkaBm0vJYxGHA+/vwO7f/C6v0F/QNWz9ZtusOZqGHqkkoAKjLB7iFqs46uETma7SltZlkkLT+NAwwH21ezj8PjDnRts7n2K4V6i74uCvIILXa/2MAwbxpD33vWZxrHPGfsvZeG9rgi2fQpHnN+7cTa1dsyOOR0ifV9lrP6Np0e49/dFIJK2Ah/QvvXKLsXbFVfO0ERIPsJ7gXkYdWK214NJWykggJDJDojqa7RwejcSCs6y5XUo3qZM1OY91HV8HkzYGi1GCusLAairiwJqhAlZPyFkxgwGv/IywRPsV2cI+i4GnZbYmlvYX2HBMNvFSdov98P6xXD0PTDjRneE51dUN0o0mSwEaDVEzroGElMgKFqRl/AwJY8+SuU77xJ73bWcec01Hr/eQMC4bx/7zjobTVgYwzdt7HB/HJ8wnvEJ430YncCfUeUR9lc20tBi7tCRFBMYQ6g+lHpTPXm1eWRGZfbuIumzlKKA2gJlfpg0QJJbDqI+C/hNcUHcCLjwW8UUqQvUwgpfLfgNiQlGr5VoNFooqmkmLSKNDQc29E7qJ9W+bq/fYbUoz4wewl0VsuaqKopuux1jfh7DfvjBo8+bbkEXAFOvhp/uhvXPw7hzlQpcZzn+CUgcA8PmuD9GJ6kz1tn8ioaED/FxNP5HH/+NFvgj7Vuv7DJ4PNyWDWe+37sPsFaad+6k7tdfMR3wvti9PdK9II/QHlmWkS1ecGKvPWBr02Xu/RAa12G3saCAlqwsj4eRX5ePRbYQog8hv1xJYviNI6+fo4uKImz2bLShIb4ORdALMuJiANjr6mdb+CBFy3y3aKe1h6rDNzQ2BJ1OC4ecgiV+AsUPPkTVZ595tGIlYGg6SBLmYuEG7y7MpaVoo6MxDB/e9x8wBX5FTKiBuDADstzZlEmSpLZ5eu0+WrKyqP7iS0wlThrX6oNg2NHKtpBI6IQtaetkpW3Z4hcoe+kl+/teeomyxS+4HJtLdJOEs8m++KjSVq/VMDRWmWfuKamz/Z6rOpyCg2iuhcfT4aNzwOik7m83yBaL259PtWFhNG7Zgml/Hqb8fLeO7TGOuBAMEYo28p5lvRsjMBymXQcJo90bWy+wylauGXcNZ444k9AA8fztb
sQsUeB11Jt1UUMRLZYudFyCoyHFtaq76q++ouDqa6h8512XxnEXqjxCaV0Ldc2eNVyqWbKEnIULqf7s8+4PbK6FVU/Auyd1uzreLT/eqYipD5oAR1zUaXflm2+Rs/BESp95tnfjO0h7HZ2cVq0wUWkrEHget0m/qHI4+Zuh3skEgR9z99q7ufTHS1m9fxPDpELGxLTta9mzm6oPPqD8hRc9qukWfsIChm9Yb9PNVQ1czFZzD2cKuiJk6lQy160l5dVXOry/pXgLX2V9JYzeBB5Frba1J5HQviPuwD33cuCuu2jcvMn5i6if6WIhrgNWq2xL2jpdXKDVUP784k6J27KXXqL8+cWg9cGjvdkIDRU9HtYXDBZViYTsknrb73mXRUQ9UbAVvroKVj/hnuD6GjkrFdmLsl0Q4AYpvVaafv+dPZMmU3j77T0f7CCSTkfyY4+R9vFH6JKS3DauRwkMh4mXKNv5Tn6+mo29f273EBGGCK487ErunnK3r0PxS0TSVuB1YoNiCdWHYpWt5NXmddzpxg8gXVwchtGjMIwc4bYxXSE8UE9cmKIN5ulqW3N5BcbsvdR+38NEWbbA2meUG/Pe5c5fKPsX+PsrRcf2hKftVkZbjS2g0xE80bOt7+pK+eDQIRRWNwGi0rY/YalvoPKDDzhw//2+DkXgJHGRTQSlvMX3Ff92baCIQa0ttDLs+dEtsfkDv5f+zubizRRU1vKi/nkezL8A9m8AQBsZRfTFFxNxyskejUEbGoo2MhJQqinmfjaX83843yZJI+gdkiShDe14n/pm7zfcu/5elu3rZeWNQOAAI21mZHaStqrZYM0+gidNInjSJDQhveiEGX4sHHEBHH13n0sw+JKimiaaTVb0WomUqCCnzo27+mpir7+O8ucXU/rc8zRt32FL2MZefx1xV1/toai7oWAzPJEOb5/Q5SEmi8l2v/ClwaJazNG+0ragrgCTtRfFNDX58NeHiiapP5L9s/KaeYxbh23880+sDQ3IzS4YcNkh/Nj5BI0bhyYgwK3jepQp18BV6+GY/zh33uon4LVZsHcAGOEJAKFpK/ABkiSRFp7Gjood7Kvd11Eva9XjSvJw+g0wcoFL14m97DJiL7vMxWjdS3psCGV1Lewtq+ewlEiPXSfixIVowkIJP+647g8MioLxF8HGF2Hts5Ax17kLWa2K9vCYU7vUK0t+6CHib74ZbYR9YwJ3oa7gh2kVM57YUAORwf3oxj3gkSl55FGwWIi97DL0gwb5OiCBgwyPj0MXuptGoN5Y71pb1IgFcOAvpTKrt8YMfsbjRz5OTk0OuUvWMVKTj5EIRT8QMKQPJeEO91WrOIJG0pAWkUZVcxVVzVVCu8zNDI8azpSkKRwSc4ivQxH4MSMTFTOynQdqO+1rL48Qf/Mjvb9ISAycuLj35/spqpRQWkwIul5UxqqJ2fLnF1Px8ssAvkvYAuRtVF6DY7o8RJUwC9YFExcU1+VxnmZ4OzOy+OCxBOmCaDI3UVBX4HwFcPpskLRKe3vVfojyo3uhLLeZgme4N2kbc8klhM6c6dYx+y2hcZ1kBXukpR42v6ZUQbd0/vz2Ff9U/EOoPpTk0GR0GpFidDei0lbgEzKjFOdpq2ztuGPnt0qLQHPf+RByJ6rhgKcrbXVxcUSdcQbasLCeD55yFWh0sG8NFG517kLD58G1W+Cou7qPJzoaSes5IXto08qSTPGAkEbob2hDQ4k6+2zibrwRydBLt2qBTxiblEBT0ek07r8c2eriRG1kazvt3hVu1VDrz4yJHcPC5CM5v/YDACon3qxICHmZlpxcCm+5lYLrrufDBR/yy79+YVz8OK/H4Q80795D7hlnUvrU0532nT/6fP43738cOfhIH0QmGCiMTGqrtD1YD7u9PMKAdXf3IHtLeymN0I64q69u04+VJN8lbKGttbsbc6720gielPLpCfXZILu0HgnJNYmEoEhImaxsq1Wp/kLpP1BXBLogSJvu1qElrZbAkSMJHDnSreNaW1qoX7WKynffc+u4XqOmUPnXE3+8ryRso9NhZNfV7d7mrrV3seCrBWw8sNHXofglImkr8An/mfYfvjn5G+anzW97s2o/lGxXWu2Hz+/65H5MeqsA/l5XtR/dSWQKjDld2V73vPPnB4aDofPE01Jfj6XWO8l3WZZtE8LGBiWZIaQR+h+Jd99F7JVXoIuN9XUoAieIDgkgxDgVS2M6BVVG1wZLGAMRqWBughzR9qXSuPwxoqkj25pM5JFXAiBbrRj37UO2Wns42z1Ieh21S5dSt3IlWg/rsvs7Tdv+onnbNpq2b/d1KIIBSkZ8KFqNRE2TiZLajm3KqeGpSEjUm+ptbuCy1Yps6sXfvSxD/hb45T/QXOOO0Ps92b00IWtP2UsvgSwj6fUgy12ak3kcq7UtaasmMO2gSpj5UhoBYEhMCDqNRH2LmQM1zW0LFL01I8ts7VBUq1L9hazWJPTQmYqpYD9Abmkh/4orKXnkESzV1b4Oxzk2/w+eOxRW9tDZYDHBhlazwWnXgcazBVGOIssygdpADFqDz4wG/R2RtBX4BLurrKpzYupUl6uIan/6iew5cyl59FGXxnE33qq0Valfs4b8a66lYcOG7g+cfr3yuvMbqNjb/bGyDJ9fAn9+2K1GWfWnn5E180jKXnzRyaidp7K5kjpjHRISpZVK9UhmgkjaCgTeQJIkhsW5aUFKkmDmzbDgaRg8yQ3R9W/WF63n462LKdz+FgCvBV1KYGAgAKaiIvYeexxZU6e53YnZHvrBg4m/9RZS33gdqT9pxvVBQmfNYtDTTxF90YUd3m80NVJv7EOLugK/xaDT2goJdhZ3XGA3aA3cN/U+Xp77MiH6EIoffIg9EydRu6wXOsuSBEuuhrVPtyWCBjhqpe2w+F7oBEMHDduR27fZNG59krgt26Uk4/UhkHhol4fZKm3DfWdCBhCg0zC09fc+q7S+g35zr1ClA3JXg9m9Gq0+Rf1bdbM0Qt3KlZQ+/QxNf/7p1nEBtOHhhEyfTviCBVibmtw+vkdJOgysZvjrE6g90PVxf3+taCmHxMFhZ3stvJ6QJImPTviIzeduZlCokLfzBCJpK/Apsiy3tV7tWqq8qm6zLmDMycFUWIilum+t6g+LVRKJuRUNWKyebzmrX7GC+uXLqf7iy+4PTDhEuTHLVtjQQ5J1++ew4wv47iaoKejysKY//0RuaUEX4/mqSVUaITk0mZxSZdKU4UIFg8B3yCYTjVu3Yiop9XUoAidIjjGii9jCj/uWuj7YhIth4qXO63z5IUtzlvLwjtdYGahnpeUwKpNn2faZCgqRDAZ0yckel58BZVIes2gRIZMmUdJSziU/XsJp35zm8ev6I/r4eMKPP56w2bM7vL9s3zKmfjSVW1fd6pvABAOKkUmKru2uA53NyE4bfhozBs0gSBcEkoS1oYHmv//p3YXUef3uHsxxBwiqpm1vKm3VhG3YcccRfe65gCKVEHXRRb5J3Oa3tkIPHg/aruWR1Hm6ryttoa2oI6udGZmaVHaaxLGKt4epAfavd1eI
vmf4PKWIKtNJr5MeqPthGRWvvUbdypVuHVcl9Y3XGfTUk+iTkjwyvsdImaR8v60m2PSy/WNkGdY9p2xPvqJPVkBrJI1P5U/8GaESLPAJVtnK+d+fT05NDt+e8i2x6GD/OmXnSNeTtlFnn03whAm9c7v1IIOiggjQaTCarRRWNZEaE+zR60WefjpSYBCRjjiLz7gRQuNhUjfmbU3V8GOrQ/zMWxVphS4Y9NyzNO/4m4A0zwvzq5Ot1LA0fqlUdDAzRKVtv6Tghhup//VXEu78P6IvvLDnEwR9gtDwUoKSv2BL9WCgbxlA9mdyq3MASDVbech8LnPj23TKQ6ZMZsTW37BUVXk9rvCAcLYUbwGgpqWGCINnjSYHCuq9LCawa0MfgcBdjEwM49u/YFdx91JW0eefR+S//oVhWHovL7QA1j2rVO+ZjaAbuJX6NY0myuuV4oJeySNYrESefTbVH31Ew5o1DF+/jrKXX6bqnXcInjQJLN6Ry7GRp0ojdK1nC3DX5LvIrs7m8PjDvRBU92TGhwHFZJXUM310GtCWVHYaSVISm4W/+1el7fQblH9uJvToo0GjIXTGDLeP3e+ZfiPkbYDf3oKZt0DgQfOqfWsUGUl9CEy41CchCnyHSNoKfIJG0lDZXEm9qZ7cmlxiy/YrbQFxIxVhbRfRRkQQPGGCGyJ1L1qNxNCYEHaX1LG3vN7jSdvA0aMJHD3asYPTZij/uuPXB6GhFGIy2yQVukCSJILGjnEwUtcwaA2MjB5JQmA6VhnCA3XEhQozq/5I8PjxNP3+O1aji9qoAq9yaMJwlpZCg7UYi9WC1lWdrboSRa5FHwSHn+eeIPsZsiyzr3Y/AJ+E3092RTJXHqTVLel06OK8V5EsyzLN27fTtHkzqYZY8qzl7Kvdx2Fxh3kthv6OsaCApt9/J+iwwwgY0nFRU23R7QvVaAL/Z2Sisgi0u7hzpW1FUwWbDmzCIltYOGyhaxcaNAFC4pX54/51MOwo18brx6h6tkkRgYQYnH8Mj7vuWho2bqTpty3oU1KRAgIIHD4cZBldQgJx113r7pC7Z9QJoNVDxpxuDxsdM5rRMQ4+j3gYtdJ2T2kdQ8KVJHKzuZk6Yx1hAQ4YOB/MCc8q3wNBj4TPn0f4/Hkev461uRlNq5RUvyFznpIHKdsFW9/unDQfMgPO+ghqC31iRtsdj21+jM3Fm1k0dhHHDT3O1+H4JUIeQeAzHpn5CF+f9DXj4sZBWKLigKgaYvkxqoaVqmnVbyjcClveULYXPAU6+0lR2WLxmjGOysJhC/ls4WdMilCSO5kJYaI9o58Sdd65ZK5fR+xlolqzPzF+UDqyVQeSmaL6ItcH3L8Wvr8V1j7r+lj9lIrmCupMilb39vLBQJvzta+QJInCW2+j9MmnmFYeBbjQVjpAqV+9mqLb76D44Yc77bO1EAsjD4EXUOURskvrMZo7zttyanK4Y80dvPxXF626zqDRwIhjle0BLpGw1w0mZCFTppD+7bcMevopAMLmziX9+6UMeuJxt8ToFKMWwkkvQGr3lbZ9iczWjpXsknqCdEH8fPrPbDp3U+8StuBfCVtZVnRTGyt9HUmvMFdWkj1vPnsmT+mdcaIv0WhgWmtB1MaXO1duazRKN3J3HbE+YmflTvZU7cFsNfs6FL9FJG0FPuPw+MMZFjkMvVavVHie9QHMus3lcS3V1VS89Tb1a9a4IUr3k96qa5tT7h0zMoCW7GxKnngC4/79PR9cugu+vEJxslSxWhQNW2QYewakz+ry9Lrly9l7zDwq333P9cCdJKs1ES70bPsvGoMBSSNuTf2NoTFhyCZFv/r34j2uD5hxDGj0UJEF5Vmuj9ffaK4h9ztl8p4UMojyOiWhoppZWpuayLv8ckqfesrrDyZhc+YQevTRxEQrZhO9NnAZoGjDwgkaN47gI8Z3eN9kNVFQp+jEqzqLAoEnSY4IJCxQh9kqdzKRHBoxlCPij2BK0hRkWaZxyxZKn3m2Z2PbrlB1bXd9362Jrb/TlrR1Xb5NrSSU9HoM6a53KXqKP0v/5KNdH7GzYqevQwFgaGwIWo1EXYuZktoWEkMS0UhumHeamqCm0PVxfEnpP/DZhfDsWEXKxI20ZGVhLi9365gHo42MxFJZidzS4tgzb19j7L8gLEkx9yve3va+pW8nQ9V5oJi7eA7xZCzwO5p376H0sccofvAhX4dil/Q471faljz+OJVvvEn1V1/1fHDeetj2sSJ2bmlNBuxbAwf+AkMEzO9cHdSe2u9/wFRYiLm0xA2R94xVtmKxKs7p6vc0U+jZ+gWyxeLrEAQOEqDTEEgiAH8VuyHJGhgOQ2cq27vcYG7W31j9JPvyVgIQa1CSo0kRgYS2ttO2ZGfTsHoN1V9+haT3bpVPwh23k/LSi4RNUiqreq0FOECJWHgCaR9/ROyVV3R4P78uH7NsJkgXREJwgo+iEwwkJEmySSQcrGsbGxTLO8e9w71T70WSJGp/+pmKV1+lbsWK3l0sfTbog8HcBLVu6Mbop6jz1Ixedk1YamraDJztYG1uxlzppSrJ/esVLVdL9wuHv+z/hUc2PcKSvUu8E1cPBOg0pLXK0+0p6SwN0it2fAGPpcHSW9wznq/I+ll5TZ3qdu3pA3ffQ9aMmdT++JNbx22PpNGQ+tZbZG5YjyEjw2PX8Ri6ADjjXbjpbxjcKvNYmQPPjIZVj/fJBa86Yx0VzRWA6BLyJCJpK/AZ1c3VvLnjTZ5efpPygeQmNMFBhM2f32dFztWWKG9W2kaefjqhc+cQPHFizwfXFCgT65p8pUUGlMn2JT8qmlWqREIXJD/6CMlPPEHkGWe4HLcj5NbkMumDSZz//flklSqTr2E+biEWuEbTtm3k/usM8i6+xNehCJwg1qC08O9x1+e5zXH8B/eM11+o2AsbX2ZfazJWTYa3f8jXJyaSeP99xF5xuU9CBBga7qLrtqADNj3b8DQh7yPwGiMTFYmEXXZ0bdsTOmM6kf86nZApvWyD1wfB5Svh1iyIGNS7MfyAvWXK3L+38giFN99C9tFzaFi/vtO+mm+/I3vWbMqeedaVEB3np7vhf0fBji+7PSwjKoPZKbP7lPb58ARlsSKrtJ5tZdu4eeXNPLb5sd4PGJMJ5mbIXd2/Dcmyf1FeM49x67Cy1YpsNoMkEXiIZ7WNg8aOQRcV5dFreJTs5bDl9bb/b3gR6ksgfzOsfgJWPOq72Oygzl3iguIIDRDP355CGJEJfIZZNvPM1mfQyDLXrnmTgAuWKMlBFwkaO5bBzz3r8jieQq20LatrobbZRHig56ukwufNI3yeg8LvukAwNSrb656Dsacr7qi5q+HvL+Gou7o9XRMURMTCE1yM2HH21ezDaDXSYjGS25oI97Xuo8A1tJGRNG/fDno91qYmNEFBvg7p/9k77/Aoqu6Pf2Zreu8Q0gNIkSqC0lFAVEBUVBRBxYZieX2tr4j+RGxYQbCi2HtXikgRBASktxRCGqT3tnV+f0x2QyA925LM53n22c3OnXtPNpuZe88953tkWkCUdzSnyuFUpY1S0np
OlnRtM3dCRT54Oa7gllNZvxDMBtJ8w4AqzDrp9z7TaasKDsb/uuucZKBElCYU7yqRDEUGRrMRlUKeUjaHaDCAStWgU9aqZysXIZNxIL3CayNtTzfstNWZdFQbqvEbPRqv0Y1LY7WI4J7tO7+DozOayCiS5tdtCS4w6/VUHzyIuawMVVj4OcfVYaGYSkup3rcX0WRCULazIGhT6KukDDyAHsOabDotfhrT4qfZz5Y2kBDixe9Acm45vWMrWJ++nmifaB7hkbZ1GNYPvMKgIkeKQO6IxfZqyiCjVv4kfoJNuxYUCmK+/QZTWRkK7zZqB3cVFErYWJvV2n0I7P1Eeu0VIr3fzDrc0aSVSRv38tzFvsiRtjJOI9AtEC+VO2ZBIMPdGyKbvul3Frzd1IR4S0W8TuQ7Ltq2xYx+GC5+QHqdexB2vy+lZFhuFKMfdq59ZzEmcgy/XfUbt5/3KAaTiIdGSYSv7OTryKgjI+n2ylLi/1gvO2w7EL2D4gAoNthI0823O4SfD4iQtMY2fbo6JzbDsV9AUHLSQ1rYlJVLESNtTae1B4Xvf0DZuGlc+7cCo9lIdkUH1/FzEEWffkry8BHkL19+zjFLxLIlgllGxhHURdqWnXPs06OfMvSToby0+yXbDmo221wvsyOQUViFySzirVVZ1wGtQaHRkLBlMz0+eB9t7LnXCfchQ+jxwfvE/PCDfR22AKf+BbNRclT6Rdl3LDuQcEakba+AXvx3yH959IJH296hINQ5Oi3Rqh2NtM3S3zQgDgLj7DKE0sfH7pkkpopKilZ/TM6zTcv5uSyjH4Yxj0vr7o+nSxHc3uGw71OXXIefmSUkYz9kp62M0xAEgWiF5JA52X2AlDplAzpCtUhLtO2JfMfp2gKYyssp/vprTBXNjDthEXS/QHr9639a5LCtOXqUjFtuoex3x6YyKxVKIr0jMVZLUQdxwV4oFHJqaUdGEAR8LrsMdais69iRGByRCIBRKKVCb6NrW88poFBJci2dHbMJ1j4OgH7IXLKr8wE4XSAtLi0Vr0VRpOKvrRhy85rUNrQn6m4RYDASWyo5HuRiZC2j5vARTCUlDTpU5EIeMs6gZ62mbW6ZjuLK+o7UEI8QRMR6EijGggIMuXltH3D7W5I+478ftb2PDkpKrZ5tbIhXmx1XCjc3PEeMaPCYIAh4jhhhf4ctQMYO6bnHhZLDshGqDFUUVBc47V7VGJbaF0m55fhr/ZndZzYXdbuonZ3WOm0turAdDYvdNpZGcDSCUkHu889T/MknGPPznW1O2xjzCIT1r/u5/LRLOmyhLktInrvYF9lpK+NUYqqkdKy0INvs0pp1Oo4NGkzKpRMxldtIXN4OWLSszq7Wa28ybp5DzpMLKV+7tvnGV39Q91qpafZGUfL991T+vd2uAvNNYfksZWkEGRnn0C88DLOxNtW2MNU2nV4wDx4+AWMft01/rsyBLyH3ELj5kjHoBsyiGQ+VJ6cKJQkdS6StMTeXzHnzSBk3DlHvnGg1z4tHErd+HVseHAXIxchaSvjiZ4n++mt8r7zynGOyPIKMM/DSqogMkIImzta1tUROnSw9iSiK5L3+OskXj6Tw/ffO7qblmA2SA6ILFpi0zFPjagM37Ikois0HaLSHzJ3Sc4+mNY63n97O2K/Gcsta16pREBPkiVIhUF5jJK/cRhq0sWNBUELBcSjJsE2fjkIU6yKE422vZ3ti2nSyH/wPxuJim/bdEAp3d/xmXkvQ3Xc1uaHg8ly9qu51C9bhzsKyqSdH2toX2Wkr4zxKMoguk3bATmptE2WrP5kOBgOmkhIUXq7rvIu1FCNzsDyC96RJaOLjWpZyvv9z6VmpAZNekkhogoDZNxN07z3433C9DSxtGaIosnDbQlbsW8HRHCnyQy5C1nko+/13Tj/5pOMqIcu0C39PDQpjCAC7Tx23TaceAeDma5u+XJ0+02Hs/2DCIk7qpYVNuEcPQCDQU0OAp1TJ2VRcjCYuDm18PApt61NsbYHSyxNNZCQxfrGAXIyspSg0Gtz79UUdEVHv/eKaYkp0JQD08O7hBMtkujKNSST08OmBgEC5QaoOro2OBkHAVNQOx0vPKdLzya1QU9r2fjog7SlCVvnPP2TcehulP/7YbNuqvXtJm34Vpx76b6vHaRFmc53TthlpO8u9IdTTtTKntColUYEeACTnVpBVnsWatDXsy9vX9k7d/eo+j44WbSsIcOt6uOINiG5nxPFZ6E+cQHfsGOV//onSQWvz8KeeInjBAlRBQQ4Zzy4cri3w18J1uDMwmU1klEkbFHKkrX2Rq0bIOI/jvxNdK2WQVnnKJl1qExOI37IZY26eS1dftsgjODrSNnDuHALn3db8Z3O2hq3lZ2h0p0/TvRvB8+fb2OKmKdYV833K9wgIdC97FZAjbTsTBStWoktKwnPECHwmT3a2OTItwFfZjVJSOZyfYvvOdRWg7cT/32p3GC0tsjVZWxgQPACtWXLgnbkZ5da7N3G//oJoMjnFzDOxRFbITtv2YYmyDfcMx0Pt4VxjZLocvcO8WX8k95xiZFqllm5e3ciqyCKtNI3Bl1xC4vgJKL3aESkaFA9BiVCQJDm2+l3dTus7DpY5f1v0ycvX/0Hltm2oQkPxnTq1ybZKPz90x46hz8jAVFqK0tfGG58FxyWHu9pDKsDVBK6sd5kQ4sWJ/EqScss5UPk7K/avYHr8dAaEDGh7pxfcJhVRjRtnMzsdhm83GHyzzbtVR0bS48NVGE6dRlDbv/h2p6AN63BncKryFHqzHo1CQ7jnucURZWyH7LSVcR7J64gxGIG61Kv2OloFQUAdEoI6JMQWFtqN+Npd9pMFUlECpYM0WFt0s2yo6Jjl2cVuGBZHQbhnOCdSpDRhVyrWI9M+fK+ajjEnF01srLNNkWkh3T3jKCpNpbrGzXadlmTCV7OhNAv+c0yqrNuZqC4GrU+932tU91GM6j6Kl9Ye4w9SG7yuOUS3sAkMuXn0XPUXb5+MQfni3U61pSNQtnYduuRkvMaOwb1Pn3rH+gf15/erfqe4xv6pozIyZ9PTEmmbe66sWLRvNFkVWZwsO8nQsKE2GvAyyWl7/Pcu47QVRZHUPIs8QuvnqQGzbkAdFor7+ec321YbE0O3V1/Bc/hw2ztsAfxj4OZfoCwblE2vK1xZ9iUx1Ju1h3NJzqtgVDcpSrDdUj99Z7TfsE6GQqvF88KmZTTsgajXo8/KQtvR1hAdcB3ew6cHys42N3cxZKetjPO4djU9ktch7H7KmnoV5N6B0xhaQYSfOxqVAr3RTFZxFVGB9te3OhPRbKb6339xHzQIQXGWSorZ1LDYueVnc/3oLmNREQXLluE7fTru/Zrecbc1lh38cI8eHDeY0SgV9AiQo5Q6C4Fz5jjbBJlWMjZiKjv298TdN6L5xi3FOwwKU0FXClm7mtXQ63D8eA8Up8PUNyFiYL1DlsI1rphBoNBqMHzzE/6iSLxSTotrjrJff6V83ToU7m7nOG2VCiXdvbvT3bu7k6yT6cr0Cpe0yJNyys8JJIjxjWFr9lbbRtP3vA
y2vSZF2poMzTr+OgM5ZTVU6k2oFII1Lb81aKKjCbz11ha3t2t2ktoNYkY220wU64rYxfi43j3CshmanFvO7DP0m7scNWXwzS0QPx4uuL1TbIwbi4pIHj0GzGZ67tmNws2GgQT2ppXrcGciF1B1HLLTVsZ5aDzR9plOt2PvWFOv2uu0LXzvPQStGz6TJ7m0jo1SIRAb5MmxnHJO5Fc61GkriiJpV1+N7shRenz4IZ4XnqVHNfaxxk9uYGev7JdfKP7sc6oPHCTmm69tbG3TWCaDXgopJSMmyBOVUpbqlpFxFnV63TaUflGqpWrGh76B4791Lqdt2hY49otUvEQlLSpEUcRgNqBRakjOq59Oa9brSZs2HW18PBHPL0Hh4bxNKqWfH8H3348mJhqlt7fT7OgoeI8fh8LdDY+hNopWlJGxEdGBnmhVCqoNJjKKqogJqpuTRp/lzKrcsYPiL7/ErVdvgu64vW0Ddh8CnsFQmS9p28aNbedv4Pqk5kl6tj0CPVA7eJ4qGo0IKscv+Yt1xZTpyxAQiPKxTcFpW5IQIt23kvMq6OEtRTAX64opqSnBz82v7R1XFkLyWum+fv5MG1hqZ9I2Q8p6KDoBF95l066NxcWU/vAjHoMH4d6/v037bgqlvz9KT09EkwnDqdNoYzuQU7GV63BnMjZyLL5aX4I9gp1tSqdH9m7IOB1Lykx7U1JEUaRgxUpyFy/GVFLSbrvsjbN0bQVBwL1vPxSenhiys9vdn3v//vhccQV+M6+1gXWtw/KdEYxSgYP4UNeLRpNpH6IooktNRZdiB41UGZtjqYp9Ir8cvdFgu457XSY9H/vNdn06G7MJ1jwuvR5yC4T0BqCwppChnw7l8u8uJ71QSle2LC71qanoT5ygcudOhJYUlLQzQXfcTvGFPVmbs4ljRcecbY5L4zt1KhEvvNDgwvWFf17grX1vUVBd4ATLZLo6SoVAYqh0jTl2un4xMksElWW+ZczLo/z3NVRs3tz2ARVKGDwXLpwPvl0jujwlT7qWx7dBGqHw/fep3LET0Whs1Xn6rGyy7ruf9Fk3Iopiq8dtkIp8+P0RONJ8QbQzJczcVK4X6Rgb7IlCgNJqA5U6JWGeYYANJBJOboEf7oK/lrbfSEdgKZqWcInNu67avZu8F17g9BP/s3nfTSEIArG//kLiPzs7lsO2gxHpE8nU+KmMiBjhbFM6PXKkrYzjMerhoysgZhRc/ADRPtFszd7a7pQU0WDA/4br0aWloe7h+tWXLZpWlmqyjiT4vgWEPvYoChss+t0HDKDbgAHtN6oNWCZWVZX+gGumEMu0j8L33iN/6Sv4XHYZ3V7pIBPgLkxkgAce3T9G4XmM31O1TO053jYdx08AhRoKk6EgGYISbNOvM9n7MeQeBDdfGPu49e30snTMopkaowGTWYGXVkWojxYATVQUPT54H2NxscsU2/z4yMd8efxLbu17K70CejnbnA6HwWzgi2NfYBSNXJVwlbPNkemi9Arz5mB2Kcdyypncr66gjMVpm12Rjd6kx2PIEIIffLBF2qpNMu6J9p3fwbDM9eNaOU815OSQ99LLIAgkbP0LVWBgi89VeLhTsXEjol6PLjkZt8TEVo3dIBnbYedKSPsLzmu6IJqrp067qZVEBXqSVlBJcm4F0T7R5FTmkFaa1r5iZLFjpSjbguOS9JG/60UZWxFFSPlDeh1ve6et0tsHr7Fj0cbH2bzv5mjN/4qMjKsjO21lHE/6VsjcIaVhjHnMejNvr16WQqMh5D//sYWFDiHWGpHm2Ehb6Bw3MoPJQFZ5FgB5RVKhBbkIWefDY9AgBI3G2WbItBC1UoGHRo1OYWJfTpLtnLZuvhB9MZzYKEkkBN1nm36dRU0pbPg/6fWYx8AjwHpoUMgg/rzmT346mMT//VtCXIiX1UGr8PDAc4RrRTQMMITjltqNbj0dq83ekdBnZaH09m6wKJDRbGTBoAVklmcS6hHqBOtkZKBXeG0xspz6kbaBboF4qb2oMFSQUZZBfEQ8QbfPc4aJHRpLVl1ri5CJBgO+V12FqbS01XN3VUAAYU8/jdt559nGYQuQuVN67jGs6Xa4dhEyCwkhXrVO23JifGPYcXoHaWXt1G9294PICyQHd8p6GHqbTWy1C3lHpYJyKjeIvsjm3XteOOxcGT6ZTkGVoYqfUn8ixjeGC8IucJlAgs6KLI8g43gs6a09J4FCQa+AXoyIGNG+Xc0OiDMjbc/EWFTUpvNEg4Gijz/BWFhoY4taRmZ5JibRhIfKg5O5kmi+JYVYpvPgPmAAibv+kaNsOxC9tDOpSHmYSKWNozZ6TZGeO4NEwl9LoaoAAhPOWdAJgkCwRzCVFSGA62cQ9HlzHVO+SufS3BBnm+Ky5C5+jqRhF1LyzTfnHHNXuTO371wWDl8oL3pknEbvsFp5hJzyeu8LglAXXNFeZ9bZGPWQ+mddenYnxuK0bW1wgSYykojnFhO5fFmbxvWbPg23njZy2AJk7JCeI5vXlnflImQWEmpl1ZLyKs7Rb24X8ROk5+Q/2t+XPUmp/d+LGQVq50su2RJTWRk5zy4m47Z5tpMHkbGSWpLK4p2Leeyvx+S5iwOQnbYyjkUU4fjv0uue0gK8f3B/3r7kbW7v38aCBrUYi4tbrffkTCyFHgoqdJRW21D7sYWIRiMZt80j+eKR6LOyWn1+xV9/kbt4MWkzrkY0m+1gYdNYFg/dvaIorzGhECA6yHlFeWTsg6BUotBqnW2GTCvoGxKPaAjgZGGNbTvuORnOmwYXdPAoL7MZsv+VXk9c3Gjl9LOLkAEUf/Ellf/8g2hw/D2jMbwuvgj3wYNReMqRto1hKi0FQBPr+BRRGZmW0LPWaZteWEWlrv5c+mxnlrm6mqp//6Vq1672DXrwa/h4Omx4pn39uDhlNQZyy3RAXZadM2i340pfBaf3S687TaSt9L1Pya2wWean1HHtpnXaFjDq2t+fvbBsmNhBGsFUUYG5xsbzwFagcHOj+Msvqdy6FUP2KafZ0VlRKpSM6T6G4RHDnW1Kl0CWR5BxLDkHoCwL1B4QO9qmXZ96+BEqd+wg4vkl+E6ZYtO+7YG3m5oQby155TpO5FcwsIe/Q8cXVCrJeWA2U7VjB5qrr27d+Wo1bv364TFkCILC8fs/lkmVn7obYKl+rHS4HTKOQzSZEJTy39jVsVuRRd/ucO1Htu3TGSgUcPPPUsXmmHPvg89sfwYvjRfH8xMBwRppaywoIGfRIhAEeu7ZjaBu2NnraIIXLCAY0Jl0GEwG1I04obsy0Z99iqmkpEHH9uHCw2gUGqJ8otAoZSkYGecQ6KUl2FtLfrmOpNzyenPSef3ncWu/W4n0jgSgfP16Tj38CO6DBhH92adtHzRxIiBIa4PSrE5blOxEbUZdiLcWH7eWXx/1GRkIWjfUoe3LYjBVVFD47ntUbt1K9JdfSPP/tnDqXzAbwCsM/JrWaT1Twszi9HdF6iJty4n26QdAVnkWBrMBtaId97Kw/uAVChW5kP43xI21hbm2xWyWomuVGkiYYPPuiz/7nPw33yRwz
hxC/vOgzftvDkGjIeT++1AGBqL0kTMxbc15gefx5vg3nW1Gl0GOtJVxLJa01rhx56RhlOnLKNWVtrlrw+lTYDCgCg5uj4UOxSKRcMJJEgkhjzxM3Pp1+LXSYQvgNXIkMV9/5ZQbMdRFfKhM0mS2tcUdZDoOppISMubdTvKo0Yh6vbPNkWmGmCAPNEHrOWJcToXe8ZrdHQJBgNgx0vMZ6E16vk3+llWHVpFeKN0XLJG25qoqvMaNw+PCYSg8XCurYP6G+Vzw6QX8fepvZ5visij9/Bp0tD+/83mu+ukq/sz40wlWycjU0asRiYQY3xji/OKsmwpuffqgCglBHRbWvgE9gyCyNmLTkoXXCUnNa5uebf4bb5IyejRFH7Vvs1JQqyn58ktqDh+mYstfbe/IIo3QY9g5966zOVPCLMTDdaVz4oK9UAhQUmVAYfbDXeWOUTSSXZ7dvo4FoS56NfdQ+w21BwoFzPoaHjkJAbE2716XkgwGA8rAgOYb24nAW2/Fb9o0lD4+TrNBRsYWyE5bGcdy/Ffpuedl9d5+dsezXPT5RXx1/Ks2dx3700/E/7mh/RVtHYi1GFmBcxwbbj17oomMbFcfbd6xbycWeQR9dRDg+rqPMm1H4eNDzeHDmAoLqT540NnmyDRDQog3av+dGN33caww1badiyLkH4etr0qpmh0Jswn+XgY1ZY02ySjLwCya8VB5otN5olEp6O4vOWg1PXoQ+dZyolatcpTFLcZd5Y5oMpGRfdTZpnQ4OkIKsUzXoLelGNnpxq9RAJrYWBK2bLaN1nyv2vXAsV/b35eLktJGPVuLrIpbv37tGl+h1RLy0H/o9vrreI0a2faOCpKk5xbo2cb4xrDx2o18MOkDl9a7dFMr6REg3WNT8yutUcE2kUgY8wg8lAIj7m1/X/ZEYx/JjogXXiDuj/X4XnmlXfqXcS6lulJZK9iByE5bGcdhMkBIH/AIrE2JqiPYXYqOza/Ob3P3gkKBOiKiQ+lfWouR5Tm3GBnQKl3ayp3/OF0/eHT30YyNHEtxiVRNt7WTYZmOg6BQEP7cYmJ+/AH3gQOdbY5MM/h5aFAaQwHYlX3c9gN8cjX8sQhObLJ93/Zk7yew7gl4b7yUltgAFgdekLY7IBAX7IVS4boLXgsX7tfxwWsmgt7+wdmmuBynFz5F9sMPU3M86ZxjxTXFlOhKAOjh3cPBlsnI1KdnaMORtgCrDq3if1v/R1FNkW2dcLX1LTi5FWranm3nytRF2rbOOdbj3XdI+GuLTYJR/K6+Gp+Jl7Yv0OKqd+D+Q9B/ZrNNBUEgyD2IPoF92j6eg4iv1bVNzq0g3i+eaJ9oTKKp/R379QAvF83+FEUoz7XrEIIgoOneHVWA8yJtRbMZfXo65Zs2Oc2GzojJbGLcV+O46IuLyKvKc7Y5XQJZ01bGcSjVcNXbUrSRor4u5fW9r2dW71l4abqW483ZkbYgpdzmvbyU8k0bifv552aLydQcTyLj5ptRRYQTv2YNgsY5GnyWwnWDt60H9NZiAjKdE+8xY5xtgkwr8FVFUEIqR/JTbNuxIEgFyf55W8rc6HVZ8+e4AjVl8Of/Sa8Hz5XSEhvAEt3jhpR2fOZmlCtrOvt3j8VTB1Un5Mn7mYiiSPnatZhKSwm4afY5xy1O+nDPcDzUriV5IdP16BVe57QVRbGec/bL41+SXZHN1PipBITVOWHObtdqguIhMAEKkyHlD+g7o+19uSgWffe2yHi5nOSbX/uy81yRxFAv/jiaS3JeOYunLrZPZLAoNisp4VDyjsCKEVLU9C1rXMs2G2IqLSV14iQAEnfvQunVtfwM9uJU5Sn0Zj0YIdAt0NnmdAnkSFsZx6M4d9Hpo/Fpl8O27LffyHnuOSp37GiPZQ7HEml7sqAKk9k5KQaCuzsV27ZiPHWa8j83NtvekJWJ0t8f9z59neawtVBUqaewUtI4jQuRK5fLyLgK4R5S1KDFKWVTek6Wno+vkTYBOwJ/vQyV+ZJz4oJ5jTazfF5mvaQBaJF9EQ0Gki4Yxolp0zEWF9vd3NYSduEYHp2jZOGd8oKoHmYzES++QNA996DtmXjOYYuT3pUL9ch0HeJDpMj+0moDOWX1q75f2/Na5g+YT5iHtKFUfeAAaVfNIOPmOe0f2LL5lt75NLENJjPphZKUT0s1bUVRtEs2m2g0UvL9D6TPmYu50r4Zfq/seYVX9rxiLUbmyliLkeVW2N5hm7kLPrwcvr7Ztv22l+T10rPW2y4O2/zly8l55v+oOXbM5n23BpW/P+qoHrj16YOpsNCptnQmLHOXHj49UDbg15GxPXKkrYxj0JVDURqE9bPLzaFi82ZKf/wJVUAgnhc2r7XkKnTzc0erUqAzmskqriIq0PGOR0EQCH34YQStG57Dm//svMePx2vkSExlTWue2ZOC6gIAUnOl71I3P3c8NPLlrLNT9e9eKjb+idfYsXgMGuRsc2SaIME/lqO5kF+TafvOoy8GrS9UFUDWbqkoiitTdAJ2rJBeT1wsZZ00gqXAYnmZH1AXaas/eRJzZSWGzEyUvr72tLZNRAcnciJcAH0xpbpSfLWuZ6MzEJRKvEaPxmv06AaPW/7esp6tjCugVSmJDfIkOa+CY6fLCfetKxh8S99b6rVVeHlRc+QIgpsbotHYvrT7obdBv2sgtG/b+3BR0gurMJpFPDRKwn3dWnSO7vhxMubMxXvSRMIXLbKdMQoFhStXok9Pp/Tnn/G/7rqWn7vmMWkdd9ECiBrRZFNRFPk26VvK9GVMiZnSTqPtjyVTLyWvLuux3RHkFlQaOPkXqD3BqAOVi0j4pfwhPSdcYpfuS3/6CUN6Bl6jR0GvXnYZo6XErVnj0rrKHRHL3CXGN8a5hnQhnB5pW11dTVVVXTGR9PR0XnvtNdatW+dEq2RsTtJaeHskfDy90SYr9q/gtnW3cSD/QKu79770UgLmzsXjgqHtsdLhKBQCMUGSo9aSPuUMvMePx+vii1qceitoNKiCguxsVeN8eOhDxn41lncPvwHU7ZLLdG5Kv/+ewnffo3ytfH9wdfqHJgBQKeZisnU0rFJdt9A43gGK16x7Ekx6iBsHCZc22kwURWv0Qk6BVBDI4rTVxMcTv+EPIt9eidCItIIz8VR7EuIuRQfbJbq6k2IpqCkvfGRchV6WYmQN6NqeiSY6mm6vvUbsL79Ae2Vb/HrYLajD2VilEYK9Wuw4qti8BVNJCca8ttf5aAhBoSDo7rsIfuABvC9ppbPu+O+Q9HuLCoCaRTPzB8zn+l7XE+UT1UZrHYf0t5Gy93LLKpn16ywu+vwiSnU20FgO6w9eoWCodJ1I8poyyNguvY6fYPPuRVEk5IEH8L/pJtwHDLB5/61FdtjaHsvcRc4SchxOn/lPnTqV1atXA1BSUsKwYcNYunQpU6dOZcWKFU62TsZmHP9Neo4Y0GiT/fn72Xl6J8eLW1+4xnv8eEIfebhDRt9Z0qVO5Du/GFlzGAsKnG0CAOWGchSCAn2NHwDxLUw5k+nYeE8Yj++0aXi0ICJcxrkM6R6HaFaCYCC7/JTt
B7BWHP/N9n3bkrQtcOwXEJQw8bkmnRKFNYWUG8oREKio9EOpEIiuzb4QBAF1t254DBniKMtbTU9NJNdsMaH/z9NyReFayjdsoPrgIUS9vsHj1khbeeEj4yL0CrPo2tbPpjKZTWSUZbDjtCRDJigU+EyaiKZ7N9s6RTrZtaPOadvyTLrAW2+hx0cfEXR741I6bcV36lSC7rgdVWArdCgr8qA4DRAgsvngGKVCyQ29b+DxYY/jpmpZdLEzcdcoifSXNMVP5NeQW5VLuaHcuonaLgShzjFqiW51NmmbwWyEgFgIjLN594Ig4DNpEmFPPO6SmUEy7UeOtHU8Tnfa/vvvv4wcORKAb775htDQUNLT01m9ejVvvPGGk62TsQlGfZ12Ts/Gi8bE+Ej/+JYLQVfBUozMmZG2IBUkK1q9msz59zS44DYWF5Mydhwnr78BU4VzbX16xNPsmrULU9lgQI607Sp4jR5NxPNL5KJkHYCoAC9EgxSN/29Oku0HiJ8ACjWUZUN5ju37txWB8dD/Ohh6K4T0brKp5d4XoA0DUU1UoAcaldOnaS2me2AMU3eKeP1zFH1qqrPNcTqi2cyphx/h5DXXoEs7ec5xg9lg1XuUFz4yrkJvSzGy0/UjbYtqipjy/RTuWH8HelPDmxDtojwHvp0Hb4/qVI7b1DwpICO+FUXIBJUKz2EXuESUIgAZtfVCQs4Dt87phEusXUek5JXz/Mjn+e7K7+gT2Mc2nVuctpa1sLOx2NFE5k9nwlRSQsbtt5Ny6UREUwepg+DiWDKq5LmL43D6aqCqqgpvb2mCsG7dOq666ioUCgUXXngh6enpTrZOxiakbwVdGXiGQLfGo4Qs//itTa00lZSgz8rqsBdiS6RtqgtE2ua/9joVGzZQvWfPOceq9+5FNJkw62pcovqmRqnhRJ4BaN1kWEZGxv6olArckQrW7MtJtv0Abr4w93d4OA28w2zfv63wiYCr3oZJLzTb1JJu5q2MAOqKkAHkvfYaJd98Y/fiMe0hKiieby9SsHnWeShbE8XVSTGXleE+cCCq8HC0cbHnHM8qz8IoGnFXuRPiEeIEC2VkzqVnmCSPkJpfgd5otr4f5B6El9oLs2gmoywDkCqzl3z/A4WrPmz/wFofOPoz5ByAnIPt789FSDlDHsGVqNq7l+z/PowhN7f5xpk7pecW6scfKTzCwfyDVBmal1JwFeJrdW2TcisYEjaEBP8E1E3oz7eKuLEgKKDgOJRk2KbPtiKKdRG/8fbRsy3fuBH9yZMuk3Gj8PamatduDBkZ6NOd/Pl3Asr15dbaMnKWkONwutM2Pj6eH374gczMTNauXcull0q7Pnl5efj4+DjZOhmbYElf7TkJmtDiszhtW5uOUrZmLakTLiHr3gVtNtGZWCJtnS2PoPDwIPCOOwh98n9oExLOOe49bhzxmzYS/sz/OcG6cymvMXC6VKpuHB/s7WRrZByJsaCA6gOt176WcSxB2kgAkovsFHUZORTULpp6aTbX/7kFOrSWSFuFUXLgWTajjMXFFK58m9P/e9JlFkENEe0TzfcjFKzpZ0Ll7+9sc5yO0s+PHu+9S8LGPxss0mSZ60T7RKMQnD4dl5EBIMLXDW83FUazWC8DTBAE6wLdElxhLCzi9GOPkf/664hGY/sG1nhIut9QJ6nWwRFFkRO1xa3iWhhckPvCixS8867d5cjyli6l7OefKfnyq+YbWyJtI1smTbVi3wpu+O0Gfkr9qR0WOhbLJmlyXtNazm3C3R+6XyC9dna0rWiGCYvg/Osh+iKbd2+uqSFrwX2kTpqMISvL5v23BUGpJGLJEqI++wx1twhnm9PhscxVg9yD8NK41mZUZ8bp5dYXLlzIDTfcwAMPPMD48eMZPnw4IEXdDhw4sE19Ll++nJdeeomcnBzOP/983nzzTS644IJG25eUlPDEE0/w3XffUVRURFRUFK+99hqXXdZ4Kr9MCxFFSbwempRGgLrdmuyKbPQmPRqlpkVDmMrKENRqNJGR7bHUacTW7r4XVOgorTbg626jnd02EHTH7U0eV4eEoA5xbkTQtuxtrNy/kjivIUA8wd5afD2c95nJOJaqXbtIv2k26ogI4jb8IRcYcGGifKLJLoNTlQ6IbDCbW+QYdRgbFkHRCbj0WfCPbtEpFideVWUAcEYGgclEwJw5GAsLXSLLoTGifaMBSC9Px2g2olI4fYrp0lgcX3KkiowrIQgCvcN8+OdkEcdyyugdXhdAE+0bzaHCQ9bvriY6Cs+LLkIbH4e5xgZZWD0nS8Ulj/8GYx5tX18uQH65jnKdEYUAUYEezbY3VVRQ9MknYDDgPWG8XQv+Btx8MxU9ovC+pJlCVIZqOL1fet3CSFvrta32ntARSAyVgj9S8iooqinix5QfKdeXs2CQjQKCzrtSyhDydfJaVaGE/tdKDztgKizEvW9fjHl5qLt3t2nfoihiNBoxtSGzVjNmNAB6UYSaGpva1dXIKs4iXBNO34C+1MifZbMolUpUKlW716tOn1FfffXVXHzxxZw+fZrzzz/f+v748eOZPn16q/v78ssvefDBB1m5ciXDhg3jtddeY+LEiRw/fpyQBpxNer2eSy65hJCQEL755hu6detGeno6fn5+7fm1ZCzkHICyLFB7QOyYJpsGuQfhqfak0lBJZnkmcX4tE0cPun0egbfegthBLxxeWhWhPlpyy3ScyK9gYA/Xi1ASzWaXqVh+rOgY+/L3YdT7AfH1UohlOj9uffogaDQo/Hwxl5ejlDMyXJbzguL5uwxKjNn2G2TXe7DzHbhoAQy80X7jtIaiE7BjBZj0MODGFjttR3YfiZfai/U7pLlKQm26piooiNBHH7GXtTYj3DMcrVKLuryGjJ+/JnLoaNQRXTeqpbn7ZklNCQpBIWvCybgcvcK9Jaft6XI4I37m7Iw4QaGgx/vv2W7gxEmAIDkJS7PA17ZOH0eTUhtlGxXoiValbLa9IAiEPf4Y1YcOoY09V1LFlvhccgk+l7QgPb4yH7oPhfJT4BfVbHODyUBmeSZQV6ukIxAXImU9FlToyS0v55U9r6BSqLhrwF2oFTYIDBk+X3p0ctTduhH9+WeIJpNNgyr0ej2nT5+mqqrjSG50VgL0ATwS/wgeag/S0mxQrK8L4OHhQXh4OBpNywISG8LpTtvS0lI0Gs05UbXx8fGoGkgna45XXnmFefPmMXfuXABWrlzJr7/+ygcffMCjj567a/vBBx9QVFTE33//jVotXZSjo6Nb/4vINExwb7jpByhJB7V7k00FQSDGJ4ZDhYdIK01rsdMWpNQHwbPllVldjbhgL3LLdKTmVzrdaSsajVRu20b14cME3303otFI6pQpeAwcRMgjDzs97dW6WDBaHBuy07YrofDwIOHvbS4dcSgjMaRbIiv2n4eKcMyi2T4p4JUFkk7csd9cx2m7fqHksI0bB4kTW3za9b2uZ2LkDL76XUqfjG1FtXFXQCEoeGPcGwQ88Ra67c9Q/tijBNx8s7PNcgqi0UjyxSPRREfTfcVbDd43HxzyIPcMvAeD2eAEC2V
kGqdnmLRhdDSnfqq4VR7BXgWDvYIhchhk7pCy9C6YZ59xHESqVc+2Zddyhacn/tdfj0uFbvj1gFt+l7JZWuCEy6zIxCSa8FB5dCitbg+NisgAdzKLqikp88Bd5U61sZrs8uwOFTHcJDVlsPt9Scs2tE+L/p5tRVA2v0nRUsxmM2lpaSiVSiIiItBoNK12CItmM+bqakS93unr2I7O6crTeOg9CHIPwt9N/iybQhRF9Ho9+fn5pKWlkZCQgKKNQXBOd9ped911XHHFFdx999313v/qq6/46aef+O23lusa6fV69uzZw2OPPWZ9T6FQMGHCBLZv397gOT/99BPDhw9n/vz5/PjjjwQHB3PDDTfwyCOPoGzkgqPT6dDpdNafy8rKWmxjl0OlkQTYW8jZqVddhdhgT/5OLeTEGfphzsKQk0vmHXeCIOA3dSq6kycxpGdQUVZO+DNPO9s863ejquKsFGKZLoPssO0Y9A0PoyZrNjVApc6Et5sdnLY9L4NNSyD1T9BXSbqIziTtL6mYjqCAic+1elFkiczq7u+Oh0aaohlyc1EFB7tMtkNTjIgYQeHFRynJK0Fwa3qjtjOjS0nBVFKCLiUFpW/j1dY1Sk2LpaBkZBxFr9piZMdz6q9vLM6rtNI0RFG0Ok5EUZTSoUNDbTD4ZZLT9tivncBpK9WqcLUiZGdiyMuj5Kuv8b7kEtx6JjbesIX3H6tWt290h5OvSgjxJrOompT8KqJ8ojhWdIyTZSdt67QtyYDyHIhsXLbRbqRthj8Wwb+rYcFem3cviiKIos3nKnq9HrPZTGRkJB4ebZvjiSYTNSdOAKANDm5QZ16mZZhqTCjUCrw8vHDTuGhdCRfC3d0dtVpNeno6er0eN7e2fWZOXwHs3LmTsWPPdeqNGTOGnTt3tqqvgoICTCYToWdNGkJDQ8nJyWnwnBMnTvDNN99gMpn47bffePLJJ1m6dCnPPvtso+MsWbIEX19f6yOyg2qpuiKWXfyWFiPTpaWRedfd5C9fbker7I9lQpfqAk5bTfdueE+ahP+NN4JSieeIEUR99hlhC59EaEdYvy0QRdH63cgvlhbCloqvMl0PURRdujBTV8fXXU2QlxaAtAI7FVoM6ydpxBmrpQWJMzGbYG3tpvGQWyCkd4tPLaguILM8k6TcUqBuM0o0mUidOImkIUMxnDplc5PtQcAttxD36y/4z7SPZl5HQJuYSOxvv9Httdc6hLNdRuZMLJG2uWU6iir11vejfKIQECg3lFNYUwiAISeHpGEXkjpxUvuLkYG0ERfaD6JsXyTJ0VgjbVsQXKBLTqb8zz8xV1fb26x65D3/AgXLllH8ySfnHjSbobqkVf1ZorA7ola3JXMvJbe81evRFpG0Fl7rBz/da7s+W4OlCFp8C2Qx2oD+xAmSho8g6/4H7NJ/WyMUQYr8VXh5ofT1RTy7UKxMq4jyjiLaJxoPlZODJDoQ7fnuWvuwgR3tQqfTYWzgJm8wGKh2wI3LbDYTEhLCO++8w+DBg5k5cyZPPPEEK1eubPScxx57jNLSUusjMzPT7nZ2SA5+A2seg1P7WnyKRS+rpalXuuNJVGzcSMXmLW0w0HWwFCM7kW8nx0YryH9zGdrEBMKeeBx1WBiCIOAxaCA+kyeT/9Zb5L+5zGm2FeuKKdOXISBwKl9KN5MjbbsmpxctInnUKGqOHHG2KTJNEBPsgaAqY3d2qn0GEASpeA1IkVnOZO8nkHNQKjYy5vFWnfpDyg9c9t1lfJH2MgDxtfcEY04OmM2IZjMqW0Sx2ZmcyhxWHV7FewdtqHPZAREUCrSxMXhd3LDj6VDBIW787UZe2fOKgy2TkWkeL62KHgHSgvzYGdG2WqWWCC9Jp9oyT1eFhIDJBGYzhmwb6JcHJcBdW2H0f9vfl5OxZE60JNK2+Ouvybp7PrnPPWdvs+rhf8P1uA8ahOfFF597sCAJXoiGd8dLhaVbQEcsQmYhobYYWXJeRd161JaZn5EXSFk4+cekiFtHIoqQ8of0OsE+TtvqvXsxl5ZiKiy0S//tRRsdjSYyEoWTA5A6OmqlGk+NJ0qF7SQwZJrH6U7bCy64gHfeeeec91euXMngwYNb1VdQUBBKpZLc3Nx67+fm5hIWFtbgOeHh4SQmJtaTQujduzc5OTno9foGz9Fqtfj4+NR7yDTA3o9hx1uQ1nKHqjX1qiytRRF0bn37ELrwSQJunNVWK12C2CDJAZleWIXR5OQdQKWCgjfeJP+tt+q9nf/WWxS88SYonXfZsCwSgtxCEc0a/DzUBHnJN9+uiDEnF1N+AVU7djjbFJkmUPpuwyvhOb472fhGaLvpeZn0nLRGinZ1BqIIe1ZJr0c/Cp6BrTq90lCJWqFGVy3JviSESot8dbdu9Px3D7G//GxTjTh7UVhTyKt7XuXjIx8DYJaLhjRIcnEy+/P3c6zwmLNNkZFpEEu07fGzdG2txcjK6oqRxXz3LT337EYT1Xyhqq5Chc7I6VKpQHJLNG1VAYGoIsLxGjPGzpbVx2PIEKI/+xSfiZeeezBzByBKNUlaKHVgiUztiAUWLZG2SbkV9tFvdveH7rWyCJaoV0eRdxTKskHlBtENOOhtgO/UqUR//TXBD9gn0lamcxIdHc1rr73mbDNcHqc7bZ999lnee+89Ro0axdNPP83TTz/NqFGj+OCDD3iulbuNGo2GwYMHs2HDBut7ZrOZDRs2MHz48AbPueiii0hJScF8Rqh8UlJSuyu8dXmqS+DkVul1ryktPs2aeqWvS71qCk337gTccAO+V17ZRkNdg25+7mhVCvQmM1nFjk2NOpvgu+8maMG9FLzxJsmjRpN29TXkvfIKBW+8SdCCewk+S3/akVgmg74qKdIjPtirw2lmydiGwNtvp8dHH+F/003ONkWmCeL8oxBFgXKdHa9r0ReD1leqcp21237jNIUgwJzfYOISGHpbq0+/b9B97Jq1i4rcEUD9DAJBpULTvWNUUY/xiWFyzGTuqhlO8rjxZHXBxZtZryf3hRcp/fVXRFPDmwjDI4bz0qiXuOk8+fol45r0rnXaHjvdfDEyTVQUQm0xZ5uhr4Sjv4CpYxbqS6vNnAvy0uDn0fx6MujOO4jfsAGvBiQDnUZGrUxh5LAWNT9TwizGp+M5bS333YIKHYEa6Z5rU3kEgIQJ0rMl6tVRpNQ6iaMvbrYweFsR1Grc+/XFY9DA5hs7EWfII2zfvh2lUsmUKS33ibgi5fpycitzqdRXsmnTJgRBsD6Cg4O57LLLOHjwoLPN7JQ43Wl70UUXsX37diIjI/nqq6/4+eefiY+P58CBA4wcObLV/T344IO8++67fPTRRxw9epS77rqLyspK5s6dC8Ds2bPrFSq76667KCoq4r777iMpKYlff/2V5557jvnz59vsd+ySpPwBZiMEJUJgXItP0yq1zO07l4eGPIRaYeMJoAujUAjE1Ebbnihwvq5t8N134zdzJsa8PGoOHaLwnXed7rCFujQltVmKnLdEo8l0PTwGDcRz2AVympOLMybyIiqO/x/uRbfbbxClGs6fCQNuBK
0TNa41HjD8bqkAZxuoNoicLpUWE/HBHVOr20PtwYujXmTaBTdjPHWK6n37G3VcdlZ0x49TtGoVuf/3bKPFe8I8w5gUM4mR3Vs/z5WRcQS9wqUswmNnFSOzRtra2pl1JqIIyy+EL2dB+jb7jWNHrHq2rShCJgiC0zSwRb2e0l9+pXrfvro3M2szmXpc2KI+LBJmAD18etjYQvvjqVXRzU9yaOprpGyZYl0xpbpS2w1i0ZM9sRmMuqbb2hJLZG9CAxHVXYBX1yfx+vrj1CQlUXP0aD3H7Rsbknl1fZJdx3///fe599572bJlC6c6SH0CC2dmnpfryymoLqDCUOerOH78OKdPn2bt2rXodDqmTJnSaLa6TNtxutMWYMCAAXz66accPnyY3bt388EHH5CQkNCmvmbOnMnLL7/MwoULGTBgAPv27WPNmjXW4mQZGRmcPn3a2j4yMpK1a9eya9cu+vfvz4IFC7jvvvt49NFHbfK7dVmO/yY9W9JWW8EDgx/g5j4346ttvOIySDu6ldu3Y8jJ6RTFiCyFClLznK9rCxC26CmoTccV1GqnO2yhLrJDXy1Nply5Iq+MjAz0DPUHUcWJgkpMZjtepy97CaYth9Dz7DdGY6RutIksQ2qt/mGwtxZfD2nT8tRjj5O39BWMRUXt7t+RaHv2JPK990j4c0OHkHWwJQpPT/xvuB7fqVfKmSAyHRarPEJueb1rt8Vpe+ai3VxTQ97rr5N51922KUYmCBA7Snp97Lf29+cErHq2Lai74ApFJvPfXMaphx6iYOXb0hsVeVB0AhCg+9AW9WGZo0d4RuCusk80p71JrA0GySg0Euoh+Q5sukER1h88Q8BQCRnbbddvUxh1kHNAeh0/wS5DVP37L/lvLqNq71679N9elAqBVzek8Na/BSCKiLVOxTc2JPPK+iSUCvvdqysqKvjyyy+56667mDJlCh9++GG945aI1Q0bNjBkyBA8PDwYMWIEx48ft7bZv38/Y8eOxdvbGx8fHwYPHszu3bsRRZHg4GC++eYba9sBAwYQHh5u/Xnr1q1otVqqauWqSkpKuO222wgODsbHx4dx48axf/9+a/tFixYxYMAA3nvvPWJiYnBzc7Me81J7EeAWgKe6TvIlJCSEsLAwBg0axP33309mZibHjh2rN/7IkSNxd3cnMjKSBQsWUFnZuK+jKfuSkpIQBKFe/wCvvvoqcXFSkKDJZOLWW28lJiYGd3d3evbsyeuvv16v/Zw5c5g2bRovv/wy4eHhBAYGMn/+fAyGuswOnU7HI488QmRkJFqtlvj4eN5//31EUSQ+Pp6XX365Xp/79u1DEARSUlIa/d3ag1OctmVlZfVeN/VoC/fccw/p6enodDp27tzJsGF1aR2bNm06559l+PDh7Nixg5qaGlJTU3n88cfradzKtBKjvm5HrxXSCK0eJj+fjLm3kDJuPKKhY6ZPnUmcC0XaAhSsWAEmE4JajWgwnKNx6wwsGmpFJX5AXdEAma6JPiODwvffp/THH51tikwjdPN3R6NSoDeaOVXiXOkXu3ByK3w8Dd6b0OY03n15+7j252tZtl8qSmUpQmaqqKD0++8pfPddp0VftQWT2URWZTYF/bqh8Gxey7GzoY2NJWzhQkLPyOo6E4PZwEeHP2JL1hZMztJglpFphuhAT7QqBTUGMxlFddrUA4IHsOOGHayevNr6nqDRUPzxJ1Rs3Igu1UZFJ3vWrh+O/9biIliuREsjbQ05OaSMG8+JK67A7MToNN+rpqMKDcX9/P5SIExmrTRCSG9w92tRHx25CJkFazGy3Ar7RJUrFHWOU0fp2qq08J8kmPNrq7JfW0P5HxsoWL6c0u9/sEv/ZyOKIlV6Y4sft42M4d5x8byxr5jlWUpqFCqWrjvOK+uTuHdcPLeNjGlxX60NFPvqq6/o1asXPXv25MYbb+SDDz5osI8nnniCpUuXsnv3blQqFbfccov12KxZs+jevTu7du1iz549PProo6jVagRBYNSoUWzatAmA4uJijh49SnV1tdWxuXnzZoYOHYqHh1Rc8pprriEvL4/ff/+dPXv2MGjQIMaPH0/RGcEBKSkpfPvtt3z33XfsOyP63kfrQ7hXOF6ac69rpaWlfPHFFwBWidHU1FQmTZrEjBkzOHDgAF9++SVbt27lnnvuafTzasq+xMREhgwZwqefflrvnE8//ZQbbrgBkKRRu3fvztdff82RI0dYuHAhjz/+OF999VW9czZu3EhqaiobN27ko48+4sMPP6znI5w9ezaff/45b7zxBkePHuXtt9/Gy0uSZbzllltYtWpVvf5WrVrFqFGjiI+Pb/R3aw8qu/TaDP7+/pw+fZqQkBD8/PwajEQQRRFBEDB1sbS6TkH6VtCVgWcwdBvS6tP1Jj1ppWlUGioZFDqo0XamkhI00dEgCJ0iRdqVIm0tRccskgjWImTgtIhbg8lAVnkWAKcKpElVfAsiGGQ6L1W7dpH30su4DxiA79SpzjZHpgGUCoGQiN0UCTtYfSiPJ0bNtd9gZjOc3ittHEY1rGNv2/FMsKY2K6fbIEmmoQ2klKRwtOgo4RotMOwM2ReBsKcWos/KQunnZwuLHcIXx7/g+X+eZ3yP8bw29jVnm+NyZJVn8fLul3FXubPjBrmQooxrolQI9Azz5kBWKcdOl1klvNRKNeqzrnWCQkHgHbej8PBAFdi6IoyNEjsGVO5Qmgm5hyCsn236dRB1TtumN65qDh8GlQqFp5dT1zLamBjiz8yMyKi9NrVQzxagTFeGRqHpkEXILFjWFcl55ZzXN5odp3dYndE2Y+htkDhR+o47CrX9CpCBJFlmLLgCr9Gj7DbGmVQbTJy3cG2bzl226QTLNp2w/vzmnym8+WfLoyOPPDMRD03LXWjvv/8+N954IwCTJk2itLSUzZs3M+asooOLFy9m9OjRADz66KNMmTKFmpoa3NzcyMjI4L///S+9evUCqJeRPmbMGN5+W4qQ37JlCwMHDiQsLIxNmzbRq1cvNm3aZO1369at/PPPP+Tl5aHVagF4+eWX+eGHH/jmm2+4/XZJykyv17N69WqCg4Ob/f2619ZcsETPXnnllVY7lyxZwqxZs7j//vutdr/xxhuMHj2aFStW1Ivibal9s2bNYtmyZfzf//0fIEXf7tmzh08++QQAtVrN008/be0zJiaG7du389VXX3Httdda3/f392fZsmUolUp69erFlClT2LBhA/PmzSMpKYmvvvqK9evXM2GCtMkSGxtrPXfOnDksXLiQf/75hwsuuACDwcBnn312TvStLXGK0/bPP/8kIECqkLxx40ZnmCBjT6pLwDsC4sc3qufWFPvz93PL2luI9I7kt6saT4tyS0wkbs3vThEUtwexQdJEwdmRtmc7bKHOUetMx21mRSYm0YSb0oNyvTeeGiURvm7NnyjTafEcPhyvMWPwvNh+E1GZ9uPtVUWpmMHhwiP2Hejfj+CX+6HHCLjld/uOBbDvU8g5KBVBG/N4m7uxRPGIemlybFk0Kr088b/++vbb6WAshYoyC9MoWr2aqt17i
Hj5pU6xudocZr0eU0EBqvDwRqURLCnE0T7RKISOE0Et0/XoVeu0PZpTzuR+4U22DZo3z7aDazwgbqwUaXvstw7ltDWazKQVSA6M5oILvMePJ/HvbRjz8x1hWpPUk7KJugiqClulgTqn7xxuOu8mdCYHarXamMQzIm2nXGQn/ebug6VHJ8J7wgS8J9hHeqEjc/z4cf755x++//57AFQqFTNnzuT9998/x2nbv39/62uLvEFeXh49evTgwQcf5LbbbuPjjz9mwoQJXHPNNVY5gNGjR3PfffeRn59vdQZbnLa33norf//9Nw8//DAgySxUVFQQeNbmWnV1NalnZElERUWd47A1mo0YTAY0Sg1KRd214q+//sLDw4MdO3bw3HPPsXLlSuux/fv3c+DAgXqRsaIoYjabSUtLo3fv3vXGaIl91113HQ899BA7duzgwgsv5NNPP2XQoEFWRzHA8uXL+eCDD8jIyKC6uhq9Xs+AAQPq9dmnT596mfXh4eHWImr79u1DqVRand1nExERwZQpU/jggw+44IIL+Pnnn9HpdFxzzTUNtrcFTnHaWj4Ao9HI5s2bueWWW6xeeplOQN+roM900LfN+RjtE423xptg92DMornZRU1HShttipja3fiCCj2lVQarpqHDMZkbLDpm/dnkHCe5n9aPJ4Y9wd6sXL48JBAf4iXrBXZx1BERRK5c4WwzZJohyiearFI4VZlh34EsKYeZO6CyEDxtFPHVEDVlsOEZ6fWYR9o1liWKp7zMH6iTR+ioWKKsTlSlU7DybUxFRdQcuAmPIa3PvOlo1Bw6RPoNs9D27k3s99812MYi89ORU4hlugY9w2qLkZ2uL1f3Y8qP/Jz6MxOiJnBdr+vsaMBlktP2+K/SdbaDkFlcjcEk4qZWEOHbvLar0scHpY+PAyxrGdWHDiPWhOAxfWXzjc9CqVDiofCwg1WOweJkzyvXEemZwPDw4fQP7t/MWS5M3lH4ajb0vgLGL3S2NTbDXa3kyDMTW33eW38ms2zTCdQKAYNZ5N5x8dw1pnWSEe7qlktovv/++xiNRiIiIqzviaKIVqtl2bJl+PrW1e9Rq+vW/Zb1rbk2MG3RokXccMMN/Prrr/z+++889dRTfPHFF0yfPp1+/foREBDA5s2b2bx5M4sXLyYsLIwXXniBXbt2YTAYGDFiBCDp64aHh1vlFM7E74yMLs8GpK0q9BVkV2TjofaoF00fExODn58fPXv2JC8vj5kzZ7JlyxbreHfccQcLFiw4p78ePc4tVtgS+8LCwhg3bhyfffYZF154IZ999hl33XWXtd0XX3zBQw89xNKlSxk+fDje3t689NJL7Ny5s15/Z37eIH3mls/b3b356/Ztt93GTTfdxKuvvsqqVauYOXOmVYLCHjjFaWsdXKXipZdeYvbs2c40Q8YeCEKbq3gHuQex7bptXc4h56VVEebjRk5ZDakFFQzq4e8UO4LvbVxnxpnFyALcAriu13XkZycDSS0q7iAjI+N8zguKY1splBqz7TuQX6RU5CPnACStgYGz7DfWX0uhMh8C42Fo+yLMLJGXBSXS5D2+Vh6het8+VBERqIKDO9T9MMwzDDelGzWmGhTXTSXA3R91t27ONsshGDIzQaVCHd54VKLl7x3j03FTiGW6Br3PKEZ2JrlVuezM2UmoZyjXUee0NRYVUXP4MJ4XXoigtkHgQeIkQIDT+6E8B7zD2t+nA7AUlYwN8kJhxwJH9qDk2+84/cQTuPXpQ/Q3X3eoe48t8NKq6ObnTnZJNe7mON659B37DFSaDXs/AWM1TFhknzEAktdBQZL0P2Qn9JmZKL29HSrjJAhCqyQKQCo6tmzTCe7t78fd/X1555SKV/9MQa1UsGB8QvMdtBKj0cjq1atZunQpl15aP2J92rRpfP7559x5550t7i8xMZHExEQeeOABrr/+elatWsX06dMRBIGRI0fy448/cvjwYS6++GI8PDzQ6XS8/fbbDBkyxOqEHTRoEDk5OahUKqKjo1v1+1gi6LVKbaNt5s+fz5IlS/j++++ZPn06gwYN4siRIy3WeW2pfbNmzeLhhx/m+uuv58SJE1x3Xd19aNu2bYwYMYK7z/BZpLZSa71fv36YzWY2b95slUc4m8suuwxPT09WrFjBmjVrrI5qe+H0EMVx48axefNmZ5shYytKs9tdRVsQhBZNEjJum0fWvfeiz8pq13iuRGxttO2JfOfr2roqKbU6YQkhchEyGQlzdTVVu3c72wyZRhgS0RMAo1BKRRszMFpMrzOK19iLojTYUVuY8dLFoGp72r/epCerQrqHmXTB+LipCPbSIprNZNx6GymjRqO3UyVae6EQFPTwkSIosqcPI2jevCadmJ0J36lT6bl7F2GLnmq0TWco1iPTNehZ67RNL6yiUme0vj8mcgzPXvQsN/e52fqeKIqkTpxE5rzbbVeMzCsYrnoXFuztMA5bqNOzbU4a4fSiRWTdu4Dqg4ccYVaL8Bo3FoWHO5pwf8Sqlq9FTpae5Nqfr2XR34vsZ5yDsPzdknLtOF+pzIdNz8E/70o6/PbCUuws/hK7DZH73BKSLhxOybff2m2M9vLGhmReWZ/Eg5ckcu+YGFTBwSwYG8eDlyTyyvok3tiQbPMxf/nlF4qLi7n11lvp27dvvceMGTN4//33W9RPdXU199xzD5s2bSI9PZ1t27axa9euetICY8aM4fPPP2fAgAF4eXmhUCgYNWoUn376ab0U/wkTJjB8+HCmTZvGunXrOHnyJH///TdPPPEEu5tZR+nN0ve0Kaeth4cH8+bN46mnnkIURR555BH+/vtv7rnnHvbt20dycjI//vhjo4XIWmrfVVddRXl5OXfddRdjx46tF8mckJDA7t27Wbt2LUlJSTz55JPs2rWr6Q/5LKKjo7n55pu55ZZb+OGHH0hLS2PTpk31ipkplUrmzJnDY489RkJCAsOH27eWhtOdtpMnT+bRRx/loYce4vPPP+enn36q95DpYHx+HbycAOl/26S7xio0mnU6Krdto3z9Hyi0jV88OhqWKrOWCZ9MHVuytnAw/yBJucWAXIRMRsJUUUnShcNJv/Eml9CEkzmXfhFhmI3S/+vhAhst5huj52TpOfVPMFTbZwyjDsLPh9ixUjGRdpBZnolZNKNVeCAavUkI9ZaKsJaWogoNRXBzkwpudjAsurY2L+DSAVC4uaEOCWn0uEUfsSMX65HpGgR6aQnxlubYZ0bbJvonMjV+Kon+idb3BEHArU8fNNHRmErLzumrzfS/BgJim2/nQqTkWYqQNT5PFQ0Gyn5fQ/n69Yh619GAVfn7k3BvLN2CvkJx8JMWn5damsrRoqMcLTpqR+scQ8IZxcgAyvXllOvLmzql9YT1B88QSUowY7tt+7agK68rKJdgP6etqbQUAG0Loymdgcks8uAliSwYn4AmIgJ1aCiCWs2C8Qk8eEkiJnPD/ob28P777zNhwoR6EggWZsyYwe7duzlw4ECz/SiVSgoLC5k9ezaJiYlce+21TJ48uV6xrdGjR2Mymerp5I4ZM+ac9wRB
4LfffmPUqFHMnTuXxMRErrvuOtLT0wkNDW3SDkukrUbZdKDCPffcw9GjR/n666/p378/mzdvJikpiZEjRzJw4EAWLlxYz8l6Ji21z9vbmyuuuIL9+/cza1b9rLo77riDq666ipkzZzJs2DAKCwvrRd22lBUrVnD11Vdz991306tXL+bNm2cttmbh1ltvRa/XM3euHYss1yKIjXnFHISiCT1SQRAwmdoXtekIysrK8PX1pbS0FB8X0iRyOCWZ8FpfEBTwUDJ4BrW5q9/TfmfZ3mUMCRvC0yOePue4qNdTufMf9Onp+M+6odOk76zalsbTPx9hYp9Q3r6p8+v/tRRRFLn4i4sp05dhSL+fmqowNj00huigpqvyynQN0q6agbGkmG5Ll+IxcKCzzZFpgPPfmYZZm8qd5/2P+UNn2m8gUYTX+kkVx6//os6Ja2vMZtCVgnv7ZGw2pG/g/k33E6SOI+3APGYOieSFq+v080S9HqEDFvBatncZbx94mxkJM1g45HGq9+1DHRaGpgENs65EcU0xo76UqmvvvGEnHuqOq/0o0zW46f2d/JVcwJKr+nH9BU3//4pGI4LKqcp7LsFVb23j34wSlt0wkMv7N+ycEEWRmsNHqNiymaA77qhfBMyZmM3wYgzUlMC8P6FbywpmFdUUsT9vPwpBwejIhov3dBS+2pXJw98eYGRCEHG9f+fb5G95cPCDzO1rY8fM93fB/s9g+D0wcbFt+wY4+gt8OQv8Y+C+fbbv/wyMxcUovb3t8v9fU1NDWloaMTExuLnJBaidgSiKHC06iiiKJPgnNOu47Sr89ddfjB8/nszMzCad3k19h1vqR3R6pK3ZbG700REctjJncLy2WnfksHY5bEFKr8wozyClpOG0UEGjwWvkxQTcOKvTOGzhzEhbWR7hTHQmHb0CehGoDaamOgCNSkFkgLzYlZHo8eEq4jdskB22LoyvSlq4Hsm3c6StINQ5alM22G8chaLdDluoK0qlMEqTvbMzCDqiwxbqUv/TStPIeXIhGbNvpqS2enJnpWrvXjLn30PxF1802sYSeRzmGSY7bGU6BL3DGy5Gti9vH18c+4L0snTre3Zz2B77FT6/QXJCuTiiKFrn8E1F2gqCgHvfPgTffbfrOGxB0j+tKQG1B0ZNd6r27m3RaQFuAYztMbbDO2yhTlc+KbecEA8payKvKs/2AyXUamWm/GH7vgFSaqUR7Bhla0Hl79+hNmxEsxmzznUi3F0dg9mAKIoIgoBa4aRC6S6ETqcjKyuLRYsWcc011zQbpWwLnO60Xb16NboG/mn0ej2rV692gkUybcaiIWiDyCZL2mBaaVqjEgmdEYumbXphJUaT2cnWuA5uKjfen/g+T57/OYgaYoM8UXaw4g4y9kPp49OpNm86IxEeUoRWevlJ+w82dB7M+RUmPW/bfvd9Dn8sklIObYQlVb66MgCoWyx2dCxFtk6WncRj6BCUQUEdakHXFqr37KFiwwYqd+xstI1chEymo9GrVtf2aE79697KAytZvHMxu3JapxXYJjJ2wPFf4cgP9h+rnRRU6CmtNiAIENMRs8EypXT6KvN5pIy7hOz//AfRaGzmpM6FRR4ht0zHFTEz+fv6v3nkgkdsP1DsWCk7Nf+YlK1qS0QRkmudwQmXNt22iyEajdQcPYouORlRDhBsEWdKI8jrLfj888+JioqipKSEF1980SFjOt1pO3fuXEprtVDOpLy83CH6EDI2oqYUTm6VXvec0u7uenj3QECgXF9OUU3ROccrd+yk8p9/MJXbWGPIyUT4uuOmVmAwiWQW20mPsQNj0QmT9WxlGqMrbfJ0JOL9JU3C/BobL0waIjgRoi8GpQ2dhDVlsH4hbH1Vct7aCIsTr7BE0jyLr43MyrhtHln33d9hC21aIm2Laopg0lgS/tpC8Pz5zjXKzniNGUPII4/ge+WVjbaxRFbLRchkOgq9wuoibc+8v1o3ZmqvYRZOL1pE6qTJ1Bw7ZkMjatcVyevAZLBdv3bAUpMi0t8DN3XDEbS6EyfIe+01qg8fdqRpLSND2nRyGzoShacnqoBAjAUFTZ4iiiIfHPqAdSfXWZ07HRlvNzXhvlIKc16JgLfGToWPPQKg+1DptSUq1lYYqiFurKQHHX2xbfs+g4xbb+PUI49gOH3abmPYGkGlQlAqERQKRINrX09cBcv/dVNFyLoSc+bMwWQysWfPHrp16+aQMZ3utLWEWp9NVlZWg8LNMi5K8nowGyAoEYLaL0TupnIjwktKp22okEneK6+QMftmKv+2k3i7k1AoBGKCpEX7CbkYmRWTWdoJTa512iaE2GkCJdNhKfrsM1Ivv5ySL790tikyDXB+mFSwpkrMxSx2wCyCra9AZR4ExMHgOTbpUhRFqxPPUBOMu1pJNz93zFVVUqHNtWtRuLvbZCxH46n2JMRdSitNr87qEpEZ2vh4AufOwXvc2EbbyEXIZDoacSFSZlNZjZGcshrr+9aMuNprmAV92kn0J09Sc+iQ7YzoPhQ8gqQAkfRttuvXDlictnHBjUfZlq9dS+HKtyl4c5mjzGo5tZG2iriLiPnxR2K++Rp1WFiTpxTrinl1z6s8tPmhjnl/bwBLcEhyrp3XYvGXgNZH2hi2JRoPmLoMFuwFtX3mEcbCQiq3baP0x59QeHQsuR9tXBza3r1RyBq5LUJv0gOgUXRMya7OgNNy1QYOHIggCAiCwPjx41GdkTZnMplIS0tj0qRJzjJPprVYpREus1mX0b7RZFdkk1aaxuDQ+kL4mu7dMRUXo43tfAuf2GBPjp4uIzW/gvG97a+R0hG4f+P9HC06ilB4FRBFQidJIZaxHeayMvQpqVT+vR3/665ztjkyZzG0exziv0oEhYGs8lP08Olu3wHLc2HLS1CUCje1U0u1KA22L5deT1wMKttMWotqiijXlyMgYNYHEhfhiUIhIKpURL73LvoTaagCA20yljOI9o0mrzqPk6UnOT/4fEAuVGSJSoz2iXaqHTIyLUWrUhIX7ElSbgXHTpcT7is5gCzf4bMjbYPuvAPxtttw79/PdkYolJA4CfZ9ItXPiB1ju75tTGpe83q2bn374j1xIt4TxjvKrJZRkQdFJwABug9F7e7XotMs34Fwz3DcVR1zo/FsEkO9+Su5gOS8Ct7a9xZ78/bywOAHOC/wPNsOdOFdcPH9oOx4OqEKLy8i330X/YlUlB0s0E5Qd7zP25lYnLZypK3zcNrMedq0aQDs27ePiRMn4uVVd3PTaDRER0czY8YMJ1kn02pG3At+UdBnms26jPGJYVv2tnMmhADdXllqs3FcDctE74RcjMxKWlkauVW5iMXSDr4sjyBzNj6TJ6OJicXjgqHONkWmASL9vRANQQjaXHZnH7e/01btBntWgdkIhakQGNf2vtYvBJNechQk2m4zWavU8syIZ/jl8HE2iGqrNIKg0eB10UVw0UU2G8sZxPjG8E/OP+RU5lB98BA5Tz2FoNEQ/YXt5CVcBX1mJobsU7j1OQ+ld8OZIKIo0s2rG1WGKjnSVqZD0SvMh6TcCo7mlDG
2lxRBb/kOZ1VkoTfprdXEPYcPt5MRl0lO22O/SXrlLhq9n5LfvIyX18iReI0c6SiTWo6br7TJWZAMZzhsRaMRXWoqbj17NniaJSOyM8m+WHRtk3LL0Wr+ZefpnSQVJ9neaau1w3pGXwl5RyFioLThYScUWi1eIy+GkfaTX5BxDWR5BOfjNKftU089BUB0dDQzZ87ETQ5P79hEDJQeNqSx1KvOjiWlSnbaShhMBrLKJV3HysoAlAqB6MAOWNxBxq5ooqLQREU52wyZRlAqBAKME8gpqsSsD7b/gG6+ko7biU1SJsiIe9vWz8mtcPQnqVjIxCU2dRR4abyYnjCdv/7dB2STENq5ZF/uHnA39w+6Hy+NF4ZTp6g5cgSUSsyVlSg8O9c1vPTnnyl44018Lr+cbi+/1GAbQRBYeclKB1smI9N+eoZ5w344drqujkSQexCeak8qDZVklmcS59eOjbGWEDsWVO5QmgG5hyDMhpG8NiS1VsYrriMGF6i0EDdOetRiyM7m5KwbMVdUkLB5U4PX7s4o+2K5H6fkVXB5nxh2nt5p/T3tgihCdbGkc9teUjfCl7Og22CY92f7++uEiCYTxrw8zDodmqioLiHh1FZMZhNGs1SM0LI5J+N4nK5pe/PNN1NTU8N7773HY489RlGRVHTq33//JTs728nWyTiTxlKvOjuWSNtUWdMWgMzyTEyiCa3CHdHoQ1SgBxqV0y9dMjIyraS/76UYSi6kpMxBi1lLUcxjv7W9jw3/Jz0PnguhNo6wqcWi1W259petW0fV7t2Ya2qaOs3lCXALwEsj/U7qiAi6vfE68Rv/7HQOWwCFRoM6IgL3fn2dbYqMjM3pHS45sI7n1DltBUFodJ5efegwRZ9+attCihoPiB8PkReCvsp2/dqQar2J7BKpiHBj8giVO3ZgKrOxfqkdUYWHo9BqETQadKmpDbbpjLIvlkjp06U1hHn0AOy4Hj21D94YAKsm26Y/S1GzboObbtcOzDodRas/pvrQ4Y5ZAFgQMBYVYa6oQNTrnW2NS2MwGxAEAZVChdKOkdsyTeN0YbEDBw4wYcIEfH19OXnyJPPmzSMgIIDvvvuOjIwMVq9e7WwTZZpCFGHd/yB6pDSZsqEmjyXNJrsiu17qVd6rr1GxeTMBc27Gr1ZmozMREyQtaAsr9ZRU6fHz6Nq7WpZIaz91NwoQrClLMjJnYyotpfyPPzAWFBJ0x+3ONkfmLCxZBA7bkOo5GX7/r1RYpbIQPNugD3v1+7DpeRj7uM3N25K1Ba3CjdSCAkBNQqgXoihy+smFmEtLifn+O9x697b5uM7C59JLnW2C3Qi87TYCb7sN0dx4ER6zaEYhyBuOMh2PXmE+gHTt1hlNaFXSwj3GN4bDhYfPyYjLe/llqnbsQNBo0Fxzje0MuXa1XdO928uJAuneFuCpIcDz3Lm7qaKSzHm3I4oi8evWoo6IcLSJjWOoho2LIXKYtOGpkK5VgkJB9xUrUHfvhkLT8HqkM8oj+LqrCfNxI6esBpVZkgRpqDC2TfCPgpIMEM1Qkgl+kW3vSxQh+Q/pdfwltrGvAWoOHSL3uedQBgWR8NcWu41jLwSFAnVIKKiUCErXvaa4Am4qN3oH9LZG28o4B6fPHh944AHmzJlDcnJyPYmEyy67jC1bOt5FoMuRcxC2L4Ov54DJYNOug92D8VR7YhJNZJZnWt+vOXYU3bFjiDU6m47nKnhqVYT7Sv8LqbJEgnVnW2WSJk2ynq1MYxjz8zn9xP8oeOstzLrOeX3oyEQFuaFwy+Dfwo2OGdAvEsL6Swuh5LVt68O3u1SB2TPItrYBz//zPLetvxWdMgO1UiAqwAOxqgqPgQNRd++OJs7O6cYO4OVdL3PbutvIrugamVOCovFp9dPbn2bC1xP4MeVHB1okI9N+wn3d8HFTYTSL1kJbUBdZeXbauOeIEXiOHoUq0MbXTRd22IKUSg91G5RnYzx9Ck10FJpu3VCFhzvStObJ/hf+fhN+/c85MkDa2JhGHbYGk8G6Rovx6TzyCIC16LGuStrwzSjPsI/jyt0ful8gvbZEybaV/GNQlgVKrSQRZS8EBZ4jR+I1alSHlRZQBQeh8vd3yeKoH374IX5+fs22EwSBH374ock227Zto1+/fqjVamtNqeZYtGgRAwYMqDeOugMWy+tMON1pu3v3bu64445z3u/WrRs5OTlOsEimVRyvTTuNGyelLtmQxlKvwp58ku4r3sJrlAuK+NuIWKuurSyRYNnZ1lVLk/+EkM6l+yhjOzRxcXiNGUPgrbfI6U4uSDd/JZ4xb3FK8x4VekdF214mPR/7tXXnldowrbcBRFEkzi+OIG0EZn0w0YGeqJQKFJ6eRK5cQfwf6xtdJHcktp/ezs7TO0ktSUUURUp//plTTzyBqbTU2abZjJamhp4oOUFuVS5qhbzwkelYCIJgjbY9llOX2m+JrDw7bTzo9nn0ePttvMeNtY9BVUVQkGKfvtuBJdCiMWkEbUICsT//TPQ3X7ueoytzh/QcOaxJ7XbD6dP1T6uQJMw8VB6EeITY00KHYwkSyS10w03phtFstN8GZMIE6dkSJdtWkmudvtEX23xdfiYegwbS4913iHhusd3G6AzMmTMHQRDOeaSkNH39mjlzJklJSdafz3aitoYHH3yQAQMGkJaWxocfftimPmScj9OdtlqtlrIGtH2SkpIIDnZAsRKZ9mFZCPe6zC7dWyaEZ6Zeabp3x3vsWNdKK7Ixdbq2cqStJYKjuNQPkCNtZRpHEAQiV64geMGCRiu4yziPvhFhmGoiMFbGc6qs2DGD9roMfHtAUELLzyk+CW8Mgq9utpt2oiAIvDnuTWZ1W4Fo9LFG9HQ25vady/9d9H/09O+JIAgUvLWC0m+/o2r3bmebZjMKli3nxBVXUvLtt022WzZ+GR9P/pjhEcMdZJmMjO3o1YCu7ZkFgx2ma3ngK3gpXpK+cTEs0j+NOW0tuOT8JGOn9NzjwgYPm6urSb/xJlIuuRRDbq71fYvDPsqn8xVzSrQUI8uvIspHKnRrt2JkFimDtM1gbEfQgSVSN8F+0ggdko1LYPOL9d4SRRGzTod53f9Jx+3EpEmTOH36dL1HTEzTUenu7u6EhNhmEyQ1NZVx48bRvXv3FkXvnk1meSZZ5VnoTHIGozNxutP2yiuv5JlnnsFgkFLrBUEgIyODRx55hBkzZjjZOpkmKc2CnAOAAAkT7TLENYnX8PLol7ksxj5OYVclNkiOtLVgibQtK/NDEJqfDMvIyLgmXloVPoUPU51xGxVVDvo/DusP9x+ACYtafs76hWDSQU0JqN3tZRlQl04bX3td65AFPZrg8tjLmRY/jVDPUAD8rp5BwK23oOnRw8mW2Y7q/fvRJSc3K8niq/VlQMgA/N38HWSZjIztsETaHj3DadvDuwcCAuX6copqis45R9TrbV9QMWIgiCZI+wtqXKugV6rlet5AcIG5utp1r+9mM2TWOm0jG3baKtxr74WiWG/TzeLE7Ex6thYsNTSSc8utGxR2K0YW1h88Q0BfARnb29aHrh
zSa89NsJ+GvLmmBnNHy2ZTKCXN5jMdtyYTph8fR/H3y4jYb8NBq9USFhZW7/H666/Tr18/PD09iYyM5O6776aiom7Nf6Y8wocffsjTTz/N/v37rZG6Z0bMFhQUMH36dDw8PEhISOCnn34C4OTJkwiCQGFhIbfccov1vIakF3744YcGN11EUaRcX8498+7hmquu4eWXXyY8PJzAwEDmz59v9d8B6HQ6HnroIbp164anpyfDhg1j06ZN1uPp6elcccUV+Pv74+npSZ8+ffjtNylju7i4mFmzZhEcHIy7uzsJCQmsWrWqnZ9858LpTtulS5dSUVFBSEgI1dXVjB49mvj4eLy9vVm8WA65d2mO/y49Rw4DL/tERQ8OHczE6IlEeElRtTXHkyj59ltqjh+3y3iuQmztAv5EQdeOtC2uKaZUJ6XRmvVBdPd3x13j2ppmMs5HNJmoPnQYc2XX/v9xReqkXxz0txGEJlM9z+HkVjjyIwgKmPhc685tBYZaDXir07Y2oufkjKs5Mf2qTnuPC7z1VkL/+1+0Ca2IfHZxwhcvpvtby/EeM8bZpsjI2A1LpO2x03WOUjeVG70De3N+8PmU68vrtc955v84PngIpbUOBJsRlACB8WA2QEo7U8ltiMksWufsDQUX5L36Kinjxtv+87AFBUnSJqXKHcL7N9os9Mknif9zA75TpljfswRWWJyanQmLHNup0hoiPKXiYHYrRqZQQHytREJbdW3VHnDzz9ImdaD9NPHLfvmFpCFDyXnWBfw0+srGH4YzNoxGPwyj/is5bv98FvSVCFueR33kPYx9b0McemfL+rURCoWCN954g8OHD/PRRx/x559/8vDDDzfYdubMmfznP/+hT58+1kjdmTNnWo8//fTTXHvttRw4cIDLLruMWbNmUVRURGRkJKdPn8bHx4fXXnvtnPNaSnev7rip3di8aTOpqals3LiRjz76yOoAtnDPPfewfft2vvjiCw4cOMA111zDpEmTSE5OBmD+/PnodDq2bNnCwYMHeeGFF/Dykq6VTz75JEeOHOH333/n6NGjrFixgqAg29eS6Mg4XXnZ19eX9evXs3XrVg4cOEBFRQWDBg1iwoQJzjZNpjnsLI3QEBUbN5L/2mv4Tr2SiBdecNi4jiaudnc3vbASo8mMSun0/RWnYNnB91GFUC5qrNFoMjJNcfKGG6jZf4Duy5fhPX68s82ROYO4YC/+Ti0kKa8Y6O64gU0GyNgBURdZq2Kfg9kEax6TXg+eA6F97GbOou2L2JK1hdKqicBA4oO9MOv1krPWZELZhhQ2V8RgMrA3by/ZFdlMT5jubHPsgjo0BHXouCbb/H3qb/7K+osLwy9kdORoB1kmI2M7LKnieeU6iir1BHhKmttfXv5lg+0VHu6IBgO6pGTbG9PzMvj7DamuRt+rbN9/G8gurkZvNKNRKejmf26GRtX27RhPn0bhYT+d0TZj0bPtNhiaKDbk1jPxnPcskaedrQgZgK+HmhBvLXnlOrSiVDjObvIIIH2XNR6QOLlt5yuUEDVcetiRmiNHEfV61/guP9eEVGLCpTDr67qfty+Xnre8JD1qUR16D8qPwdwzah+81g+qCs/tc1Hr9fh/+eUXq3MSYPLkyXz9dZ1d0dHRPPvss9x555289dZb55zv7u6Ol5cXKpWKsLCwc47PmTOH66+/HoDnnnuON954g3/++YdJkyYRFhaGIAj4+vo2eG5zCIKAj9YHN6Ub/v7+LFu2DKVSSa9evZgyZQobNmxg3rx5ZGRksGrVKjIyMoiola986KGHWLNmDatWreK5554jIyODGTNm0K9fPwBiY2Ot42RkZDBw4ECGDBli/Uxk6uN0p62Fiy++mIsvtmOVQxnbYjJCRa2mUc8pTbdtJ1uytnCi5ATTE6ajDg/Dc8Rw3M4/365jOptwHzfc1ApqDGYyi6uJCWq4Em1nx7Kj7YZ0o0kIdUEdMBmXw71PH/SpJzDmFzjbFJmzcPPOxDP+OX7M8+VxfnPMoKIIbw6Ckgy49Q+IHNpwu32fSZI/Wh8Y+4RdTTpZdpISXQnVNUoEQYpAFlQK4tauQZeUhMpGWmbORm/Wc+u6WwEYHzUeH40PotmMLiUFhVaLJirKyRY6hh2nd/DJ0U8wiSbZaSvTIfHSqugR4EFGURXHcsoYEdd0FJT/rFn4XXst6shI2xvTa4rktE1eJ23IuUBVc4uebWyQJ0rFuRka0V9/TeX27XgOG+Zo05on55D03KPltpkqKlF6edLduzuFNYWdMtIWICHUi7xyHYYa6ftut0hbkHRoO4AWbeiT/8P/phtRuLk525QOwdixY1mxYoX1Z09PT/744w+WLFnCsWPHKCsrw2g0UlNTQ1VVFR6tdIb3718XHe/p6YmPjw95eXk2s99Cnz59UCrrsl3Dw8M5ePAgAAcPHsRkMpGYWH9jR6fTERgYCMCCBQu46667WLduHRMmTGDGjBlW2++66y5mzJjBv//+y6WXXsq0adMYMWKEzX+HjozTnLarV69uUbvZs2fb2RKZNqFUwd3boTDVrikYAM/tfI7simz6BvVlyNSp+E6datfxXAGFQiA2yIsjp8tIzavouk7b2h18s06aLMmRtjItIfj++wl9/HEElcvsS8rU0is4HEVWGVViNWbRjEJwQBaBIED3oZLT9vivDTttdeWw4Rnp9eiHwdN+aVmiKNZd2/TB9AjwwE0tTYQ13buj6e7ACGQ746n2JMQ9hLzqPE6WnqR/cH/yli6l6P0P8L/hesIWLnS2ie2ibN06TKWleI0Ygbpbt0bbWf7e0T7RjjFMRsYO9Arzlpy2p8vPcdqazCaUiroFvTo83H6GdB8KHkFQVQDpf0Os8zdCLFI3cY0Uy1W4ueE9dqwjTWo5l70Ew+8GpbbZpqIokvPMM5R+/wPRX3zOkpH2K+DkCiSEeLMtpZCSMj8AimqKKNWV4qv1da5hZ1OQDDtWSFHoCfbNVhYEAW0zhbQcxuOnGj8mnCWn998U2PqqFGWr1IBJL0kmXPyAJIl1JvcftJmJnp6exMfHW38+efIkl19+OXfddReLFy8mICCArVu3cuutt6LX61vttFWr629aCYKA2WxutL1CoThHX/tMbdozKdeXSwXbRHOT41RUVKBUKtmzZ089xy5gjTK+7bbbmDhxIr/++ivr1q1jyZIlLF26lHvvvZfJkyeTnp7Ob7/9xvr16xk/fjzz58/n5ZdfbtmH0AVw2op2zpw51lDvxoTZBUGQnbaujp0dtgCjuo+iuKYYdzsXhHE1YoM9OXK6jBMFFUCos81xCjf0voHBoYN56IuTAMR30grrMrZF6ePjbBNkGmFI9zjEPUoEhYGssmx6+NohCqshel4Gh76VtNgbKkpWmAoKFQTEwgV32NWUopoiyvRlgIBZH9TpN6NifGPIq84jrTSN/sH9cR8wAMHNDdFocrZp7ab408+o2rmT8Gf/D7+rr260nSU6qzMW65HpOvQK82bdkVyO5dTp2h4vOs4Dmx5AKSj5efrPjjFEoYTESbDvE0kiwQWctpZI2w5ZL
FcQpHtfi5oKmEpKEGtqKF+3HrdevexsnHNJqF13pOUZmN13NsHuwQ0WbLIZJiNk/QPF6TDg+pafd/x32P0+lKTb3WnrUmhaEdS0fbnksB37hLQ5v/lF2LgYY3kVxt5z0CYk1P1tW9NvK9mzZw9ms5mlS5eiqJXr+uqrr5o8R6PRYDLZZs4UHBxMeXk5lZWVeHpKv+e+ffsabFtYXUiloRKj2dhknwMHDsRkMpGXl8fIkSMbbRcZGcmdd97JnXfeyWOPPca7777Lvffea7Xr5ptv5uabb2bkyJH897//lZ22Z+A0p23v3r3Jzc3lxhtv5JZbbqkX2i3j4piMUuVWVfM7srbg8WGPAyCazYiiaN+bpQthmfil5nXdYkphnmF4KAPJL2y8Iq+MTFN0pWtGRyDSzwsMQaDN5Z/s445z2sZPkJyy+ccazhCJGAD37oaSTFBp7GqKxYHnoQiiXFRbN6OKP/8cQaPFa8xoVLXpZJ2BaN9odubstP7e3qNH0/OfnQga+37OjsBz+IWgEHBrYg5rMBvILMsEINa3ZY4RGRlXpFe4tCF6PKeu6FiAWwCZ5ZkoBAV6kx6Nsu7/unLHDio2bsTjwgttH2U6eI6UNdFW/U8bU+e0re/sMeTmcfrxx/GeMB6/667rFPORoLvuIuDGG1EN6Nfp51gWLefk3Ao+nPtf+w+YexBWTQaNF/Sd0fL5iKV4Wbx95RWKPvsMQ3oGPldcgXtf++n+25xaB63VYQsw+mFEUUS16TlEnQ6xx2IEB0g+xMfHYzAYePPNN7niiivYtm0bK1eubPKc6Oho0tLS2LdvH927d8fb2xuttm1+mGHDhuHh4cHjjz/OggUL2LlzZ72CYmeiM+kAms2KS0xMZNasWcyePZulS5cycOBA8vPz2bBhA/3792fKlCncf//9TJ48mcTERIqLi9m4cSO9e/cGYOHChQwePJg+ffqg0+n45ZdfrMdkJJxW3ejw4cP8+uuvVFdXM2rUKIYMGcKKFSsoKytr/mQZ53LyL3gxFn550KHDVu3eTdLgIWTec49Dx3UW1irrBRVOtsS5WFLOQn20+Lg5X7dMpmNQuX07J2+8kZwOnn7d2VAoBDwEKW12f44dCtQ0hrsfRNfq5h9vREtX4wkh9o8ashQyURgl3VpLpG3+suWcfuIJDKeaSPfrgFgkASwSAYJG0ykctgBBd95J1KpVuCWeW6DHQnZ5NkbRiLvKnRCPzqFVLNM16RUmObCO55ZjMktZkkHuQXww8QP+uPoP1Ir6c7TKbdso+mg1FRs32d6YyKGS49bbNTLRUvOlAIuzgwsqNv5J5bZtlP7wo2s6N7e8DF/eCKkbW3yKW2IiHoMHs2zvMkZ+OZKPDn9kRwOdi+X+nF1STaWu6WhDmxB2PniGgL4CMra37BxdOaTXtrWzJm7ZTz9T9NFH6FIcOH+zBWZTfYdtLcKYRzAP/w9KPx+HzUvOP/98XnnlFV544QX69u3Lp59+ypIlTcuMzJgxg0mTJjF27FiCg4P5/PPP2zx+QEAAn3zyCb/99hv9+vXj888/Z9GiRQ22tUTYKs+WmmiAVatWMXv2bP7zn//Qs2dPpk2bxq5du+jRowcAJpOJ+fPn07t3byZNmkRiYqK18JpGo+Gxxx6jf//+jBo1CqVSyRdffNHm37Ez4lTBv2HDhjFs2DBee+01vv76a1atWsVDDz3EtGnT+OCDD9q8gyBjZ47/Lt1MTHqHDWkWzRQc24e5qgqxEd2VzoY10ja/a0ba5lTm8EPKD+QXBgIecpStTOsQRap378GQmdXpI0E6GkFu3ck07yOl5IRjB+45BU5sgmO/wQgpHYuMnVCYAudfDwrH7GNbnJfVlVI0bXyIF6LBgO/lU6g5noQ2zv6yQ47EUqCmoarbXeF/0/J7R/tEO0bDWUbGTkQFelqL5KYXVhIb7IUgCAwNa7i4o+dFF2OursFrZOcuNF1UqaeoUloTxQbVn6t6XjySkP8+hCrENZzL55C0BrJ2Qa/LW31qWmkaZdUlaIXOsQnXEP6eGoK8tBRU6DiaU4SPTwmVhkoGhgy0z4AKhZQZtP8zKXq2JdIfaVvAbAD/GLvLFgbMuZmqf3rjMaSRgq6uytjHGj2kmGi/4I7GIlgfeOABHnjggXrv3XTTTdbXc+bMYc6cOdaftVot33zzzTn9NCQxWlJS0uTPANOmTWPatGn13ps3b5719aJFi3jkiUc4UXoClULFRx+duzHz2muv1ftZrVbz9NNP8/TTT5/TFuDNN99s8H2A//3vf/zvf/9r9LiMk522Ftzd3Zk9ezbR0dE89dRTfPHFFyxbtkx22roiolgXpdTzMocMWVxTzCXfXILZpGfLT9+jErvGosdSfKyoUk9JlR4/j847KWqII4VHWL5vOf6qGOAOEkK8nW2STAfCfdAgwhYtwnPEcGebInMWUT7RZJbA6coMxw7ccxL8/l/I3AGVhVL07W8PQc4BKD8lFaRwABaZgMqKAEBy2gpqNaGPNb6o6MhYdFwzyjMwmo2oFCr0GRnkLFqEsaiY2B++d66BbcRYWIjSzw9B2XQEilXPVi5CJtPBUSoEEkO9OZBVyrGccmKb0W/1vHAYnhcOs59B1SWw/wsoSILLX7HfOM1gkUbo5ueOu6b+9UDTvRuBt97qDLOax1ANp/ZJryNb/3f63+kh3L36AL6RPnCebU1zJRJDvSio0PFH2nY+Tf8fMb4x/DTtJ/sNmFDrtE1eD5c+23z75FppBDtH2QL4TJqEz6RJdh9HxvlYpBHOlLyRcR5O935lZ2fz3HPPkZCQwHXXXcfQoUM5fPgw/v7+zjZNpiFyDkJpJqjcIXaMQ4b00/qhUqgwKERyQ9S49Ww8DbEz4alVEe4raet0xWjbQPdApsZNxd3QD5D1bGVah8LNDf/rZqLp0aPTR/J1NPoESVV0S00OlgHw6wGTX4J5G8EjAPZ/LjlstT4weK7DzLBEXpr1wYT5uOHdyWVfwj3D0Sq1GMwGTlVIf3Olvz+VO3aiO3YMQ06Oky1sG1n33MvxoRdQ8ddfTbaTi5DJdCYsEgnHztC1PV50nFf3vMqnRz91rDEmPax5VCrAVJrt2LHPILVWxiuuo81TT+2VIjS9QsE/utWnixnZcDoP8bcNtrfNhUio/btWlAfgrfEmwC2g0SLqNiF2LAgKSYO/JLPptqIIKX9Ir+2sZ9tZEc1mTOXlGAsKnW2KS2Fx2mqVchClK+A0p+1XX33F5MmTSUhIYNeuXSxdupTMzExefPFFenXySpQdjo1LJAFvqIuyjRsHGo9aYe+mdVjaiyAI52jidRXqJBK6nq7t+cHn8+zFz1KVNx6QnbYyMp2Fod16AmAUSqjQO/DatnEJ1JRIRcf0FbDhGen90Q/D7g/sfi8DMJgMZFdIzgWzPthamdpYXIxoNtt9fGegEBRE+UQBdQ5Mpbc3ES+9SOwvP6MKddG04SYQzWb06emIVVWou3Vrsq3FSW+RiZCR6cj0CpOKkR07XVeDJL0snQ8OfcBvJ87VCxdFEX1WFvqsLNsasnEJ7PkQ
XeScGuLZtcTzsisr3Lf+/LTp3Cuddfb/BMuNYQ6x8EoXeDEBrsS0us03O1Gi1aFb2JgpOP4argljaqkKzt+vDrMazVMHjoPOr83Ds63IH7O9+PzgGdrV8YETVI2xCPf4dHqMeEwYnnjJ0Swn1d4ezYfCZJbijTzyrCM4JjdTcTrYO8YCg3jk0qWyci2RjKypA04mYc69Yd+tzceu3D/IUze9pKwUHtAoiaAtO4tvW5IGzOovwvTUZWj562Z4tPAwBivPkNH5HMDMXFSJk0GRXnzkHj6QX/aXerWo+iKAgvegpHUwXKutdtFmYhBDJTszDunz/RupMP0LmFjaoka3q428P1fu71Ydfj+rDrrVgNEVmLsaetPwAgpzQHF0suwsfZp9bPl3U8WzboND+tg9xhKAuE1vksknOT0Sesj9olkY1odDroC/KB8nKUnjgB16uvrtPzDcKAlLwUALxLSBb86o2sojw9HUV//VXvb4vUZrqoka2nbXRg/Xra5pbmosSQBwDoGBRr9bqIqPnQuLgg4OFZcG7fHt63/J/a5QAA2viHA3BAUlbdzm3nC0rhn5mC24/9DIclC21THBER1UrrIA8o0MFQ7g2g7tfp/zbayjM0AnDZBIte7FjRXMQGepi/oEi4yMnj7F3YW28hZstmuHTvXufnlupLMSpmFHqH9kYLD3YukAEbbckqcn/8ESnjxiNz/vNql1Ivsve0Tc8tRnFZ7Qe9N100G8o90S44wBalEVEz4j1yJCJXr4KDn5/apQC4fOiXut1FsPDPxahosQ2/t24Lz0GDbFEa2UiFoQJn8s/U6TknL57E9jPbkVmYaaOqiKghXHRaRPq5mYdIqOtt4ycvdUqICZSrp615gkX2wms2vFwd4a4JAQAcu1C3oZ2o+XG56io4hobWa4hBFwcXPNnzSSwZtASOGkcbVEdNDRttySoUjQYOwcHQRTXPb3RNk5jklOYgpyRH3WIaka+bDt6ujhACSK7DEAkJ2cZvgA1lgYiV7EKYiKqmOPw74lLhzp3I27BRtVq8PHPhHPIlduUtqtPz9p3bjTPRJ/HjLf0R9PhjNqqOrK2ovAg9VvTAsG+GoaCs9r2rf0r+CfdvuR8fHvrQhtURUUPEBf/bA9E0Y3ptJZ03XttyeARqDiIuNbKnFqSoWwgRNSlstCWr8LvzTsRu2wr/++5Tu5R6cXV0Nc+uKtMQCYqiXDaube0/6P59NgEAoK0IQIiXs01qI6LmqfjwPzhzz71Ie/RRFB84oEoNYT7OcPTej3ztPugNtb+LIFI7CiVnh6GdX0cbVkfW5uroCk+dJ5y0TsgozKj187ycvNDGpw3a+LaxYXVE1BBtgj1gKAsEULc74orKKpCWUwxArkbb3NJcZJdkA+DM8s1NO3/jZN4FFReRX5avcjVkS6KsDBfXrEHm8y9AVFTU6bkZBRl8f0iGE5GRVdWni39T0cqzFUoqSpBTmqN2KY0qOsAd+0/nmHsj1MbxbONtO/5OYc36mBOR9Tm3jYP7wAGA3gCndu1UqaFneAxKzw+CoTQQWQUlCPKs3XiG+Rci4JTpgC4Dom1cIVnbVyO+gq+zb51mSp/cfjImt59sw6qIqKHigj1hKDX2tK3L8Aim61pfNx183HS2KK1JMnU+CXQNhKujq7rFUJ20Cw7Et+c9oHHMx6ncU+gYwC+Q7ZaDA8699DIMRUXwuW0MnGJrP0fMszufxe6M3Xipz0u4KeomGxZJTQUbbYkuebv/23B2kK/XqHnsx/O172mbVngaABDhGWWTmoio+VK0WrR45RVAq4Wi1apSg7uTMwL1NyE1vxinsurQaJuYhDU/vQDD4UiIjT/xS6lmxN/FX+0SiMgG2oZ4wFBmHNM2NT8V5YbyWo3jaLqujZGoly1w2SRk7GXb7BgnIwswNtrmsdHWnikaDbxvHQ1otFBcXOr0XFMv2zD3MFuURk0QG22pwYoPHkTm8y/AtXv3Zj0GoIwNtsC/M+rWtqdthaECuRXG20/bB7A3GhFVpugsezXlfP01XHteA11Y481yGx3gjtSLxUjKKkTPqCtPkPZn+mG45+0EADh7erDB1s5VGCqgQIFWo84XC0RUO+E+rnDR+KL0fH88OKA3hBC1el7ipUnIogNr96WdvTD1RuZ4ts1PbKC78QsKtyQcz07EcH7MsmtBTz5Zr+etumkVCsoK4OTgZOWKqKnimLbUYCUnTqDk779ReuKE2qVQPZh62iadL6jVhXBaQRoE9BAGR3QOibRxdUTU3F1ctQoZT/8PZ+68E/qC2vfob6hQv3Jo3Y/g99Sdtdp+zbHv8XevX3H3tOsQvuA1G1dH1paSl4LZ22fjkW2P1Gr7nek70XNlTzy87WEbV0ZEDaHRKGgd5ImyrMEI0lwHnbZ2Qx0kSj4JWSsv9rRtbnzcdHBBCADgyPmTKldDTZm7zr1WdxyQfWCjLTWY+/V90eLthfC9Y6rapTRIYXkhZmyegeFrh6PcUK52OY2mpa8rtBoFhWV6nM0rveL2STnG264MZf5oHeRp6/KIqJlzv+EGOIaGwvOmm6Bxa7weT3rnI3ANX46/8tbWavuTl85t7t5R0EVE2LI0sgENNNh4aiO2p26HQRiuuH1ybjJK9aVQwB7VRE1d2xAPAMDxzNpPvnPS3NNWrkbbiW0nYnb32egR3EPtUqgewt1bAjB+EUn2TxgMKEvhsaaacXgEajDHoEA4Dh6sdhkN5uLggr2Ze1FcUYzU/FRpvqHWOWgQ4euKpKxCJJ4vQLBXzcNEpOXmwlDuCZQHItyXExwQUc0cg4LQ6rtvofXwaNTX7RAYg/WZQIEho1bbZxQZx+qO8o60YVVkK6HuoXDUOKJUX4qMwgy0cK95KA5TbzTeQkzU9MUFewKaIvyRuQt/ZOSgZ0jPGrfXGwSSs4w9bWUb07Z7cHd0D+6udhlUT218o3EqH8gqTYPeoOcQPnbMUFaGhGt7w1BQgNjffoWD/5XH5v/8yOfYlroNo2JG4caoGxuhSmoK2NOW6BKNosH83vPxSfwnCHYLVrucRhVlHtf2yrcuB2uvQeHJpxBecRe0GvZQIqIru7zBVuj1uLhqNURFhU1fs1dYWwCAQXsROSU1n9vKDeWoKMjEjHV6DPvrBIThyj01qWnRarRo6WHsoWSaiKcm5sl6JPmClqg5iwv2gIN7Ao5hARb9teiK26deLEKZ3gAnBw1Cves2yQ+RmjqGRKIw+X50Fm+xwdbOaXQ6OPj7Q3FyQtmZM7V6zsHzB/FHxh84X3TextVRU8JGW2oQUVaG3O+/R/Hfh+3iQ258ZDyuDr4aLg5yXeCZxvtKrMVkZCcvNey2DvKyaU1EZJ8ynv4fMufORcbcuTZ9nRj/IAi98W6A3WeO1bhtWn4aWmbp0fewQKttW6BoeHnUHJkaYE0T8dTEtA1nWCdq+uKCPWEoDYSh1B/BbleeMT3x0rVqK383qToYnMo9hQ2nNtTqiytqmuKCvGAoCUPiWdt+sU1NQ8tly9Bm359w7dKlVtvzLiE58VMJNUjZmTNIf+xxpEyeDHCm7
WbL1NM2sRY9bRPOGreJlWyMMCKyDo/Bg6A4O8P9uuts+jqKosAZxrsm/kqveaLMYxdOItsDWHmtFzxvG2vTush2TB9iTB9qqpNXlocLJRcAABGeHL+YqKnzcnVEkHMrFCbNxm0Rj15xe9N4tjGSXatuT92OR7c/incPvKt2KVRPsUHGO5NSLxajqIwNt/bOMSgQikPtRiw1CMO/XzjzLiGpsNGWGkSUl8P16qvh2rUrFDtotM0uycbahLVYdWyV2qU0KlNP26Qr9LTNLc3F5sIZcGn5EVoFyNUbmYisw6N/f8T8sgmeQ4bY/LX8dMYeWScu1tzr6K+MBJz3VvBd99YIv/8+m9dFthHpGQngysMjmD70BLoEwl0nV6MOUXMVF2xszDpai8nIEs8Zr2ejJRvP1lPniasCrkJ7v/Zql0L15Oumg69vOpyCvsWiPz9VuxxqQs4WnkWJvgQOGocrjttP9oWNttQgznFxiPhsOVp+9KHapVjF2cKzeHbns1h8YLHapTSqqEsXtWk5xSgu01e7XXJuMvSaXGh05xDH4RGIqJ4un2xBX1CA/C1bbPI64e7GXpRn8k/VuN3xC4kAAD8nXgQ3Z7UdHoG3FxI1P22CPQEAR9NzUa4vr3Fb051j0ZL1tB0VOworhq3A1A5T1S6FGiDQNx86393YkbZV7VLIxkRZGc4tWIDT06bBUFpa47bJecYvpMM9wuGgqV3vXLIPbLQluozpNsmLpReRU5KjbjGNyNdNB29XRwAwz7ZbFU9NBAqTp6MsczQi/NwaqzwislP6gkKcnjwFqTPuR96GjVbff5xfNADgYnlajdudyU9GVIZAFBttmzVTI+y54nMoLK/+bxknISNqftqGeMDRdzt+zLsT7xx4p8ZtzY22AbxWpeanrU9HlGb1Q0uHQWqXQrbm6IicL9egcMevKD15ssZNzdcuHItfOmy0JbqMq6Mrgt2MYyBeaUw8e/PvZGTVj2ubml0BQ0lLtHTpAp0DTx9E1DAaN1c4t28Prbc3HMOuPLlMXXUNbQMAKFPOwlDDZJkO58/g5aV6PPTqhxD66u82oKbNU+cJX2dfADX/DTf1xDUNp0BETV9csCcgHGFQSmrsTX+hoBQXi8qhKECUvzw9bSsMFVfsgUzNQ9fQWJSdH4LS3A5ql0I2pigK/O67F8Fz58IxKKjGbc3XLrxLSDpsdaEGSRwyFKfGjkN5RobapVhNbcfEszdR/sbeCDWNa5sg6cQORGQbiqIgeM6zaPX1V3DpYP0x+HqExUAIDaApw9HzZ6rcJqckBx4FRchzAQxhLaFotVavgxqPqfdsTX/DOTwCUfMTFeAGTUUAAOBkDeOUJ166jm3h7QIXnTzn838u/IOrV1yNKRumqF0KNVBMoHH8ZtPnLrJvflOmwGfsbRZDh1XFNDwC7xKSDxttqd4qLl5E2alTKD5wAFpvb7XLsRpTo610PW0Dr9zTdv2Zz+Hoswsta/6bQkRUa4pWC8fQUPPjstRUlCYmWmXf7k7O0OqNJ6zdqceq3CbhYhKOhyu4Y7ovAj7ipB/NnflveDU98fQGPVLyUgDwgw9Rc+Ko1SDc3fg7m1Z4BuWGqnuV/js0glwdDJJzk6EXeo51aQdaB7lD0eYjvfQQjmVZ53qImj/eJSQvNtpSvWk9PNDq27UIe3cRNC4uapdjNbXppWOPzD1ts6putK0wVOB46ddwDv4OLXx56iAi6ytNTETKuPE4fcedKE+reRza2vLUGhuE/zlf9Vhhp7IvwlDmB6UiBCEhflZ5TVKPeTKyar54TS9IR7mhHE5aJ4S4hTRiZUTUUB2CwiEMjjAIPdLyq/4bcVLSu8LYoGM//Nyd4BmyBS4tP8KKf75RuxyyMSEEys+eQ8Gvv0IIUeU2ReVFOFt0FgC/cJYRv4qjelMcHOAcFwfnuDi1S7Eq0+2Ssva0TTpfCCEEFEWxWJ+anwooegiDI7q24B8LIrI+ra8vNJ6exiEKHB2tss8gl5bIzk9GVn7Vs/K6GdqhMPFRXBXuVem8R81P96DuuKvjXegc0LnK9UFuQVg5bCXOF5+HRuEXkETNSdsQL/x8LABa53Qk5yZXOcSJrD1tTZ9b2KBjH/ydw3AOxruByM6Vl+PkgAFARQViNv8CxxaVJ8U1/X77OvvCy8mrkQsktbHRlug/oryiAABn8oy3XjlqrNNw0NS19HWFg0ZBUZkemXklCPGy7D399zljLzVDmT9iAz3VKJGI7JyDjw9afvIxNM7O0HpZ56J0VMQ0/PnttdC0CahyfdKZLCzY8Q6U6FiIaT2hWKmxmNTR3r892vtXPz6yTqtDx4COjVgREVlLXIgnDAeNjbbVda74t9HWrRErUx9nlrcvUV6tcK4ISCtMUbsUsjFFp4NT61iIsjJUXMyputGWPemlxkZbqrfcH36EolHg2rMnHPzs55bSQNdAuDi4oLiiGGn5adJMVOKo1aClnyuSzhci6XxhpUbb/RnHAQDOCJZqYgcialz/nT23+PA/cIqNgcbJqV77u9J43Rf/OYYB2Skorchjgy0RURMWF+wBQ5lxnPLEnMrDmJWU65F6sRjAv+d+GVQYKnA6/zQATrBoLzoExmL3KSC3IgN6gx5aDT972bNWa9bUOBFunF8cHun2CHycfRqxKmoqeF8Y1VvWokVIe/gRlJ44oXYpVqVRNIjwjAAg3xAJUf7VN26cyDbenhPgFN6oNRGRvPK3bkXKhAlIe/gRiIqKeu3DdIts6sVilJTrLdaVG8qxwW0B3hgSiJzxExtcLzUNWcVZ+CPjD2QUZFRat+LoCqw6tgrni86rUBkRNUSghxNcYByL+lhW5XHKjUN8Ad6ujvBz0zV2eapJL0hHhaECzlpnBLsFq10OWUG3Fq0gDA4QqEBGYeW/ZWRfamqwBYx3Ak/pMAU3x9zcSBVRU8JGW6oXIQRce/SAS5cu0EVFqV2O1ZluLZJtMjLTrWRJ5wsrrUsrMH6DH8HbMoiokWhc3QAhAIMBQq+/8hOq4O+ug0fLFXCNeQFbk/+yWHc69wzy3Aqxq1M+Iibebo2SqQl4fvfzuOvnu7DlzJZK6z76+yO88McL5gk9iKj5UBTFPGbrmYLKt41fPp6tTGOUmz6vtPRsybG67USbIG8Yyox3sh7PTlS5GiJSE8/qVC+KoiDkuXmI/GJlpVtZ7YG0k5EFVN/TNrfCOEtv+4DoRq2JiOTl1rMHIlZ8jrB33q738AiKosDZuRAah3z8lfGfO0Mq/FCY9CDKMyahpa88t9LauxjvGER4RsBBsRwFTG/QY3jUcPQL68dx4YiaqY5BxuvQIn0eckpyLNbJOp4tJyGzP/7uOjjojZ+x96fb112tVJkoK0PqzAeRGD8E+gLLzlMGYcCmlE04cfEEDMKgUoWkJo5pS1SF1j6t0da3LYJd5brFKKqanra5pbmoUPIBAFe3aNPodRGRvFw6Wk4aVXL8BJzbtK7TPjq6TMS24xfgHGq5r1Pni9E76Sy0UTHQQACQp2eWPbu/y/24v8v9lZZrNVo83P1hFSoiImvpEBKAb855
QeOYi1N5p9DZubN5XeKl61dTJwRZmHra8sso+6EoCnx1LXABh3C0iqFAyL4oOh2KDxxAxblzKD1xAq5du5jXnSs6h4e3PQwHxQF7Ju5hb3oJsdGW6kVUVEBxsN+3z8CIgRgYMVDtMhqd6SI3LacYxWV684Rjh88bLxYM5Z7oEBqoWn1EJLesDz/E+QVvIHj+c/C59dZaP69bcBdsOXgcZ7Ith1g4ffwUntr7OfT7tBBPj4Kik2cMRCKi5igu2BOGPQHQOOYiOTcZnQM7m9edPGfsaRsj0SRkwGWNtpyEzK6Ee0TiQglwuoqhQMj+BD39NLQe7pU6JhSUFeAq/6sAAI4aTpgrI/ttdSObynh2Dgq2b0fgww/D+5b/U7scshIfNx18XB1xsagcSVkFaB/qBQD4M+04AMBBHwRPZ/6xICJ1GPKMPf7LU9Pq9DzTrbL/Hfpl76mv4BXog0BXX2jYYGt3hBAAYB7bMr0gHTqtDn7OflKNd0lkT1oHeaDswkCUXeiPzjf1MS83GASSLhvTViYcHsE+tfOPxoFUILusbtc81Dx5xg+ucnmMTwxW3LiikauhpoR9q6leypKTob9wARoXZ7VLsSm9QY9yfbnaZTQq04Xu5UMkHLnU09bHsYUqNRERAUDAw7MQ/tFHCJz1UJ2eF+brCEefXUjSr4Te8G9v2999fsecO/NxdN7jVq6U1Hb/5vvRe1VvHLlwxLzs9T9fxw1f3oCVx1aqWBkRNYSLTouWru2hL4pCarYwL0/LKUZphQE6rQZhPi4qVti48srykF2SDYDDI9gb05B05chBQVnl+UaISA5stKV6Cf/wA0SuWQPXXr3ULsVm5u6cix4reuCHpB/ULqVRRVXRI+10/ikAQJh7hBolEREBMPaYdL+ut/mxMBhQlpp6xee18neHU9A6KN6/4si5MwCAnJIcGBTjea5nGMfqtjcF5QXIL8tHcl6yeZnpFuIIT/4tI2rO4oI9AADHM/PNy05eum5t5e8GB608H3FdHVzx9YivsfCGhXBzlGsCNnt3VYtgGCouvdezE1WuhmxN6PUo2rsX2Z+vgND/28GAk4+RPH/RyKq07u5w6dgBDj4+apdiMzqtDmWGMvMtR7KoqqetvjQY+qKWaOfPhg0iahpEeTnSn3gCp0bfitLEmj/MuOmcodX7AwD2pBqHe9mfYZyN2VDuhbggf9sWS43O1OPsVO4pAMY7Z07nnbZYR0TNU+sgVzh67cV3pz9AucF4R1zipfFsowPlarh00DigtU9r9G/ZX+1SyMoC3J2gqQiE0Lvg2LlMtcuhRnD67mk4+/zzKDt92rxs2DfDcNPam5CUm6RiZaQmjmlLVI2p7adiUrtJCHELUbuURhV1qdH28p62RWcHouhibwyIv0atsoiILIiyMpQln4K+oAClCSfhFB1d4/ae2lDk4NyliRXjsf/UP1jyTgXSfUvgMK4YcJDrg769M43taOpdm16YjjJDGXQanXR/14nsTVyIF5yCv8epinKk5d+LSK9IJF7qbCDbeLZkvxRFQazhIew/UQD3rh3ULodsTNFq4XZdb0BvACoqAABF5UVIKzCOaezr5KtmeaQiNtpSnRXu2YPi/fvhevXVcO3WTe1ybCbEXc4PdabhEZLOF8JgECip0CP1YjEA+WbjJaKmS+PmhvAP3kfp0aNwu/baK24f7BqOnOID5ka8rKN/wbcA0OnLoLi62rpcamSmRlvT3TLmoRG8IqDVaNUqi4isoF2IF8pzu0GjaKGB8fc5UdJJyFYdW4WSihIMiBiAcI9wtcshK2sT5If9KYVIOMsxbWUQvmiRxeOUvBQAgI+TD7ydvVWoiJoCNtpSnRVs347sjz+Bz8SJdt1oK6uWvq5w0CgoLtcjM68E6Xm5gFIBX1dX+Lk7qV0eEZGZg48PHC5rsDUUFUEYDNC6V/7QHu0dhWPFwLkS45i2B91y8MQULa42DMDVitJoNVPjMA2BkJKXAoMwmIdJ4NAIRM1fuI8rtNm3oKhMj7JSbwD/Do8gWweDVcdWITE3EbE+sWy0tUOxl97PCefyr7Al2SPTF8+RXpGq1kHq4pi2VGcuHTvC6+YRcL36arVLsblPDn+Cx7Y/htT8K090Yy8ctRq09DP2Oks6X4hVx1bDvc0zcA9dp3JlRETV0+fk4PTUO5A6434YSksrre8YGAsAKDCkAwDOGTKQFKLA6bobGrVOahyh7qFw1DiiVF+KjMIM84Rkph64RNR8aTQK2lyajOxoRj4uFpbhQmEZAONEZDIZFjUMQyKHINq75iGCqHkK81PgErYUf5Q9Cb1Bf+UnkF0Q5caxuk13CfHaRW7saUt15jlkCDyHDFG7jEaxIXkDjmYfxdBWQxHmEaZ2OY0mOsAdSecLkZRVgFO5Z6AoAv6u3mqXRURUrfKMDJQmJACOjig/fRpOsbEW668JjwMOAkKbg/MFOShTzgMAuodygkV75KBxQEuPlkjMTcSp3FPsaUtkZ+KCPfFXaiZ2p/6DUG/jnX+hXs5wc5Lr4+20q6apXQLZUIfQAGjdTkKvqcCp3FRE+0SoXRLZkCgrQ/JtY1F68iRid2zntQsBYKMtUY0iPSNxNPuo+dYEWZjGtU08VwCfottQkNAN/QfGqVwVEVH1nNu2RdiS9+Dg41OpwRYAonwDAb0roC3CF3//jOF/lCPdxwHd/6+FCtVSY2jl1QqJuYlIzk02/x1nbxUi++DnkwWPNnPx03l3XHVuNQAgWrKhEcj+hXi6Qsm6DUXFrigocgV81K6IbEnR6WDIzwfKy1F6/DjvEiIAHB6B6shQWgp9QaHaZTQa0/gxsjXamiZxSMoqRGJWIUSFJzq1kKenMRE1T249elg02Orz8sz/rygKnBEMAPj90HrcvtWAR9eWwc2R31/bK9Pf8L+z/kZWcZZxGXurENmFHmHGc32FUoAjZzMByDcJWXpBOtIK0mAQBrVLIRtRFAWt3a+HvigaKVnlapdDjaDFGwsQ/csvcO5xtXkiMl67yI2NtlQnhb/9hhPdu+P0XXerXUqjMH2rZRpPRhbRl3raHsvMR8qFIgDyTexARM1baWIikkbcjAsffWRe5qczfvl0rvggtndQcDDGBxpnZ7VKJBszfcjZdmYbACDAJQDuOv4tI7IHnVoEwVDuBQDYl34CgHw9bT849AGGfD0E7x18T+1SyIZMk5GdPMvJyGTgctVV0IW1wPni8yiuKIaD4oAWHrwrTGZstKU6KU9LAwBovbxUrqRxmD7wmcaTkUWUv/Hi4EJZMhxDl8E98FcEe7Jhg4iaj8Lfd6IiMxM5a7+FoaQEANDSIxIAkOVTjneHa/H97fEqVki2ZvritajC+OUjZ18msh9ero5wNAQBAI5nnwTwb6cDWZg6lUR4cpxTe9bCTw8Hrz+xI5OTQsskKTcJABDuGQ5HjaPK1ZCaeE8g1YnvpEnwGjkShuIStUtpFKaLoIulF5FTkgNvZ291C2okPm46+LrpkOeQAUePI3Cp0ENRFLXLIiKqNd9JtwMOWngOHWruTdvGLxq7cozrKwpj0C6
snXoFks39t5G2lSfHhCOyJ366MJzDCSiOxuFPZLsrjGN1y8HLIx8uoV8hWe8JYLba5ZCNifJy5H7/Pcp3fg9te8GhEcg+e9q+++67iIyMhLOzM3r27Ik9e/bUuP2aNWsQFxcHZ2dndOzYET/99FMjVdo8aT094RgUqHYZjcLV0RXBbsYxEGUb1zbK3w0anXF2dX/ncJWrISKqO9/x4+Hg8++sHT38olBRFAnlbE8Un74LN8YMULE6sjVPnSeeu/Y5fDz4Y/x8y8+cZZ3IzkRcaszQOJ2Hh7MDAtyd1C2oEeWW5iK7JBsAx7u0d70jjF8wGzR5yC7Ou8LW1Ow5OODsSy8j8Mc9aHGBdwmRHfa0Xb16NR5++GEsWbIEPXv2xFtvvYX4+HgcP34cgYGVGxp37tyJcePG4aWXXsJNN92ElStXYuTIkdi/fz86dOigQoKm5/w7iwCtBgHTp1det3gxoDcg4IH7VajMthYfWAyNokGkZyQyCzORnJuMzoGdAQBLDi6BQRgwvXPln0lzd/vX86HVaBAdMByHM4yNtqaLwSlrX4DeYMBntzyjYoVERHW35rYhiD2ahlbXTsfLO95FttMRtHrgWwDAB/ffBhgMmLZ4jbpFktWY/obf2+neSuvs+W84kQxM16odA67C3nxAozuP6AB3KIpi19eql5/XTJ1JAl0C4eboxvOanbr96/nQKhqgwgNwyMeu00dxY5ueAOz7c5npd3zpqKcrrbPn3B9MvxXQaHDzLbdgU8omlDqeNd8lZM/XqutmzQO0Ggx/vfIxXTd7PqA3YPibc1SorGmwu562b7zxBu6++25MnToV7dq1w5IlS+Dq6opPPvmkyu0XLlyIIUOG4NFHH0Xbtm0xf/58dO3aFYsWLWrkypswrQZZb7+Dc2+8gfSnn8aFjz+BEALnFy9G1tvvAFq7exsBADSKBu8eeBcFZQUA/u1pu+TgErx74F1oFPvMrdVosC9vFfbnfQmNk7HRtp1/NKasfQH78lZBq7HP3ERkv4QQCE3NhlNZBZ7e8z4cDXp4VBTDJzQAH9x/G/r8cgjguc2umP6GLzm4xGK5vf8NJ5KB6Vr1t7TfAQAa3QW08ne2+2vVy89rpvk2Wnm14nnNjmk1GuzLXwXlUl+7/RnGSffs/b1u+h2fsvYFi+X2nhsaDfr8cgjfpf2Fa1/+ELNHvo5rQq6x/2tVrQYxP6w0NtBeZt3s+Yj5YaXdtjfVliKEEGoXYS1lZWVwdXXFV199hZEjR5qXT548GTk5Ofjuu+8qPadly5Z4+OGH8dBDD5mXzZkzB99++y0OHjxYq9fNy8uDl5cXcnNz4enp2dAYTZK5gRaAQ2gIvEePRtbb78B/5gNV9sC1F6aLIACI9opGoFsgdqXvwozOM3Bvp3vx5fEvkVmYiZuibkKUdxQA4OiFo9iUsqlOr6PVaDGj8wzz43WJ65Ccm4wBLQegvX97AMbJ0L5P/L7OGe7tdC90Wh0A4JeUX3DkwhH0Cu2Fq4OvBgCcLTyL1cdXWzxn3fHfkVl2BEIoUBSB1k434kTpj+jmObbKbzyJiJo6fUEhNo0Zhoikc/iml4KDbWIwMNUFfX45hF8HXoVpi1ZfeSfUrFz+NzzcIxwjokfg3QPvmv+GE1HzZWq8EQYNFI0BLoZWKNYkI8ypC9aPXW7ebsXRFbhQfAEjY0aipWdLAMDhrMPYcnpLnV5Pp9VZnDfWJqzFmfwzGBw5GHG+cQCAxJxE/Jj0Y52zzOg8A1qNFgCw4dQGnMg+gT5hfdAlsAsAIL0gHV+d+AoAsP/sfuw7tw+hbqFIL0xHB78OOHzhMM9rdsz0XgcAV0NrCCFQrE2Aiz4WMZ7GO4M1iiM6ud9qfk5i8TbkVaSjpfM18HM0fkbNrUhDUvH2Or/+Ve63QqsYJ8JKKdmF7PJkhDp1QZCuLQCgUJ+FE0U/13m/7dxGwEljHIc6rfQvnCs7ikBdW7RwMr7vfzzzMS44bjDnPJl3uFLuqrR2jYeb1g8AcLbsCNJLD8DXMQoRztcAAPSiHIcK6t5bNcqlL7wcWgAALpQn4nTJH/B0aIFol77mbQ7kfwmBijrt97/HyHntXNzyWxbW9W6LsFaD4b7tc8SmXsC63m2hjHkRuvwctNr+A/Q6J5wc/O8xD/nrd3imJeNcu264GGU8No5FBYja8i2E1gEnho41bxt8cDe8zpxEVpvOuBBr/FlqS0sQs8l4njl+00TztoH//Amf5GO4ENMBWXGdAQBKRTlabzBeN58YchuEg/H94X/sAPxOHsbFVnE41767eR9tfvgcAHBy0C3QO7kAAPwSDsP/+AHkhscgs9M1cF21DP13fYuk8Dgcmf0S2vy8BrE/rsTJm8ZX2QPXHtS2HdGuhkfIysqCXq9HUFCQxfKgoCAcO3asyudkZmZWuX1mZma1r1NaWorS0lLz47w8+x9bJmD6dOhzc3Fx2XJUnD0nRYMtYGzwTCtIw7cnv0VibiIScxMtLoq+O/kdDmUdwlUBV5kbbU/mnMSHf39Yp9fRaXQWjbYbT23E9tTtCPMIMzfaphak1nm/AHBnxzvNjbY7Undg7cm1cHV0NTfaZpVkVbtfRREQAmywJaJmT+vuhiE/bcf8u3ph9G85GL4nAY56sMHWjt3b6V4czz6OX07/gjP5Z9hgS2RHlo56GlPWwtyYVaxJBmDsdXu5tQlrcfzicXQP7m5utD2WfazO19Qejh4W546fkn/C7ozdiPKOMjfanso7Va9r9fs63wctjI22W09vxU/JP8HH2cfcaHu26Gyl/aYXpgMAG2wlsHTU07ju0yPI1RxCkeaEeXmxNgF/FyYAAIRBh9/2djWvcwn/BQ7ux7EnwQEVucY+elr3o3ANX1vn19+5rwsgjJ8lnUO2wdF7P/YmlaI827hM43wGbq3qvt9dB1pDVBjnHXAK/BU6v19RdvoCSs8ZG68Ux6vgHrPh35zayrmrsvtwCxhKwgAAOt/dcApaj/KcbijJCDBuoJTCI67u9f5x1Av6QuNk7A5ef8IldC0q8uOwITXMvI17m2+haMrrtN9Kx6hPDsqFC8b+fhSGnUehEcAfLQOwOOBOYGsiWuZl4v0tXyNX54ZZjv8e88f+3IxuqX9hS0Y5vk0xHpvAwmws2/Q1SrSOeND5avO2M//ahu4pf2BnaiFWpRobUb1KC7Bq09fG9W69zNvec2gHeiT9in0pF7EswwMA4FRRhm8vbfuoU1eUOhjHE5985Ddcc2Izvo3qg/fP/TunxPpL2z7j0AG5TsZ93HZ8J3odXY/1ET3xbnYAEHQdUuOKMenYRoTPGg1Hg96uG2zrwq4abRvLSy+9hHnz5qldRqMLfvJJ5Kz8AqK8HIqjo9032Jo8d+1zWJe4DnqhrzQ+XnxkPK4KuAot3FuYl7XyaoWJbSdWtatqaRWtxeN+4f0Q7hGOaO9o87JQt9A67xcAHDT//ppfE3IN3Bzd0N6vvXmZn7Nftfv97MgKY8OtQcsGWyKyC+NfWYvC62+Aox4o14INtnZuQb8F6PpZV+iFHo4aRzZsENmRpa
OeRoelX0JRDBBCwe3tJsDbydtimxujbsTVRVcjxC3EvCzGO6bO19ROWstJzga0HIAY7xjzeJOAsUd/fa7VNZeNWHhdi+vg6+xrbggGAH8X/0r7XXlsJQzCwPOaJL659T30/+q6Sx1qFMQ6D7FYr1EccFXvSPPjpJI+yNNHoWVcJ/g6GJfnVmiRXDq0zq99Va8oaBTj58nTpdciuyIIoTFdEeho3G+h3g0JJXXfb7urY6G71NM2vawnzpW7IzCiPUJjjfstNfjhaPFQJJRsqDZ3VVp3joOr1h8AcLY8HxllgG+LaLSMMu5XL8rxd1Hd643q2AGeWmMD7YWKcpwpLYSnawtERUSatzlUFA+D0Ndpv1Udo5Px/ijf/Skc9YBeAQqGP4ipkcZtnPO8cKLsRlTonDD1smPu6nwdTqSGonWHbpgabVyuK/LDieIbYdBqLbb1drsWJ8L9ERnXBVPbGJc7lBbjRP6NAGCxbYDnNTgR6okWsR0xtZ1xuaaiHCdyjNtO7B0Fw6WetiG+PXAi0BkBUe0wteO/+ziRZdx2zLUxqHA2NhIHBXTHCT8NvCNiMbXzpW173w39Yz/D0aBHuUbLBttLODxCPYZHqKqnbXh4uF0PjwD8O0SC4ugIUV4uRU9b4N/bKx01jig3lEvzbfa/t51poWj07GlLRHbBNC5YuRbsaSsBWf+GE8lA1mtVntfkI+t7Xdbcsl6rmsawLddopehpW9vhEexqRF+dTodu3bph8+bN5mUGgwGbN29Gr169qnxOr169LLYHgE2bNlW7PQA4OTnB09PT4p+9MzXY+s98AHF/H4L/zAeQ9fY7OL94sdql2ZTpomhG5xnYf/t+zOg8o8qJTeyN6Q9kN8+xODz1ALp5jq1yMHgioubEdBH868CrcNU/R/HrwKvQ55dDxhl5ye7I+jecSAayXqvyvCYfWd/rsuaW9VrV1GB78qbxuOrIYZy8aXyVk5NJSdiZVatWCScnJ7F06VJx5MgRMW3aNOHt7S0yMzOFEELcfvvt4oknnjBv//vvvwsHBwfx+uuvi6NHj4o5c+YIR0dH8ffff9f6NXNzcwUAkZuba/U8TcG5d98VR9rEiXPvvlur5fbivQPviQ5LO4j3DrxXq+X2YvI3z4sOSzuIyd88X6vlRETNwfszxogjbeLE+zPG1Go5NW+y/g0nkoGs16o8r8lH1ve6rLllvVb9/pHnxJE2ceL7R56r1XJ7Udt2RLsb0/a2227D+fPn8eyzzyIzMxOdO3fGhg0bzJONnT59GhrNvx2Mr732WqxcuRL/+9//8NRTTyE2NhbffvstOnSofkZC6egNVQ6FYH6sN6hQlO0ZhKHK241Mjw3CPnPrDYYqbz0xTfigN9hnbiKycwZDlbeXTVu02th7gec2uyLr33AiGch6rcrzmnxkfa/Lmlvaa1W9ocqhEIa//gzWXVovM7sa01YttR2LgoiIiIiIiIiIiOQl5Zi2RERERERERERERM0dG22JiIiIiIiIiIiImhA22hIRERERERERERE1IWy0JSIiIiIiIiIiImpC2GhLRERERERERERE1ISw0ZaIiIiIiIiIiIioCWGjLREREREREREREVETwkZbIiIiIiIiIiIioiaEjbZERERERERERERETQgbbYmIiIiIiIiIiIiaEDbaEhERERERERERETUhbLQlIiIiIiIiIiIiakLYaEtERERERERERETUhDioXYA9EEIAAPLy8lSuhIiIiIiIiIiIiJoqU/uhqT2xOmy0tYL8/HwAQHh4uMqVEBERERERERERUVOXn58PLy+vatcr4krNunRFBoMB6enp8PDwgKIoapdjU3l5eQgPD8eZM2fg6empdjmNhrmZWwbMzdyykDU7czO3DJibuWXA3HLlBuTNztzMba+EEMjPz0doaCg0mupHrmVPWyvQaDQICwtTu4xG5enpafe/RFVhbrkwt1yYWz6yZmduuTC3XJhbLswtH1mzM7dcZMldUw9bE05ERkRERERERERERNSEsNGWiIiIiIiIiIiIqAlhoy3ViZOTE+bMmQMnJye1S2lUzM3cMmBu5paFrNmZm7llwNzMLQPmlis3IG925mZu2XEiMiIiIiIiIiIiIqImhD1tiYiIiIiIiIiIiJoQNtoSERERERERERERNSFstCUiIiIiIiIiIiJqQthoS0RUhW3btqG4uFjtMoiIiIioBsnJyaioqFC7DGokPNby4TRMJDM22hL9x8GDB/H8889j8eLFyMrKsliXl5eHO+64Q6XKbOujjz7C5MmT8emnnwIAVq9ejbZt2yIqKgpz5sxRubrGN3jwYJw6dUrtMmzm3LlzFo8PHDiAyZMno3fv3hg9ejS2bdumTmEqKC0tRWJiIkpLS9UuxWY6duyI+fPn48yZM2qX0iScPXsWmZmZapfRKPR6Pc6ePYvz58+rXUqjOHLkCKZPn44uXbogJCQEISEh6NKlC6ZPn44jR46oXZ4qEhMT0b9/f7XLsJmMjAx8/vnn+Omnn1BWVmaxrrCwEM8995xKldnWpk2bMGfOHGzZsgUAsGPHDgwdOhT9+/c3X8vJok2bNkhISFC7jEaTnp6OOXPmYMKECZg9ezaOHTumdkk2sWHDBvz9998AAIPBgPnz56NFixZwcnJCWFgYXn75ZbtszBs+fDg+++wz6TqPlJaWYvbs2bj++uvxyiuvAACef/55uLu7w8PDA+PHj0deXp7KVdrGwYMHMWnSJERFRcHFxQVubm7o2LEjnnnmGbvNDABZWVl49dVXMWrUKPTq1Qu9evXCqFGj8Nprr0lz3VobirDHMx3Z3NGjR3HjjTciKSlJ7VKs6ueff8bw4cMRGxuL/Px8FBYWYs2aNbjhhhsAGD/oh4aGQq/Xq1ypdb311lv43//+h/j4eOzatQszZszAm2++iVmzZkGv12PBggV47bXXMG3aNLVLtbquXbtWufzAgQOIi4uDs7MzAGD//v2NWZbNabVaZGRkIDAwEDt37kS/fv1w7bXXokePHjhw4AC2bt2KzZs34/rrr1e7VKtaunQp2rRpg169eqGkpAQzZszAsmXLIISARqPBnXfeiYULF8LJyUntUq1Ko9HA19cXOTk5GDhwIO6++27cfPPNcHBwULs0m8rOzsa0adOwZ88e3HjjjVi0aBHuuecefPLJJ1AUBT179sTXX3+NkJAQtUu1uh9//BGvvPIK9uzZg/LycgCAh4cHhg8fjhdeeAEtW7ZUuULrW79+PUaOHImuXbsiPj4eQUFBAIx/uzdt2oR9+/bhu+++Q3x8vMqVNq6DBw+ia9eudnftAgB79+7F4MGDYTAYUF5ejhYtWuDbb79F+/btAdjvddvnn3+OqVOn4qqrrsKJEyfwzjvvYNasWRg9ejQMBgM+//xzrFixAqNHj1a7VKv6v//7vyqXf/fdd+jfvz88PDwAAN98801jlmVzrq6uSElJQUBAAI4cOYJrr70WAQEB6NKlC/7++2+cPn0au3btwlVXXaV2qVYVFxeHDz/8EH369MFLL72EBQsW4Omnn0bbtm1x/
PhxvPTSS5g1axYef/xxtUu1Ko1GA61WCzc3N4wbNw533XUXunXrpnZZNvfwww9j9erVGDduHH766SfccMMN+OGHH/Diiy9Co9Hg2WefxdChQ/H222+rXapVbdy4EaNGjcKwYcPg4uKCb775BnfccQfc3Nzw9ddfQwiB3377DcHBwWqXalV79+5FfHw8XF1dMXDgQItrts2bN6OoqAgbN25E9+7dVa5UfWy0pXqx1w8A1157LW644Qa88MILEELgtddew/z587FmzRoMGTLEbi/+27Zti2eeeQbjx4/HX3/9hR49emDJkiW48847AQAff/wx3nvvPfz5558qV2p9jo6OGDhwIK655hrzMiEE5s+fj3vvvReBgYEAYHe9jTUaDTIzMxEYGIjBgwcjPDwcH3/8sXn9Qw89hL///hubN29WsUrri4qKwhdffIGePXvi0UcfxVdffYU33njD/AHgsccew80334xXX31V7VKtSqPRIDU1FXv27MEnn3yC9evXw8fHB5MmTcKdd96Jtm3bql2iTdx5553Ys2cP7rnnHnz11Vfw9vZGcnIyFi9eDI1GgwcffBBt27bFsmXL1C7Vqj777DPMmDED06ZNg7OzMz7++GNMmTIFERERWLVqFf755x/s3LkTsbGxapdqVZ06dcLNN99cbc/KuXPn4ptvvsGhQ4cauTLbutIH2LS0NLz++ut2d+0CAIMGDUJ4eDg++ugjFBYW4vHHH8eXX36JTZs2oUuXLnZ73dalSxdMnToVM2fOxObNm81fxsyaNQsAsGDBAqxduxa//fabypVal0ajwfXXX49WrVpZLF++fDlGjBgBb29vALC7nsaXX7ONHDkSBoMB33zzDRwcHGAwGDBhwgQUFBRg3bp1apdqVc7Ozjhx4gRatmyJjh074tlnn8Wtt95qXv/jjz/ioYcesrte1hqNBocPH8bPP/+MTz75BP/88w86duyIu+66CxMmTICPj4/aJdpEy5Yt8cknn2DgwIFISkpCbGwsvvnmG9x8880AjHcX3H333XZ3J2SXLl1wzz334N577wVgzDlz5kwcPXoU5eXlGDp0KMLDw+3uvHbNNdegU6dOWLJkCRRFsVgnhMC9996LQ4cOYdeuXSpV2IQIoirMmjWrxn8TJ04UGo1G7TKtztPTU5w8edJi2YoVK4Sbm5tYt26dyMzMtMvcLi4uIiUlxfzYyclJHD582Pw4ISFBeHt7q1Gazf32228iOjpaPPvss0Kv15uXOzg4iH/++UfFymxLURRx9uxZIYQQISEhYteuXRbrDx8+LPz9/dUozaacnJzM7/XWrVuL9evXW6zfvn27aNmypRql2dTlx1sIIdLT08WLL74oYmNjhUajEb169RIff/yxihXaRkhIiPj999+FEEJkZmYKRVHEzz//bF7/22+/iRYtWqhVns3ExcWJVatWmR/v3btXhIWFCYPBIIQQ4rbbbhOjRo1SqzybcXZ2FseOHat2/bFjx4Szs3MjVtQ4FEURoaGhIjIyssp/oaGhdnntIoQQPj4+4vjx4xbLXnrpJeHj4yP27Nljt9dtbm5uIikpyfzY0dFRHDx40Pz46NGjws/PT43SbOqLL74QYWFh4pNPPrFYLtM1W3h4uNixY4fF+v3794uQkBA1SrOpy69Pg4KCxP79+y3WnzhxQri4uKhRmk3995rtjz/+ENOmTRNeXl7CxcVFjBs3TmzevFnFCm3jv59HHR0dLT6PJicnC1dXVzVKsylnZ2eRnJxsfmwwGISjo6NIT08XQgixY8cOERAQoFJ1tuPs7CyOHj1a7fqjR4/a5TVbfXBMW6rSwoULsX37dvz1119V/rPXsZOcnJyQk5NjsWz8+PH46KOPcNttt2Ht2rXqFGZjrq6uKCwsND8OCAiAu7u7xTb2Ouh/7969sW/fPpw4cQLXXnstEhMT1S6p0eTn5yMvLw/Ozs6VhgNwdnZGUVGRSpXZTnBwsPkYFxYWwt/f32J9QEAALly4oEZpNvXfb7BDQkLw5JNP4sSJE9i8eTOio6Mxc+ZMlaqzndzcXLRo0QIAEBQUBAcHB4uhEEJDQyud8+1BSkoKevbsaX7cvXt3ZGZmIiMjA4DxFsStW7eqVZ7NREZG4scff6x2/Y8//oiIiIhGrKhxRERE4M0330RycnKV/2r6mdiDkpISi8dPPPEEnnrqKQwePBg7d+5UqSrbcnR0tBi/18nJyeK6zcnJyS7Hwxw7dix+/fVXfPzxx7jllltw8eJFtUtqFIqimP+OazQaeHl5Waz39va2y5/FqFGj8MILL0Cv1+Pmm2/G4sWLLcawfeedd9C5c2f1CmwkPXr0wPvvv4/09HQsXrwYZ86cwaBBg9Quy+patmxp7lW5d+9eKIqCPXv2mNf/8ccf5ms6e9KiRQscP37c/DgxMREGgwF+fn4AgLCwMBQUFKhVns0EBwdbHN//2rNnj3nIBNnZ92B2VG8xMTGYNWsWJk6cWOX6AwcO2OXYOp07d8bWrVsrZRs7diyEEJg8ebJKldlWXFwcDh06ZL5F+r+TFR07dgyRkZEqVNY4vLy88MUXX+DTTz/Fddddh3nz5lVq5LJHrVu3BmC8BeXPP/9Ely5dzOv++ecfhIaGqlWazUyYMAFPP/00fvrpJ9x+++147rnnsHLlSri7u6OoqAhz585F79691S7T6kQNIyH169cP/fr1s8uJDmJjY/HDDz9gxowZWL9+PZydnfHzzz+jQ4cOAIzjiP33Nlt7EBkZiT///NN83t6/fz80Go354tfX19c8zq09ee655zB+/Hhs27atyvHRNmzYgJUrV6pcpfV169YN+/btw5gxY6pcryiKXU7WAwAdOnTAzp07K43lOXv2bBgMBowbN06lymwrJiYGx44dQ5s2bQAYh8AwjecKGD/0h4WFqVWeTUVGRmLHjh2YN28eOnXqhA8//NDur9mEEGjdujUURUFBQQEOHTpk8Z4/efKk3Y13CQAvvvgiBg4ciLi4OPTq1Qtr1qzBpk2b0Lp1a5w8eRLZ2dnYuHGj2mU2GldXV0yZMgVTpkzBiRMn1C7H6u69915MmTIFH330Efbt24fXX38dTz31FI4dOwaNRoP33nsPjzzyiNplWt2kSZNw11134emnn4aTkxPeeOMNjBgxAjqdDoCx3cUer1Vnz56NadOmYd++fRgwYECla7YPP/wQr7/+uspVNg1stKUqde/eHfv27au20dZePwDcd9992LFjR5Xrxo0bByEEPvzww0auyvZeeeUVuLm5Vbv+9OnTuOeeexqxInVMnToV1113HSZMmGC3PYtN/tvL7r8TMSUnJ9vlxHNz5szB4cOHERUVhe7du+PXX39FUFAQWrRogfT0dPj5+WHTpk1ql2l1kydPhouLS43beHp6NlI1jefRRx/F5MmT8dZbb+HMmTP4/PPP8eCDD+KPP/6ARqPBN998gzfeeEPtMq1uxowZuOuuu7B37144Ozvjo48+wu233w6tVgvA2FvF9KWNPbn11lvRokULvP3221iwYAEyMzMBGHtz9OrVC9u2bUOv
Xr1UrtL6nnvuuRrvjGjXrh2Sk5MbsaLGM2nSJGzfvt08FuDlHnvsMQghsGTJEhUqs62nnnrKYlzL/56///zzz2ob8e2BRqPBvHnzMGjQIEyaNMnuxiz+r/+OZRkTE2PxePfu3Rg1alRjltQovLy8sHPnTnz88cdYt24dIiMjYTAYUFZWhnHjxuG+++6zyy8n+vbta26wq449/g1/6KGHEBgYiF27duGOO+7AuHHjzGMZFxUVYdasWXj66afVLtPqnnrqKRQWFmL+/PkoLS1FfHw8Fi5caF7fokULvPfeeypWaBszZsyAv78/3nzzTSxevNh8HtdqtejWrRuWLl1q13/H6oITkVGVMjMzUVpaape3ERJdicFgQH5+Pjw9Pe2+94asNmzYgHXr1iEpKQkGgwEhISHo3bs3xo8fX+MXGNT8/P7779i9ezd69eqFa6+9FkeOHMHLL7+MoqIiDB8+3G7voHjvvffw+eefmz8APPPMM3B2dgYAJCQkQK/XIy4uTuUqiYgapqCgAImJiWjbtu0VG7qIiKjpKS8vR1ZWFgDA398fjo6OKlfUtLDRloiIiIiIiIiIiKgJ4URkRERERGT3jh49iqioKLXLaHSy5gbkzc7ccmFuuTC3XOw598GDB/H8889j8eLF5p62Jnl5ebjjjjtUqqxpYaMtEREREdm9srIypKSkqF1Go5M1NyBvduaWC3PLhbnlYq+5f/75Z/To0QOrVq3CK6+8gri4OIs5V4qLi7Fs2TIVK2w6OBEZERERETV7Dz/8cI3rz58/30iVNC5ZcwPyZmfuqjG3fWHuqjG3fZE199y5czF79my88MILEELgtddew4gRI7BmzRoMGTJE7fKaFI5pS0RERETNnlarRefOneHp6Vnl+oKCAuzfv9/uZpqXNTcgb3bmZu7LMTdz2wPmliu3l5cX9u/fj+joaPOylStXYtq0aVi1ahWuvvpqhIaG2l3u+mBPW6Iq6PV6LF26FJs3b8a5c+dgMBgs1m/ZskWlymxL1tyAvNllzS0rHm+yZzExMZg1axYmTpxY5foDBw6gW7dujVyV7cmaG5A3O3Mz9+WY274wN3Nfzl5zOzk5IScnx2LZ+PHjodFocNttt2HBggXqFNYEsdGWaiTrB/wHH3wQS5cuxY033ogOHTpAURS1S2oUsuYG5M0ua26e23i8L2evx1u23N27d8e+ffuq/eCjKArs8QYzWXMD8mZnbua+HHPbF+Zm7svZa+7OnTtj69atlRqkx44dCyEEJk+erFJlTQ+HR6Aa3X///eYP+CEhIZU+4L/55psqVWZb/v7+WL58OYYNG6Z2KY1K1tyAvNllzc1zG4/35ez1eMuWOzMzE6WlpYiIiFC7lEYla25A3uzMzdwyYG7mloGsudeuXYsdO3ZUey26cuVKfPjhhxaTk8mKjbZUI1k/4IeGhmLbtm1o3bq12qU0KllzA/JmlzU3z2083jKQNTcRERERkT3QqF0ANW06nQ4xMTFql9HoHnnkESxcuNAub0Woiay5AXmzy5qb5zYebxnImpuIiIiIyB6wpy3VaMGCBUhKSsKiRYukGfsQAEaNGoWtW7fC19cX7du3h6Ojo8X6b775RqXKbEvW3IC82WXNzXMbj7cMZM1NRERERGQPOBEZ1ei3337D1q1bsX79eqk+4Ht7e2PUqFFql9HoZM0NyJtd1tw8t8lF1uMta24iIiIiInvAnrZUo6lTp9a4/tNPP22kSoiIrIfnNrnIerxlzU1EREREZA/YaEtUg/Pnz+P48eMAgDZt2iAgIEDlihqHrLkBebPLmltWPN5ERERERERNG4dHoFqR7QN+YWEhHnjgASxfvhwGgwEAoNVqMWnSJLzzzjtwdXVVuULbkDU3IG92WXOb8NzG4y0D2XLr9XosXboUmzdvxrlz58zvdZMtW7aoVJltyZobkDc7czP35ZjbvjA3c1+OueXFRluqkawf8B9++GFs374d69atQ+/evQEYxwacOXMmHnnkEbz33nsqV2gbsuYG5M0ua26e23i8ZTjesuZ+8MEHsXTpUtx4443o0KGDNJOwyZobkDc7czO3DJibuWXA3HLlrhNBVINp06aJqKgo8dNPP4nc3FyRm5srfvzxRxEdHS3uvfdetcuzGT8/P7F169ZKy7ds2SL8/f0bv6BGImtuIeTNLmtuntss8XjbJ1lz+/n5iR9//FHtMhqdrLmFkDc7c8uFueXC3HJhbqoOe9pSjb7++mt89dVX6Nevn3nZsGHD4OLigjFjxthtr6yioiIEBQVVWh4YGIiioiIVKmocsuYG5M0ua26e2yzxeNvn8ZY1t06nQ0xMjNplNDpZcwPyZmduuTC3XJhbLsxN1dGoXQA1bbJ+wO/VqxfmzJmDkpIS87Li4mLMmzcPvXr1UrEy25I1NyBvdllz89zG4w3Y//GWNfcjjzyChQsXQkg2166suQF5szM3c8uAuZlbBswtV+66UAR/OlSDAQMGwM/PD8uXL4ezszMA4wf8yZMnIzs7G7/88ovKFdrG4cOHER8fj9LSUnTq1AkAcPDgQTg7O2Pjxo1o3769yhXahqy5AXmzy5qb5zYebxmOt6y5R40aha1bt8LX1xft27eHo6OjxfpvvvlGpcpsS9bcgLzZmZu5L8fc9oW5mftyzC0vDo9ANVq4cCHi4+MRFhZW5Qd8e9WhQwckJCRgxYoVOHbsGABg3LhxmDBhAlxcXFSuznZkzQ3Im13W3Dy38XjLcLxlze3t7Y1Ro0apXUajkzU3IG925pYLc8uFueXC3FQd9rSlKyoqKrL4gN+2bVu7/4BPRPaP5za5yHq8Zc1NRERERNTcsdGW6JLvv/8eQ4cOhaOjI77//vsatx0xYkQjVWV7suYG5M0ua25Z8XiTrM6fP4/jx48DANq0aYOAgACVK2ocsuYG5M3O3MwtA+Zmbhkwt1y5a4ONtlSJrB/wNRoNMjMzERgYCI2m+jn6FEWBXq9vxMpsS9bcgLzZZc3NcxuPd3Xs6XjLmvtyhYWFeOCBB7B8+XIYDAYAgFarxaRJk/DOO+/A1dVV5QptQ9bcgLzZmZu5mZu57Q1zM7cMuetEEP2Hoiji7Nmz5v+v7p9Go1G5UiKi2uO5TS6yHm9Zc19u2rRpIioqSvz0008iNzdX5Obmih9//FFER0eLe++9V+3ybEbW3ELIm525mZu5mdveMDdzy5C7LthoS1SFZcuWiZKSkkrLS0tLxbJly1SoqHHImlsIebPLmltWPN4kAz8/P7F169ZKy7ds2SL8/f0bv6BGImtuIeTNztyWmNs+Mbcl5rZPzG3J3nPXRfX3SRIBWL58OUpLSystLysrw/Lly1WoqHFMnToVubm5lZbn5+dj6tSpKlTUOGTNDcibXdbcPLdZ4vG2T7LmLioqQlBQUKXlgYGBKCoqUqGixiFrbkDe7MxtibntE3NbYm77xNyW7D13XbDRlmok6wd8IQQURam0PDU1FV5eXipU1DhkzQ3Im13W3Dy3WeLxtk+y5u7VqxfmzJmDkpIS87Li4mLMmzcPvXr1UrEy25I
1NyBvduZmbuZmbnvD3MwtQ+66cFC7AGraZPuA36VLFyiKAkVRMGDAADg4/PsrotfrkZycjCFDhqhYoW3ImhuQN7usuU14buPxBuz3eJvImnvhwoWIj49HWFgYOnXqBAA4ePAgnJ2dsXHjRpWrsx1ZcwPyZmdu5mZu5rY3zM3cMuSuCzbaUpVk/YA/cuRIAMCBAwcQHx8Pd3d38zqdTofIyEjccsstKlVnO7LmBuTNLmtuntt4vE3s+XjLmtukQ4cOSEhIwIoVK3Ds2DEAwLhx4zBhwgS4uLioXJ3tyJobkDc7czM3czO3vWFu5pYhd10oQgihdhHU9MybN8/830ceeaTaD/g6nU6tEm1q2bJlGDt2LJycnNQupVHJmhuQN7tsuXlu4/E2sefjLWtuIiIiIiJ7wkZbqpFsH/BNzpw5A0VREBYWBgDYs2cPVq5ciXbt2mHatGkqV2c7suYG5M0ua26e23i8ZSBT7u+//x5Dhw6Fo6Mjvv/++xq3HTFiRCNVZXuy5gbkzc7czF0d5m7+mJu5q8Pc8mKjLdVI1g/4ffr0wbRp03D77bcjMzMTrVu3Nnfdf+CBB/Dss8+qXaJNyJobkDe7rLl5buPxluF4y5Rbo9EgMzMTgYGB0Giqn2dXURTo9fpGrMy2ZM0NyJuduZm7KsxtH5ibuavC3HKr/idEBGD8+PHYunUrACAzMxMDBw7Enj178PTTT+O5555TuTrbOXz4MHr06AEA+PLLL9GxY0fs3LkTK1aswNKlS9UtzoZkzQ3Im13W3Dy38XjLcLxlym0wGBAYGGj+/+r+2dvFv6y5AXmzMzdzMzdzM7d9YG65ctcXG22pRrJ+wC8vLzffTvrLL7+Yu+XHxcUhIyNDzdJsStbcgLzZZc3NcxuPtwzHW9bcy5cvR2lpaaXlZWVlWL58uQoVNQ5ZcwPyZmduS8xtn5jbEnPbJ+a2ZO+560QQ1cDNzU0kJycLIYQYPny4ePnll4UQQqSkpAhnZ2cVK7OtHj16iMcff1zs2LFDODs7iwMHDgghhNi1a5do0aKFytXZjqy5hZA3u6y5eW7j8RbC/o+3rLk1Go04e/ZspeVZWVlCo9GoUFHjkDW3EPJmZ25LzG2fmNsSc9sn5rZk77nrgj1tqUbt27fHkiVL8Ouvv2LTpk0YMmQIACA9PR1+fn4qV2c7r7zyCt5//33069cP48aNQ6dOnQAYB8029VqyR7LmBuTNLmtuntt4vAH7P96y5hZCQFGUSstTU1Ph5eWlQkWNQ9bcgLzZmdsSc9sn5rbE3PaJuS3Ze+66cFC7AGraXnnlFYwaNQqvvfYaJk+eLM0H/H79+iErKwt5eXnw8fExL582bRpcXV1VrMy2ZM0NyJtd1tw8t/F4A/Z/vGXL3aVLFyiKAkVRMGDAADg4/HuZq9frkZycbG64tiey5gbkzc7czG3C3MxtL5ibuU3sOXd9sNGWaiTrB3zA+K3Pvn37kJiYiPHjx8PDwwM6nY657Zis2WXMzXMbjzdg/8dbttwjR44EABw4cADx8fFwd3c3r9PpdIiMjMQtt9yiUnW2I2tuQN7szM3cJszN3PaCuZnbxJ5z14cihBBqF0FNW0VFBbZt22bxAT89PR2enp4Wv1z2JCUlBUOGDMHp06dRWlqKEydOICoqCg8++CBKS0uxZMkStUu0CVlzA/JmlzU3wHMbj7f9H29AztzLli3D2LFjzZPuyULW3IC82ZmbuWXA3MwtA+aWK3ddcExbqlFKSgo6duyIm2++GTNmzMD58+cBGG+5nD17tsrV2c6DDz6I7t274+LFi3BxcTEvHzVqFDZv3qxiZbYla25A3uyy5ua5jccbsP/jLWvu/v37m7MCwJ49e/DQQw/hgw8+ULEq25M1NyBvduY2Ym7mtkfMbcTczC29xp75jJqXm2++WUycOFGUlpYKd3d3kZiYKIQQYuvWrSImJkbl6mzH19dXHDt2TAghLHInJycLFxcXNUuzKVlzCyFvdllz89zG4y2E/R9vWXNfd911Yvny5UIIITIyMoSHh4fo1auX8Pf3F/PmzVO5OtuRNbcQ8mZnbuZmbua2N8zN3DLkrgv2tKUa/frrr/jf//4HnU5nsTwyMhJpaWkqVWV7BoMBer2+0vLU1FR4eHioUFHjkDU3IG92WXPz3GaJx9s+yZr78OHD5onWvvzyS3Ts2BE7d+7EihUrsHTpUnWLsyFZcwPyZmdu5mbupeoWZ0PMzdzMvVTd4poINtpSjWT9gD948GC89dZb5seKoqCgoABz5szBsGHD1CvMxmTNDcibXdbcPLcZ8Xjb9/GWNXd5ebl5bLRffvkFI0aMAADExcUhIyNDzdJsStbcgLzZmZu5Aea2V8zN3ABzExtt6Qpk/YC/YMEC/P7772jXrh1KSkowfvx4c8+kV155Re3ybEbW3IC82WXNzXMbj7cMx1vW3O3bt8eSJUvw66+/YtOmTRgyZAgAID09HX5+fipXZzuy5gbkzc7czA0wt71ibuYGmJvAMW2pZmfOnBHt2rUTbdu2FQ4ODuKaa64Rfn5+ok2bNuLs2bNql2dT5eXl4rPPPhOPPvqouO+++8SHH34oioqK1C7L5mTNLYS82WXMzXMbj7cMx1vW3Fu3bhXe3t5Co9GIqVOnmpc/+eSTYtSoUSpWZluy5hZC3uzMzdxCMLe9Ym7mFoK5SQhFCCHUbjimpq2iogKrVq3CoUOHUFBQgK5du2LChAkWM48TETU3PLfJRdbjLWtuvV6PvLw8+Pj4mJedOnUKrq6uCAwMVLEy25I1NyBvduZmbuZmbnvD3MwtQ+7aclC7AGr6HBwcMHHiRLXLaFTLly+vcf2kSZMaqZLGJWtuQN7ssuYGeG6rCo+3/ZE1txAC+/btQ2JiIsaPHw8PDw/odDq4urqqXZpNyZobkDc7czM3c9sv5mZu5ib2tKUayfoB//JveQDjANlFRUXmk0d2drZKldmWrLkBebPLmpvnNiMebyN7Pd6y5k5JScGQIUNw+vRplJaW4sSJE4iKisKDDz6I0tJSLFmyRO0SbULW3IC82ZmbuZmbue0NczO3DLnrRIUhGagZ8fb2tvjn5uYmFEURTk5OwsfHR+3yGtWJEyfEgAEDxIYNG9QupVHJmlsIebPLkJvntn/xeNvv8ZY198033ywmTpwoSktLhbu7u0hMTBRCGMdNi4mJUbk625E1txDyZmdu5haCue0VczO3EMxNQrDRlupMhg/41dm7d69o06aN2mU0OllzCyFvdhlz89zG4y0DGXL7+vqKY8eOCSGExQeA5ORk4eLiomZpNiVrbiHkzc7czC0Ec9sr5mZuIZibhNCo3dOXmp/Y2Fi8/PLLePDBB9UupdE5ODggPT1d7TIanay5AXmzy5ib5zYebxnIkNtgMECv11danpqaCg8PDxUqahyy5gbkzc7clpjbPjG3Jea2T8xtyd5z1wUnIqN6sfcP+N9//73FYyEEMjIysGjRIvTu3VulqmxP1tyAvNllzV0dntvkYu/Huzr2nnvw4MF466238MEHHwAAFEVBQUEB5s
yZg2HDhqlcne3ImhuQNztzMzdzM7e9YW7mliF3XXAiMqpRTR/ww8PDsX79epUqsy2NxrITuqIoCAgIQP/+/bFgwQKEhISoVJltyZobkDe7rLl5bjPi8bbv4y1r7tTUVMTHx0MIgYSEBHTv3h0JCQnw9/fHjh07EBgYqHaJNiFrbkDe7MzN3MzN3PaGuZlbhtx1wUZbqpGsH/CJyL7x3CYXWY+3rLkBoKKiAqtWrcKhQ4dQUFCArl27YsKECXBxcVG7NJuSNTcgb3bmZm7mtl/MzdzMTWy0JapBVlYWdDodPD091S6lUcmaG5A3u6y5ZcXjTURERERE1LRxTFuqFZk+4Ofk5ODpp5/G6tWrcfHiRQBAQEAApk6dimeeeQaurq4qV2gbsuYG5M0ua+7L8dzG4y0D2XIvX768xvWTJk1qpEoal6y5AXmzM3fVmNu+MHfVmNu+MHfV7DV3XbCnLVVLxg/42dnZ6NWrF9LS0jBhwgS0bdsWAHDkyBGsXLkScXFx+O2333Do0CHs3r0bM2fOVLli65A1NyBvdllzAzy38Xjb//EG5M0NAD4+PhaPy8vLUVRUBJ1OB1dXV2RnZ6tUmW3JmhuQNztzGzE3c9sj5jZibuaWniCqwoULF0Tr1q2Fm5ubmDZtmnjzzTfFm2++Ke6++27h5uYmunXrJoqLi8Uff/whFi5cqHa5VvPggw+KDh06iMzMzErrMjIyRMeOHcXo0aOFp6enWLp0qQoV2oasuYWQN7usuXlu4/GW4XjLmrsmJ06cEAMGDBAbNmxQu5RGJWtuIeTNztzMLQPmZm4ZMLdcuavDRluqkqwf8CMiImo8Oaxfv14oiiLmzp3biFXZnqy5hZA3u6y5eW6rGo+3fR1vWXNfyd69e0WbNm3ULqPRyZpbCHmzM7dcmFsuzC0X5iY22lKVZP2Ar9PpxJkzZ6pdf+bMGaHVahuxosYha24h5M0ua26e26rG421fx1vW3Ffy119/CQ8PD7XLaHSy5hZC3uzMLRfmlgtzy4W5iRORUZUyMjLQvn37atd36NABGo0Gc+bMacSqbM/f3x+nTp1CWFhYleuTk5MRGBjYyFXZnqy5AXmzy5qb5zYe78vZ6/GWNbfJ999/b/FYCIGMjAwsWrQIvXv3Vqkq25M1NyBvduY2Ym7mtkfMbcTczC09lRqLqYkLDQ0Vv/76a7Xrd+zYIUJCQhqxosYxdepUcf3114vS0tJK60pKSkTfvn3F1KlTVajMtmTNLYS82WXNzXMbj/fl7PV4y5rbRFEUi38ajUYEBQWJcePGifT0dLXLsxlZcwshb3bmZm7mZm57w9zMLUPuulCEEELthmNqeu644w4kJiZi06ZN0Ol0FutKS0sRHx+PqKgofPLJJypVaBupqano3r07nJycMGPGDMTFxUEIgaNHj2Lx4sUoLS3F3r170bJlS7VLtSpZcwPyZpc1N89tPN4m9ny8Zc1NRERERGRP2GhLVZL1Az5gvE14+vTp+Pnnn2H69VAUBYMGDcKiRYsQExOjcoW2IWtuQN7sMubmuY3HW4bjLWvu/8rKyoJOp4Onp6fapTQqWXMD8mZnbuaWAXMztwyYW67ctdLYXXup+UhKShJDhgwRGo3Gort6fHy8SEhIULs8m8vOzhZ//PGH+OOPP8SFCxfULqfRyJpbCHmzy5ab5zYebxmOt6y5L168KKZPny78/PyERqMx32b3xBNPiMLCQrXLsxlZcwshb3bmZm7mZm57w9zMLUPuumJPW7qiixcvIiEhAQAQExMDX19flSsiImo4ntvkIuvxlil3dnY2evXqhbS0NEyYMAFt27YFABw5cgQrV65EXFwcfvvtNxw6dAi7d+/GzJkzVa7YOmTNDcibnbmZG2Bu5mZue8DccuWuF7VbjYmIiIiIGurBBx8UHTp0EJmZmZXWZWRkiI4dO4rRo0cLT09PsXTpUhUqtA1Zcwshb3bmZm4T5mZue8HczG1iz7nrg422RERERNTsRUREiA0bNlS7fv369UJRFDF37txGrMr2ZM0thLzZmbtqzM3c9oC5q8bczC0rDo9ARERERM2ek5MTEhMTERYWVuX61NRUREZGoqKiopErsy1ZcwPyZmdu5r4cczO3PWBu5r6cveauD43aBRARERERNZS/vz9OnTpV7frk5GQEBgY2XkGNRNbcgLzZmbtqzG1fmLtqzG1fmLtq9pq7PthoS0RERETNXnx8PJ5++mmUlZVVWldaWopnnnkGQ4YMUaEy25I1NyBvduZmbhPmZm57wdzMbWLPueuDwyMQERERUbOXmpqK7t27w8nJCTNmzEBcXByEEDh69CgWL16M0tJS7N27Fy1btlS7VKuSNTcgb3bmZm7mZm7mtg/MLVfuelFhHF0iIiIiIqtLSkoSQ4YMERqNRiiKIhRFERqNRsTHx4uEhAS1y7MZWXMLIW925mZu5mZue8PczC1D7rpiT1siIiIisisXL15EQkICACAmJga+vr4qV9Q4ZM0NyJuduZlbBszN3DJgbrly1xYbbYmIiIiIiIiIiIiaEE5ERkRERERERERERNSEsNGWiIiIiIiIiIiIqAlhoy0RERERERERERFRE8JGWyIiIiIiIiIiIqImhI22RERERERERERERE0IG22JiIiIiFSgKAq+/fZbtcsgIiIioiaIjbZEREREZLfOnDmDO+64A6GhodDpdIiIiMCDDz6ICxcuNFoNc+fORefOnSstz8jIwNChQxutDiIiIiJqPthoS0RERER2KSkpCd27d0dCQgK++OILnDx5EkuWLMHmzZvRq1cvZGdnq1pfcHAwnJycVK2BiIiIiJomNtoSERERkV2aMWMGdDodfv75Z/Tt2xctW7bE0KFD8csvvyAtLQ1PP/00gKqHKfD29sbSpUvNj8+cOYMxY8bA29sbvr6+uPnmm3Hq1Cnz+m3btqFHjx5wc3ODt7c3evfujZSUFCxduhTz5s3DwYMHoSgKFEUx7/e/r/v333+jf//+cHFxgZ+fH6ZNm4aCggLz+ilTpmDkyJF4/fXXERISAj8/P8yYMQPl5eXmbRYvXozY2Fg4OzsjKCgIo0ePttrPk4iIiIgaDxttiYiIiMjuZGdnY+PGjZg+fTpcXFws1gUHB2PChAlYvXo1hBBX3Fd5eTni4+Ph4eGBX3/9Fb///jvc3d0xZMgQlJWVoaKiAiNHjkTfvn1x6NAh7Nq1C9OmTYOiKLjtttvwyCOPoH379sjIyEBGRgZuu+22Sq9RWFiI+Ph4+Pj4YO/evVizZg1++eUX3H///Rbbbd26FYmJidi6dSuWLVuGpUuXmhuB//zzT8ycORPPPfccjh8/jg0bNuD666+v/w+RiIiIiFTjoHYBRERERETWlpCQACEE2rZtW+X6tm3b4uLFizh//vwV97V69WoYDAZ89NFHUBQFAPDpp5/C29sb27ZtQ/fu3ZGbm4ubbroJ0dHR5v2buLu7w8HBAcHBwdW+xsqVK1FSUoLly5fDzc0NALBo0SIMHz4cr7zyCoKCggAAPj4+WLRoEbRaLeLi4nDjjTdi8
+bNuPvuu3H69Gm4ubnhpptugoeHByIiItClS5fa/cCIiIiIqElhT1siIiIisltX6kmr0+muuI+DBw/i5MmT8PDwgLu7O9zd3eHr64uSkhIkJibC19cXU6ZMQXx8PIYPH46FCxciIyOjTnUePXoUnTp1MjfYAkDv3r1hMBhw/Phx87L27dtDq9WaH4eEhODcuXMAgEGDBiEiIgJRUVG4/fbbsWLFChQVFdWpDiIiIiJqGthoS0RERER2JyYmBoqi4OjRo1WuP3r0KAICAuDt7Q1FUSo17l4+TmxBQQG6deuGAwcOWPw7ceIExo8fD8DY83bXrl249tprsXr1arRu3Rq7d++2ei5HR0eLx4qiwGAwAAA8PDywf/9+fPHFFwgJCcGzzz6LTp06IScnx+p1EBEREZFtsdGWiIiIiOyOn58fBg0ahMWLF6O4uNhiXWZmJlasWIEpU6YAAAICAix6xiYkJFj0UO3atSsSEhIQGBiImJgYi39eXl7m7bp06YInn3wSO3fuRIcOHbBy5UoAxt68er2+xnrbtm2LgwcPorCw0Lzs999/h0ajQZs2bWqd28HBAQMHDsSrr76KQ4cO4dSpU9iyZUutn09ERERETQMbbYmIiIjILi1atAilpaWIj4/Hjh07cObMGWzYsAGDBg1C69at8eyzzwIA+vfvj0WLFuGvv/7Cn3/+iXvvvdeiR+uECRPg7++Pm2++Gb/++iuSk5Oxbds2zJw5E6mpqUhOTsaTTz6JXbt2ISUlBT///DMSEhLM49pGRkYiOTkZBw4cQFZWFkpLSyvVOmHCBDg7O2Py5Mk4fPgwtm7digceeAC33367eTzbK/nhhx/w9ttv48CBA0hJScHy5cthMBjq1OhLRERERE0DG22JiIiIyC7FxsZi7969iIqKwpgxYxAREYGhQ4eidevW+P333+Hu7g4AWLBgAcLDw9GnTx+MHz8es2fPhqurq3k/rq6u2LFjB1q2bIn/+7//Q9u2bXHnnXeipKQEnp6ecHV1xbFjx3DLLbegdevWmDZtGmbMmIF77rkHAHDLLbdgyJAhuOGGGxAQEIAvvviiUq2urq7YuHEjsrOzcfXVV2P06NEYMGAAFi1aVOu83t7e+Oabb9C/f3+0bdsWS5YswRdffIH27ds38CdJRERERI1NEVeanYGIiIiIyE7MmTMHb7zxBjZt2oRrrrlG7XKIiIiIiKrERlsiIiIiksqnn36K3NxczJw5ExoNbzwjIiIioqaHjbZERERERERERERETQi7FhARERERERERERE1IWy0JSIiIiIiIiIiImpC2GhLRERERERERERE1ISw0ZaIiIiIiIiIiIioCWGjLREREREREREREVETwkZbIiIiIiIiIiIioiaEjbZERERERERERERETQgbbYmIiIiIiIiIiIiaEDbaEhERERERERERETUhbLQlIiIiIiIiIiIiakLYaEtERERERERERETUhLDRloiIiIiIiIiIiKgJYaMtERERERERERERURPCRlsiIiIiIiIiIiKiJoSNtkRERESS2rZtGxRFwbZt29QuhZqApUuXQlEUnDp1Su1SKqmoqMBjjz2G8PBwaDQajBw5Uu2SqnTq1CkoioKlS5eqXYrNNeX3CxERkT1goy0RERHViukD+p9//ql2KVSNoqIizJ07t0k0wur1eoSGhkJRFKxfv17tcpoU0++Soij47bffKq0XQiA8PByKouCmm26q12ssXry4WTUcjhkzBoqi4PHHH69y/SeffILXXnsNo0ePxrJlyzBr1iwcOXIEc+fObdRGw8uP3X//PfHEE41WR3VefPFFfPvtt2qXQURERFbgoHYBRERERGQdRUVFmDdvHgCgX79+V9z++uuvR3FxMXQ6ndVr2bJlCzIyMhAZGYkVK1Zg6NChVn+N5s7Z2RkrV67EddddZ7F8+/btSE1NhZOTU733vXjxYvj7+2PKlCm1fs7tt9+OsWPHNuh16yMvLw/r1q1DZGQkvvjiC7z88stQFMVimy1btqBFixZ48803zcu++uorzJs3D/369UNkZGSj1vzcc8+hVatWFss6dOiAiIgIFBcXw9HRsVHrMXnxxRcxevToRumJrNb7hYiISBZstCUiIqJmSQiBkpISuLi4qF1Ks6XRaODs7GyTfX/++efo2rUrJk+ejKeeegqFhYVwc3OzyWtVR43XrIthw4ZhzZo1ePvtt+Hg8O9l+cqVK9GtWzdkZWU1Sh2mn5NWq4VWq22U17zc119/Db1ej08++QT9+/fHjh070LdvX4ttzp07B29v70appzbvm6FDh6J79+5VrrPV71RTo9b7hYiISBYcHoGIiIjqbcqUKXB3d8fp06dx0003wd3dHS1atMC7774LAPj777/Rv39/uLm5ISIiAitXrrR4vulW4x07duCee+6Bn58fPD09MWnSJFy8eNFi28jISNx0003YuHEjunfvDhcXF7z//vsAgKSkJNx6663w9fWFq6srrrnmGvz444/m5549exYODg7mXqiXO378OBRFwaJFi8zLcnJy8NBDDyE8PBxOTk6IiYnBK6+8AoPBYN7GNHbl66+/jnfffRdRUVFwdXXF4MGDcebMGQghMH/+fISFhcHFxQU333wzsrOzK73++vXr0adPH7i5ucHDwwM33ngj/vnnnyp/zmlpaRg5ciTc3d0REBCA2bNnQ6/Xm+sJCAgAAMybN898y/bcuXOrPX5VjWnbr18/dOjQAUeOHMENN9wAV1dXtGjRAq+++mq1+/mv4uJirF27FmPHjsWYMWNQXFyM7777zrz+9ddfh6IoSElJqfTcJ598EjqdzuL4//HHHxgyZAi8vLzg6uqKvn374vfff7d43ty5c6EoCo4cOYLx48fDx8fH3IP10KFDmDJlCqKiouDs7Izg4GDccccduHDhQpU/k+7du8PZ2RnR0dF4//33zfv+r88//xzdunWDi4sLfH19MXbsWJw5c6bWP6dx48bhwoUL2LRpk3lZWVkZvvrqK4wfP77K5xgMBrz11lto3749nJ2dERQUhHvuucfi5xUZGYl//vkH27dvN78PTD2vTb9z27dvx/Tp0xEYGIiwsDCLdf8dbmD9+vXo27cvPDw84OnpiauvvtridzkhIQG33HILgoOD4ezsjLCwMIwdOxa5ubm1+jmsWLECgwYNwg033IC2bdtixYoV5nWm37OtW7fin3/+MedZunQpbr31VgDADTfcYF5++Xu5Lr9biYmJGDZsGDw8PDBhwoRa1V2Vqsa0rc3vr0ltjm91FEVBYWEhli1bZv55mHpaT5kypcreyFW9txVFwf33349vv/0WHTp0gJOTE9q3b48NGzZYbFfV+8V0nv7tt9/Qo0cPODs7IyoqCsuXL6/02ocOHULfvn3h4uKCsLAwPP/88/j00085Ti4REdElbLQlIiKiBtHr9Rg6dCjCw8Px6quvIjIyEvfffz+WLl2KIUOGoHv37njllVfg4eGBSZMmITk5udI+7r//fhw9ehRz587FpEmTsGLFCowcORJCCIvtjh8/jnHjxmHQ
oEFYuHAhOnfujLNnz+Laa6/Fxo0bMX36dLzwwgsoKSnBiBEjsHbtWgBAUFAQ+vbtiy+//LLSa69evRpardbcAFRUVIS+ffvi888/x6RJk/D222+jd+/eePLJJ/Hwww9Xev6KFSuwePFiPPDAA3jkkUewfft2jBkzBv/73/+wYcMGPP7445g2bRrWrVuH2bNnWzz3s88+w4033gh3d3e88soreOaZZ3DkyBFcd911lRot9Ho94uPj4efnh9dffx19+/bFggUL8MEHHwAAAgIC8N577wEARo0ahc8++wyfffYZ/u///q+WR/JfFy9exJAhQ9CpUycsWLAAcXFxePzxx2s9Nu3333+PgoICjB07FsHBwejXr59FQ5xp/NKqjseXX36JwYMHw8fHB4Dxtvjrr78eeXl5mDNnDl588UXk5OSgf//+2LNnT6Xn33rrrSgqKsKLL76Iu+++GwCwadMmJCUlYerUqXjnnXcwduxYrFq1CsOGDbN4j/31118YMmQILly4gHnz5uHOO+/Ec889V+UYoS+88AImTZqE2NhYvPHGG3jooYewefNmXH/99cjJyanVzykyMhK9evXCF198YV62fv165ObmYuzYsVU+55577sGjjz6K3r17Y+HChZg6dSpWrFiB+Ph4lJeXAwDeeusthIWFIS4uzvw+ePrppy32M336dBw5cgTPPvtsjWOxLl26FDfeeCOys7Px5JNP4uWXX0bnzp3NDXhlZWWIj4/H7t278cADD+Ddd9/FtGnTkJSUVKufQ3p6OrZu3Ypx48YBMDZkf/XVVygrKwNgfF9/9tlniIuLQ1hYmDlP27ZtMXPmTADAU089ZbEcqNvvVkVFBeLj4xEYGIjXX38dt9xyyxXrzs3NRVZWlsW/mlzp99ekNse3Op999hmcnJzQp08f88/jnnvuuWKWqvz222+YPn06xo4di1dffRUlJSW45ZZbqvyi479OnjyJ0aNHY9CgQViwYAF8fHwwZcoUiwbztLQ03HDDDfjnn3/w5JNPYtasWVixYgUWLlxYr3qJiIjskiAiIiKqhU8//VQAEHv37jUvmzx5sgAgXnzxRfOyixcvChcXF6Eoili1apV5+bFjxwQAMWfOnEr77NatmygrKzMvf/XVVwUA8d1335mXRURECABiw4YNFnU99NBDAoD49ddfzcvy8/NFq1atRGRkpNDr9UIIId5//30BQPz9998Wz2/Xrp3o37+/+fH8+fOFm5ubOHHihMV2TzzxhNBqteL06dNCCCGSk5MFABEQECBycnLM2z355JMCgOjUqZMoLy83Lx83bpzQ6XSipKTEXKO3t7e4++67LV4nMzNTeHl5WSw3/Zyfe+45i227dOkiunXrZn58/vz5Sj/jmmzdulUAEFu3bjUv69u3rwAgli9fbl5WWloqgoODxS233FKr/d50002id+/e5scffPCBcHBwEOfOnTMv69Wrl0XtQgixZ88ei9c2GAwiNjZWxMfHC4PBYN6uqKhItGrVSgwaNMi8bM6cOQKAGDduXKV6ioqKKi374osvBACxY8cO87Lhw4cLV1dXkZaWZl6WkJAgHBwcxOWXzadOnRJarVa88MILFvv8+++/hYODQ6Xl/3X579KiRYuEh4eHucZbb71V3HDDDUII43v+xhtvND/v119/FQDEihUrLPa3YcOGSsvbt28v+vbtW+1rX3fddaKioqLKdcnJyUIIIXJycoSHh4fo2bOnKC4uttjWdDz++usvAUCsWbOmxszVef3114WLi4vIy8sTQghx4sQJAUCsXbvWYru+ffuK9u3bWyxbs2ZNpfevEPX73XriiSdqVa/pZ1TVPyH+PS98+umnlV7jSr+/dTm+1XFzcxOTJ0+utHzy5MkiIiKi0nLT783lAAidTidOnjxpXnbw4EEBQLzzzjuVfham94sQ/56nL/+9OnfunHBychKPPPKIedkDDzwgFEURf/31l3nZhQsXhK+vb6V9EhERyYo9bYmIiKjB7rrrLvP/e3t7o02bNnBzc8OYMWPMy9u0aQNvb28kJSVVev60adMsJu6577774ODggJ9++sliu1atWiE+Pt5i2U8//YQePXpYTObk7u6OadOm4dSpUzhy5AgA4P/+7//g4OCA1atXm7c7fPgwjhw5gttuu828bM2aNejTpw98fHwsetENHDgQer0eO3bssHj9W2+9FV5eXubHPXv2BABMnDjRYpzSnj17oqysDGlpaQCMvT9zcnIwbtw4i9fRarXo2bMntm7dWunndO+991o87tOnT5U/z4Zyd3fHxIkTzY91Oh169OhRq9e6cOECNm7caO45CQC33HJLpZ61t912G/bt24fExETzstWrV8PJyQk333wzAODAgQNISEjA+PHjceHCBfPPqLCwEAMGDMCOHTsshqwAKv+MAFiMe1xSUoKsrCxcc801AID9+/cDMPaE/OWXXzBy5EiEhoaat4+Jiak0ido333wDg8GAMWPGWBy74OBgxMbGVnnsqmMaPuKHH35Afn4+fvjhh2qHRlizZg28vLwwaNAgi9ft1q0b3N3d6/S6d9999xXHI920aRPy8/PxxBNPVBqn1XRLvem9v3HjRhQVFdX69U1WrFiBG2+8ER4eHgCA2NhYdOvWzaJndl3V53frvvvuq9NrvPvuu9i0aZPFvyu50u+vNY9vQw0cOBDR0dHmx1dddRU8PT1rdQ5o164d+vTpY34cEBCANm3aWDx3w4YN6NWrFzp37mxe5uvr26ChKYiIiOwNJyIjIiKiBnF2djaPpWri5eWFsLCwSmMlenl5VTk2Y2xsrMVjd3d3hISEVLqN+b+ztQNASkqKuaH0cqbbpFNSUtChQwf4+/tjwIAB+PLLLzF//nwAxkZCBwcHiyEEEhIScOjQoUqZTM6dO2fxuGXLlpUyAkB4eHiVy035ExISAAD9+/ev8nU8PT0tHlf1c/bx8anVWJd1VdWx8/HxwaFDh6743NWrV6O8vBxdunTByZMnzct79uyJFStWYMaMGQCMjd0PP/wwVq9ejaeeegpCCKxZswZDhw41Zzf9jCZPnlzt6+Xm5pqHUgCqfo9kZ2dj3rx5WLVqVaXjZxp39dy5cyguLkZMTEyl5/93WUJCAoQQld63Jpd/AXElAQEBGDhwIFauXImioiLo9XqMHj26ym0TEhKQm5uLwMDAKtf/N1tNqvo5/ZepQb1Dhw417ufhhx/GG2+8gRUrVqBPnz4YMWIEJk6caPFlRlWOHj2Kv/76C5MmTbJ4r/Tr1w/vvvsu8vLyKv0e1EZdf7ccHBzM4/rWVo8ePaqdiKwqtfn9re3xzc3NRXFxsXm5TqeDr69vXcq/ov+e16qqtyHPTUlJQa9evSptV9XvHxERkazYaEtEREQNUl1vveqWi/+MU1sXl/eYrI+xY8di6tSpOHDgADp37owvv/wSAwYMgL+/v3kbg8GAQYMG4bHHHqtyH61bt7Z4XN/8ph6in332GYKDgyttd3kv3Zr2ZwsNOXamHpK9e/eucn1SUhKioqIQGhqKPn364Msvv8RTTz2F3bt
34/Tp03jllVfM25p+Rq+99ppFj7zLubu7Wzyu6j0yZswY7Ny5E48++ig6d+4Md3d3GAwGDBkypFJP3dowGAxQFAXr16+v8mf135quZPz48bj77ruRmZmJoUOHwtvbu9rXDQwMrLYXanVfNFSlob9Ll1uwYAGmTJmC7777Dj///DNmzpyJl156Cbt3766xMfTzzz8HAMyaNQuzZs2qtP7rr7/G1KlT61xPXX+3nJycoNHY9gbE2vz+1vb4Pvjgg1i2bJl5ed++fS0mYKtKVRPpAag0EdqV6q3NOcAW534iIiIZsdGWiIiIVJeQkIAbbrjB/LigoAAZGRkYNmzYFZ8bERGB48ePV1p+7Ngx83qTkSNH4p577jEPkXDixAk8+eSTFs+Ljo5GQUEBBg4cWK8stWW69TgwMNBqr1Vdw0xjSU5Oxs6dO3H//fejb9++FusMBgNuv/12rFy5Ev/73/8AGIdImD59Oo4fP47Vq1fD1dUVw4cPNz/H9DPy9PSs98/o4sWL2Lx5M+bNm4dnn33WvNzUG9MkMDAQzs7OFj0+Tf67LDo6GkIItGrVqlIjfn2MGjUK99xzD3bv3m0xfMd/RUdH45dffkHv3r2v2OhqjfeC6ed/+PDhK/aA7NixIzp27Ij//e9/2LlzJ3r37o0lS5bg+eefr3J7IQRWrlyJG264AdOnT6+0fv78+VixYkWNjbbVZbTF71ZjqO3xfeyxxyyGL7m8p3l1PxMfH58qJ4ZLSUmpf8ENEBERUavfNSIiIplxTFsiIiJS3QcffGAxM/p7772HioqKSmOJVmXYsGHYs2cPdu3aZV5WWFiIDz74AJGRkWjXrp15ube3N+Lj4/Hll19i1apV0Ol0GDlypMX+xowZg127dmHjxo2VXisnJwcVFRX1SFhZfHw8PD098eKLL1Y5K/z58+frvE9XV1cAqLJxpjGYegg+9thjGD16tMW/MWPGoG/fvha9CG+55RZotVp88cUXWLNmDW666Sa4ubmZ13fr1g3R0dF4/fXXUVBQUOn1avMzMvX6+28vv7feeqvSdgMHDsS3336L9PR08/KTJ09i/fr1Ftv+3//9H7RaLebNm1dpv0IIXLhw4Yp1Xc7d3R3vvfce5s6da9Fo/V9jxoyBXq83D+9xuYqKCovj7ubm1uD3weDBg+Hh4YGXXnoJJSUlFutMufPy8ir9TnTs2BEajQalpaXV7vv333/HqVOnMHXq1ErvldGjR+O2227D1q1bLY7Ff5neK//NaYvfrcZQ2+Pbrl07DBw40PyvW7du5u2qO+7R0dHIzc21GOIkIyMDa9eutXqO2oiPj8euXbtw4MAB87Ls7OwGjWVMRERkb9jTloiIiFRXVlaGAQMGYMyYMTh+/DgWL16M6667DiNGjLjic5944gl88cUXGDp0KGbOnAlfX18sW7YMycnJ+Prrryvd9nzbbbdh4sSJWLx4MeLj4yvdiv7oo4/i+++/x0033YQpU6agW7duKCwsxN9//42vvvoKp06dshhOob48PT3x3nvv4fbbb0fXrl0xduxYBAQE4PTp0/jxxx/Ru3dvLFq0qE77dHFxQbt27bB69Wq0bt0avr6+6NChQ41jklrTihUr0Llz50rj+ZqMGDECDzzwAPbv34+uXbsiMDAQN9xwA9544w3k5+dbTAgHABqNBh999BGGDh2K9u3bY+rUqWjRogXS0tKwdetWeHp6Yt26dTXW5Onpieuvvx6vvvoqysvL0aJFC/z8889ITk6utO3cuXPx888/o3fv3rjvvvug1+uxaNEidOjQwaJxKTo6Gs8//zyefPJJnDp1CiNHjoSHhweSk5Oxdu1aTJs2DbNnz67Tz66mcXtN+vbti3vuuQcvvfQSDhw4gMGDB8PR0REJCQlYs2YNFi5caB4Pt1u3bnjvvffw/PPPIyYmBoGBgdWO8VodT09PvPnmm7jrrrtw9dVXY/z48fDx8cHBgwdRVFSEZcuWYcuWLbj//vtx6623onXr1qioqMBnn30GrVaLW265pdp9r1ixAlqtFjfeeGOV60eMGIGnn34aq1atwsMPP1zlNp07d4ZWq8Urr7yC3NxcODk5oX///ggMDLT671ZjqMvxrU63bt3wyy+/4I033kBoaChatWqFnj17YuzYsXj88ccxatQozJw5E0VFRXjvvffQunVr82R8jemxxx7D559/jkGDBuGBBx6Am5sbPvroI7Rs2RLZ2dmq3zVARETUFLDRloiIiFS3aNEirFixAs8++yzKy8sxbtw4vP3227X64B4UFISdO3fi8ccfxzvvvIOSkhJcddVVWLduXZUNQiNGjICLi0uVjYSAsbfq9u3b8eKLL2LNmjVYvnw5PD090bp1a8ybN++KkyvVxfjx4xEaGoqXX34Zr732GkpLS9GiRQv06dOnXmN5AsBHH32EBx54ALNmzUJZWRnmzJnTKI22+/fvx7Fjx/DMM89Uu83w4cPxwAMP4PPPP0fXrl0BGBvRf/nlF3h4eFQ5HEa/fv2wa9cuzJ8/H4sWLUJBQQGCg4PRs2dP3HPPPbWqbeXKlXjggQfw7rvvQgiBwYMHY/369QgNDbXYrlu3bli/fj1mz56NZ555BuHh4Xjuuedw9OhR83AbJk888QRat26NN998E/PmzQNgnHxu8ODBtfqyob6WLFmCbt264f3338dTTz0FBwcHREZGYuLEiRbjCD/77LNISUnBq6++ivz8fPTt27fOjbYAcOeddyIwMBAvv/wy5s+fD0dHR8TFxZnHoO3UqRPi4+Oxbt06pKWlwdXVFZ06dcL69etxzTXXVLnP8vJyrFmzBtdee221E2h16NABrVq1wueff15to21wcDCWLFmCl156CXfeeSf0ej22bt2KwMBAm/xuNYbaHt/qvPHGG5g2bRr+97//obi4GJMnT0bPnj3h5+eHtWvX4uGHH8Zjjz2GVq1a4aWXXkJCQoIqjbbh4eHYunUrZs6ciRdffBEBAQGYMWMG3NzcMHPmTDg7Ozd6TURERE2NIjgiPBEREalk6dKlmDp1Kvbu3VunmdiJGtPIkSPxzz//VBoHl4is66GHHsL777+PgoKCRp18kYiIqCnimLZERERERJcUFxdbPE5ISMBPP/2Efv36qVMQkZ367+/ahQsX8Nlnn+G6665jgy0RERE4PAIRERERkVlUVBSmTJmCqKgopKSk4L333oNOp8Njjz2mdmlEdqVXr17o168f2rZti7Nnz+Ljjz9GXl5ejUOsEBERyYSNtkRERERElwwZMgRffPEFMjMz4eTkhF69euHFF19EbGys2qUR2ZVhw4bhq6++wgcffABFUdC1a1d8/PHHuP7669UujYiIqEngmLZERERERERERERETQjHtCUiIiIiIiIiIiJqQthoS0RERERERERERNSEcExbKzAYDEhPT4eHhwcURVG7HCIiIiIiIiIiImqChBDIz89HaGgoNJrq+9Oy0dYK0tPTER4ernYZRERERERERERE1AycOXMGYWFh1a5no60VeHh4ADD+sD09PVWuhoj+n717j7Ox3t8/fq
0xJ6eZMRiMHNLIECE2TdppIzNRDlu7MOUYHZw26lsqhEqKqFS+FcmOtCu7jSISSYTIqZxNjjMOYcaYjDHz+f3Rz/o2OTRLa829Zn1ez8djPbZ13/darstt7j3e3fNZAAAAAAAA/igjI0NVqlRxzxMvhaGtF5xfEiEiIoKhLQAAAAAAAIDL+qMlVvkgMgAAAAAAAADwIwxtAQAAAAAAAMCPMLQFAAAAAAAAAD/C0BYAAAAAAAAA/AhDWwAAAAAAAADwIwxtAQAAAAAAALPvpLwAAHijSURBVMCPMLQFAAAAAAAAAD/C0BYAAAAAAAAA/AhDWwAAAAAAAADwIwxtAQAAAAAAAMCPMLQFAAAAAAAAAD/C0BYAAAAAAAAA/AhDWwAAAAAAAADwIwxtAQAAAAAAAMCPMLQFAAAAAAAAAD/C0BYAAAAAAAAA/AhDWwAAAAAAAADwIwxtAQAAAAAAAMCPBDsdAEWPa5TL6QhXzIw0TkcAAAAAAAAALos7bQEAAAAAAADAjzC0BQAAAAAAAAA/wtAWAAAAAAAAAPwIQ1sAAAAAAAAA8CMMbQEAAAAAAADAjzC0BQAAAAAAAAA/wtAWAAAAAAAAAPwIQ1sAAAAAAAAA8CMMbQEAAAAAAADAjzC0BQAAAAAAAAA/wtAWAAAAAAAAAPwIQ1sAAAAAAAAA8CMMbQEAAAAAAADAjzC0BQAAAAAAAAA/wtAWAAAAAAAAAPwIQ1sAAAAAAAAA8CMMbQEAAAAAAADAjzC0BQAAAAAAAAA/wtAWAAAAAAAAAPxIsNMBAAAAANcol9MR/hQz0jgdAQAAAAGEO20BAAAAAAAAwI8UuaHta6+9purVqys8PFxNmzbVmjVrLnnsDz/8oE6dOql69epyuVyaNGnSn35PAAAAAAAAAPClIjW0/eCDDzRkyBCNHDlS69evV/369ZWYmKgjR45c9PisrCzVqFFDzz//vCpWrOiV9wQAAAAAAAAAXypSQ9uXXnpJffr0Uc+ePVWnTh1NmTJFJUqU0LRp0y56/F/+8he9+OKL6ty5s8LCwrzyngAAAAAAAADgS0VmaHv27FmtW7dOrVq1cm8LCgpSq1attGrVqkJ9z+zsbGVkZOR7AAAAAAAAAIA3FJmh7bFjx5Sbm6sKFSrk216hQgWlpaUV6nuOHTtWkZGR7keVKlWu6PcHAAAAAAAAgN8rMkNbfzJs2DClp6e7H/v373c6EgAAAAAAAIAAEex0gIIqV66cihUrpsOHD+fbfvjw4Ut+yJiv3jMsLOySa+QCAAAAAAAAwJ9RZO60DQ0NVaNGjbRkyRL3try8PC1ZskQJCQl+854AAAAAAAAA8GcUmTttJWnIkCHq3r27GjdurCZNmmjSpEk6ffq0evbsKUnq1q2bKleurLFjx0r69YPGfvzxR/evDx48qA0bNqhUqVKKi4sr0HsCAAAAAAAAQGEqUkPbe+65R0ePHtWIESOUlpamBg0aaOHChe4PEtu3b5+Cgv7v5uFDhw6pYcOG7ufjx4/X+PHj1bx5cy1btqxA7wkAAAAAAAAAhclljDFOhyjqMjIyFBkZqfT0dEVERDgdx+dco1xOR7hiZiR/3QEA8EdF+fsLie8xAAAAUDAFnSMWmTVtAQAAAAAAAMAGDG0BAAAAAAAAwI8wtAUAAAAAAAAAP8LQFgAAAAAAAAD8CENbAAAAAAAAAPAjDG0BAAAAAAAAwI8wtAUAAAAAAAAAP8LQFgAAAAAAAAD8CENbAAAAAAAAAPAjDG0BAAAAAAAAwI8wtAUAAAAAAAAAP8LQFgAAAAAAAAD8CENbAAAAAAAAAPAjDG0BAAAAAAAAwI8wtAUAAAAAAAAAP8LQFgAAAAAAAAD8CENbAAAAAAAAAPAjDG0BAAAAAAAAwI8wtAUAAAAAAAAAP8LQFgAAAAAAAAD8CENbAAAAAAAAAPAjDG0BAAAAAAAAwI8wtAUAAAAAAAAAP8LQFgAAAAAAAAD8CENbAAAAAAAAAPAjDG0BAAAAAAAAwI8wtAUAAAAAAAAAP8LQFgAAAAAAAAD8CENbAAAAAAAAAPAjDG0BAAAAAAAAwI8wtAUAAAAAAAAAP+Lx0HbhwoVasWKF+/lrr72mBg0aqGvXrjpx4oRXwwEAAAAAAACAbTwe2j766KPKyMiQJG3evFlDhw5VmzZtlJKSoiFDhng9IAAAAAAAAADYJNjTF6SkpKhOnTqSpI8//lh33HGHnnvuOa1fv15t2rTxekAAAAAAAAAAsInHd9qGhoYqKytLkvTFF1+odevWkqTo6Gj3HbgAAAAAAAAAgCvj8Z22N998s4YMGaJmzZppzZo1+uCDDyRJO3bs0FVXXeX1gAAAAAAAAABgE4/vtJ08ebKCg4P10Ucf6Y033lDlypUlSQsWLFBSUpLXAwIAAAAAAACATTy+07Zq1aqaP3/+BdsnTpzolUAAAAAAAAAAYDOP77T97LPP9Pnnn1+wfdGiRVqwYIFXQgEAAAAAAACArTwe2j7++OPKzc29YHteXp4ef/xxr4QCAAAAAAAAAFt5PLTduXOn6tSpc8H2+Ph47dq1yyuhAAAAAAAAAMBWHg9tIyMjtWfPngu279q1SyVLlvRKKAAAAAAAAACwlcdD2/bt2+uf//yndu/e7d62a9cuDR06VO3atfNqOAAAAAAAAACwjcdD2xdeeEElS5ZUfHy8rr76al199dWqXbu2ypYtq/Hjx/siYz6vvfaaqlevrvDwcDVt2lRr1qy57PEffvih4uPjFR4ernr16umzzz7Lt79Hjx5yuVz5HklJSb6sAAAAAAAAAACXFOzpCyIjI7Vy5UotXrxYGzduVPHixXX99dfrlltu8UW+fD744AMNGTJEU6ZMUdOmTTVp0iQlJiZq+/btiomJueD4lStXqkuXLho7dqzuuOMOzZo1Sx06dND69etVt25d93FJSUl655133M/DwsJ83gUAAAAAAAAALsZljDFOhyiopk2b6i9/+YsmT54sScrLy1OVKlU0YMAAPf744xccf8899+j06dOaP3++e9uNN96oBg0aaMqUKZJ+vdP25MmT+uSTT644V0ZGhiIjI5Wenq6IiIgrfp+iwjXK5XSEK2ZGFpm/7gAAWKUof38h8T0GAAAACqagc8QC3Wn7yiuvqG/fvgoPD9crr7xy2WMHDhzoWdICOnv2rNatW6dhw4a5twUFBalVq1ZatWrVRV+zatUqDRkyJN+2xMTECwa0y5YtU0xMjMqUKaMWLVromWeeUdmyZS+ZJTs7W9nZ2e7nGRkZV9AIAAAAAAAAAC5UoKHtxIkTlZycrPDwcE2cOPGSx7lcLp8NbY8dO6bc3FxVqFAh3/YKFSpo27ZtF31NWlraRY9PS0tzP09KStLf//53XX311dq9e7eeeOIJ3X777Vq1apWKFSt20fcdO3asRo0a9ScbAQAAAAAAAMCFCjS0TUlJueivA0Hnzp3dv65Xr56uv/56XXPNNVq2bJlatmx50dcMGzYs3x28GRkZqlKlis+zAgAAAAAAAAh8QZ4cnJOTo2uuuUZbt271VZ5LK
leunIoVK6bDhw/n23748GFVrFjxoq+pWLGiR8dLUo0aNVSuXDnt2rXrkseEhYUpIiIi3wMAAAAAAAAAvMGjoW1ISIjOnDnjqyyXFRoaqkaNGmnJkiXubXl5eVqyZIkSEhIu+pqEhIR8x0vS4sWLL3m8JB04cEA///yzKlWq5J3gAAAAAAAAAOABj4a2ktSvXz+NGzdO586d80WeyxoyZIjeeustvfvuu9q6daseeughnT59Wj179pQkdevWLd8HlQ0aNEgLFy7UhAkTtG3bNj399NP67rvv1L9/f0lSZmamHn30UX377bf66aeftGTJErVv315xcXFKTEws9H4AAAAAAAAAUKA1bX9r7dq1WrJkiRYtWqR69eqpZMmS+fbPmTPHa+F+75577tHRo0c1YsQIpaWlqUGDBlq4cKH7w8b27dunoKD/m0PfdNNNmjVrlp566ik98cQTqlmzpj755BPVrVtXklSsWDFt2rRJ7777rk6ePKnY2Fi1bt1aY8aMUVhYmM96AEBhc41yOR3hTzEjjdMRAAAAAAAoNC5jjEf/Ej5/V+ulvPPOO38qUFGUkZGhyMhIpaenW7G+bVEe/jD4ga2K8tetxNcuYAOuUwAAALBBQeeIHt9pa+NQFgAAAAAAAAAKi8dr2rZo0UInT568YHtGRoZatGjhjUwAAAAAAAAAYC2P77RdtmyZzp49e8H2M2fO6Ouvv/ZKKAAAAAAAAHgfSxIBRUOBh7abNm1y//rHH39UWlqa+3lubq4WLlyoypUrezcdAAAAAAAAAFimwEPbBg0ayOVyyeVyXXQZhOLFi+vVV1/1ajgAAAAAAAAAsE2Bh7YpKSkyxqhGjRpas2aNypcv794XGhqqmJgYFStWzCchAQAAAAAAAMAWBR7aVqtWTZKUl5fnszAAAAAAAAAAYLugK3nRv/71LzVr1kyxsbHau3evJGnixIn673//69VwAAAAAAAAAGAbj4e2b7zxhoYMGaI2bdro5MmTys3NlSSVKVNGkyZN8nY+AAAAAAAAALCKx0PbV199VW+99ZaefPLJfGvYNm7cWJs3b/ZqOAAAAAAAAACwjcdD25SUFDVs2PCC7WFhYTp9+rRXQgEAAAAAAACArTwe2l599dXasGHDBdsXLlyo2rVreyMTAAAAAAAAAFgr2NMXDBkyRP369dOZM2dkjNGaNWv0/vvva+zYsXr77bd9kREAAAAAAAAArOHx0Pb+++9X8eLF9dRTTykrK0tdu3ZVbGysXn75ZXXu3NkXGQEAAAAAAADAGh4PbSUpOTlZycnJysrKUmZmpmJiYrydCwAAAAAAAACsdEVD2/NKlCihEiVKeCsLAAAAAAAAAFivwEPbFi1aFOi4L7/88orDAAAAAAAAAIDtCjy0XbZsmapVq6a2bdsqJCTEl5kAAAAAAAAAwFoFHtqOGzdO77zzjj788EMlJyerV69eqlu3ri+zAQAAAAAAAIB1ggp64KOPPqoff/xRn3zyiU6dOqVmzZqpSZMmmjJlijIyMnyZEQAAAAAAAACsUeCh7XkJCQl66623lJqaqn79+mnatGmKjY1lcAsAAAAAAAAAXlDg5RF+b/369frqq6+0detW1a1bl3VuAQAAAABAkeMa5XI6wp9iRhqnIwDwAY/utD106JCee+45XXvttbrrrrsUHR2t1atX69tvv1Xx4sV9lREAAAAAAAAArFHgO23btGmjpUuXqnXr1nrxxRfVtm1bBQdf8Y26AAAAAAAAAICLKPDUdeHChapUqZL27dunUaNGadSoURc9bv369V4LBwAAAAAAAAC2KfDQduTIkb7MAQAAAAAAAAAQQ1sAAAAAAAAA8CsefRAZAAAAAAAAAMC3GNoCAAAAAAAAgB9haAsAAAAAAAAAfoShLQAAAAAAAAD4kQJ/EBkABBrXKJfTEa6YGWmcjgAAAAAAAHykwHfarlq1SvPnz8+3bcaMGbr66qsVExOjvn37Kjs72+sBAQAAAAAAAMAmBR7ajh49Wj/88IP7+ebNm9W7d2+1atVKjz/+uObNm6exY8f6JCQAAAAAAAAA2KLAyyNs2LBBY8aMcT+fPXu2mjZtqrfeekuSVKVKFY0cOVJPP/2010MCAAAAQFFQlJdfkliCCQAAf1HgO21PnDihChUquJ9/9dVXuv32293P//KXv2j//v3eTQcAAAAAAAAAlinw0LZChQpKSUmRJJ09e1br16/XjTfe6N5/6tQphYSEeD8hAAAAAAAAAFikwMsjtGnTRo8//rjGjRunTz75RCVKlNBf//pX9/5Nmzbpmmuu8UlIAAAAAEUTywUAAAB4rsBD2zFjxujvf/+7mjdvrlKlSundd99VaGioe/+0adPUunVrn4QEAAAAAAAAAFsUeGhbrlw5LV++XOnp6SpVqpSKFSuWb/+HH36o0qVLez0gAAAAAAAAANikwGvanhcZGXnBwNYYo9WrV6tLly5eCwYAAAAAAAAANvJ4aPtbKSkpGj58uKpWraqOHTvqzJkz3soFAAAAAAAAAFYq8PII52VnZ+ujjz7S1KlTtWLFCuXm5mr8+PHq3bu3IiIifJERAAAAAAAAAKxR4Dtt161bp4cfflgVK1bUpEmT1KFDB+3fv19BQUFKTEwstIHta6+9purVqys8PFxNmzbVmjVrLnv8hx9+qPj4eIWHh6tevXr67LPP8u03xmjEiBGqVKmSihcvrlatWmnnzp2+rAAAAAAAAAAAl1TgoW3Tpk0VFhamb7/9VmvXrtXAgQNVoUIFX2a7wAcffKAhQ4Zo5MiRWr9+verXr6/ExEQdOXLkosevXLlSXbp0Ue/evfX999+rQ4cO6tChg7Zs2eI+5oUXXtArr7yiKVOmaPXq1SpZsqQSExNZ6gEAAAAAAACAIwo8tG3ZsqWmTp2q0aNHa+HChTLG+DLXRb300kvq06ePevbsqTp16mjKlCkqUaKEpk2bdtHjX375ZSUlJenRRx9V7dq1NWbMGN1www2aPHmypF/vsp00aZKeeuoptW/fXtdff71mzJihQ4cO6ZNPPinEZgAAAAAAAADwqwKvafv5559r//79euedd/TQQw/pl19+0T333CNJcrlcPgt43tmzZ7Vu3ToNGzbMvS0oKEitWrXSqlWrLvqaVatWaciQIfm2JSYmugeyKSkpSktLU6tWrdz7IyMj1bRpU61atUqdO3e+6PtmZ2crOzvb/TwjI0OStGHDBpUqVeqK+hUph5wOcOXWr1/vdAT4E5v+LhfhrhJfu4AVuE4FLtvOrW19gUBg29etbX0BP5OZmVmwA80VWrRokenSpYsJDw83NWvWNMOGDTPr1q270rf7QwcPHjSSzMqVK/Ntf/TRR02TJk0u+pqQkBAza9asfNtee+01ExMTY4wx5ptvvjGSzKFDh/Id849//MPcfffdl8wycuRII4kHDx48ePDgwYMHDx48ePDgwYMHDx48PH6kp6dfdhZa4Dttf++2227TbbfdphMnTui9997TtGnTNG7cOOXm5l7pWxYZw4YNy3cHb0ZGhqpUqaKvvvrKjjttEbAa
[Notebook figure output (base64 PNG) omitted — a three-panel matplotlib chart: "Base Model Metrics" and "Fine-tuned Model Metrics" plot the four RAGAS metrics per question (x-axis "Questions", y-axis "RAGAS Metrics"); "Improvement in Average Metrics After Fine-tuning" shows the average per-question improvement as a bar chart.]
"]},"metadata":{},"output_type":"display_data"}],"source":["# Another go at the graph\n","\n","import pandas as pd\n","import matplotlib.pyplot as plt\n","\n","# Merge on the 'question' column or another common identifier\n","merged_df = pd.merge(base_result.to_pandas(), fine_tuned_result.to_pandas(), on='question', suffixes=('_base', '_finetuned'))\n","\n","# Create shortened question labels for readability\n","merged_df['short_question'] = ['Question ' + str(i + 1) for i in range(len(merged_df))]\n","\n","# Calculate improvements\n","merged_df['improvement_answer_relevancy'] = merged_df['answer_relevancy_finetuned'] - merged_df['answer_relevancy_base']\n","merged_df['improvement_faithfulness'] = merged_df['faithfulness_finetuned'] - merged_df['faithfulness_base']\n","merged_df['improvement_context_recall'] = merged_df['context_recall_finetuned'] - merged_df['context_recall_base']\n","merged_df['improvement_context_precision'] = merged_df['context_precision_finetuned'] - merged_df['context_precision_base']\n","\n","# Average improvement across all metrics\n","merged_df['avg_improvement'] = merged_df[['improvement_answer_relevancy', 'improvement_faithfulness',\n"," 'improvement_context_recall', 'improvement_context_precision']].mean(axis=1)\n","\n","# Plotting three subplots: base, fine-tuned, and improvements\n","fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(14, 18))\n","\n","# Plot for Base Model\n","ax1.plot(merged_df['short_question'], merged_df['answer_relevancy_base'], label='Answer Relevancy', marker='o')\n","ax1.plot(merged_df['short_question'], merged_df['faithfulness_base'], label='Faithfulness', marker='o', linestyle='--')\n","ax1.plot(merged_df['short_question'], merged_df['context_recall_base'], label='Context Recall', marker='o', linestyle='-.')\n","ax1.plot(merged_df['short_question'], merged_df['context_precision_base'], label='Context Precision', marker='o', linestyle=':')\n","ax1.set_title('Base Model Metrics')\n","ax1.set_xlabel('Questions')\n","ax1.set_ylabel('RAGAS Metrics')\n","ax1.legend()\n","ax1.set_xticklabels(merged_df['short_question'], rotation=90)\n","\n","# Plot for Fine-tuned Model\n","ax2.plot(merged_df['short_question'], merged_df['answer_relevancy_finetuned'], label='Answer Relevancy', marker='x')\n","ax2.plot(merged_df['short_question'], merged_df['faithfulness_finetuned'], label='Faithfulness', marker='x', linestyle='--')\n","ax2.plot(merged_df['short_question'], merged_df['context_recall_finetuned'], label='Context Recall', marker='x', linestyle='-.')\n","ax2.plot(merged_df['short_question'], merged_df['context_precision_finetuned'], label='Context Precision', marker='x', linestyle=':')\n","ax2.set_title('Fine-tuned Model Metrics')\n","ax2.set_xlabel('Questions')\n","ax2.set_ylabel('RAGAS Metrics')\n","ax2.legend()\n","ax2.set_xticklabels(merged_df['short_question'], rotation=90)\n","\n","# Plot for Improvement\n","ax3.bar(merged_df['short_question'], merged_df['avg_improvement'], color='green')\n","ax3.set_title('Improvement in Average Metrics After Fine-tuning')\n","ax3.set_xlabel('Questions')\n","ax3.set_ylabel('Average Improvement in RAGAS Metrics')\n","ax3.axhline(0, color='black', linewidth=1) # Add a line at y=0 for reference\n","ax3.set_xticklabels(merged_df['short_question'], rotation=90)\n","\n","# Adjust layout for better spacing\n","plt.tight_layout()\n","\n","# Show the plots\n","plt.show()\n"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 
3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"},"widgets":{"application/vnd.jupyter.widget-state+json":{"00afb28cca0243cab2cf00fed4ffb6e7":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"LabelModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"LabelModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"LabelView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4f6cc0f0c6f841c58f0075c749392328","placeholder":"ā€‹","style":"IPY_MODEL_db7636673d404cafb8de516908734ab9","value":"Token is valid (permission: fineGrained)."}},"00d0afb531684ad990141e17d735fdde":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"01c526f4dc91484b92cac4ce649259c2":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bb6d31017c334cd995fd41e75eeb37fb","placeholder":"ā€‹","style":"IPY_MODEL_98ccdf3dcfe74921ba5d58b52f4dcc9d","value":"\nPro Tip: If you don't already have one, you can create a dedicated\n'notebooks' token with 'write' access, that you can then easily reuse for all\nnotebooks.
"}},"01ea24fd0dee447e9f543ab8b83c841d":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a649144966694ecf90f465e6e3814532","IPY_MODEL_53b15312701e45d8be3873ec90d2612f","IPY_MODEL_a35cc9dcd1944e0ba97f6b1369264cd1"],"layout":"IPY_MODEL_ebe4c25cd7d84b18b04b05a997c4fe18"}},"029a071635644fe0be03b8616dd9e9f7":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"05f54ae147f84368b9b9b229d651f372":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_993afbad55b0429fba66680948fc3cc8","max":695,"min":0,"orientation":"horizontal","style":"IPY_MODEL_214d4199769241b7b3a5308a77c0e82c","value":695}},"063b2fb3c1264175839c320cb20540e0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f1bf959afd114c0da5778504b3dfac3d","placeholder":"ā€‹","style":"IPY_MODEL_c73de0e825ed452b985bbdee42619f1c","value":"config.json:ā€‡100%"}},"06d67987a0a146efb5792b1e30fc83e8":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template
_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"086904037e764ac1a6d9257df68cafaa":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_d8c3106084b54badb89e30c9b46f056c","IPY_MODEL_b8671fe776264827be1db6ab7ee4f4e4","IPY_MODEL_5442dc5b03f34c72ba8210f8fba28799"],"layout":"IPY_MODEL_939f82c6638548eda8f92c7bc8164957"}},"099335f2c6cf4e7ca148121d38c42764":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_06d67987a0a146efb5792b1e30fc83e8","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_221017bd5a014bde99a58f744b7c849d","value":349}},"0ae4358a1bfc4c8bac06938b1cc5d677":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0b62e88a471b4f32911ec1d68311b04b":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0b7894e5c5d8482185a33cb72d69c041":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0c6657312b014603824252f540fde744":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_nam
e":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0d5f11f617c446c3a970f3f8a14d8542":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0ddef191449543a8983807ab48efa88b":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"0e14af45a68a440094ecfeb4c81369fd":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ec1866ccf43b49ee81c67b05fbcdf92a","placeholder":"ā€‹","style":"IPY_MODEL_5bc2ccc11a33479da7b493da95447885","value":"Generating:ā€‡100%"}},"0ed6af7c19cf4a2dbfa0b90ecefd49bf":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"12d52438ca3a45cd9b0b3906005309ea":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"m
in_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"13491622b3de4df880aabcf273d6e313":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"13e502c9d36c4651a4b8b778f3abf147":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":"center","align_self":null,"border":null,"bottom":null,"display":"flex","flex":null,"flex_flow":"column","grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"50%"}},"14fb81c6beef4d96afd21598f3acc376":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"LabelModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"LabelModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"LabelView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b4109464f1834a7dae43f0116f6569fe","placeholder":"ā€‹","style":"IPY_MODEL_dc0460abcdb74448b491f23583ae4766","value":"Connecting..."}},"16658c9880b84e44bb6c3a57d7009749":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":n
ull,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1787c966e863494ba89d3150c008bcab":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ButtonStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ButtonStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","button_color":null,"font_weight":""}},"1899322b98174c8ca8b37849dd1dd746":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"1abfd525ec82401c871d77136c5e9710":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_36a3dff77fd14e55a9ca2343bf1a0662","IPY_MODEL_05f54ae147f84368b9b9b229d651f372","IPY_MODEL_ebed5481ee7d4833af40c86c9176c397"],"layout":"IPY_MODEL_0d5f11f617c446c3a970f3f8a14d8542"}},"1b556db40ab8459d88c689be257900d6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_029a071635644fe0be03b8616dd9e9f7","placeholder":"ā€‹","style":"IPY_MODEL_2fb82da24edc4db381cdef6bedde74ab","value":"ā€‡84.5k/84.5kā€‡[00:00<00:00,ā€‡340kB/s]"}},"1b9110b967264538b0d2bf70f9290469":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1d2b5d5c680b41438f407102c6bc3283":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutM
odel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"202ce0c76a614dbd82f0f64fc55618f8":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"205fefe263fa471d8d6037d509dd6488":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2088a9863ab841008c09874e754ef8bd":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_eaded34e0186406ea1fb991b31f25a08","placeholder":"ā€‹","style":"IPY_MODEL_6aeb3dd342ee4678adf99dde015aeb7e","value":"Evaluating:ā€‡100%"}},"212b6edc6da64aaa81a1391b0a85842b":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"214d4199769241b7b3a5308a77c0e82c":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/control
s","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"221017bd5a014bde99a58f744b7c849d":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"27458abe93644581b06bc2e4b80f580d":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"27abe99a21b6483eab8ad967680d55a6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_99e3b3d042fd4b9aabd0c2f93801d644","IPY_MODEL_a312b44e0bb24934ad9e2e8ec7909b25","IPY_MODEL_916a9a3a43e544a5b2eb68ee2eed680b"],"layout":"IPY_MODEL_f8d554b3702847f3a234a14ca0354bd0"}},"28bbd69ad66b447a8fb29b5f5133d8b1":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"29d141be53b548ee81aede4ebe290928":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"Layo
utModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2a3888c2c35548198bd6283d239553a0":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2b5c7fdd9284455d850756a0c7e5a0ac":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_41690a7cd102433094de092ffb50d71c","max":704,"min":0,"orientation":"horizontal","style":"IPY_MODEL_212b6edc6da64aaa81a1391b0a85842b","value":704}},"2fb82da24edc4db381cdef6bedde74ab":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3264339479b2488998eb1a95db90dc7b":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_co
lumns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"33c05991f0244163a09a5e6b9331f025":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_cd1784eae89e451a8025750847ed6ef1","IPY_MODEL_4fc6c199cbcc45b8a1bd37d8f9fbbeed","IPY_MODEL_5ecae354be0f426291c81f19e2bfa1e1"],"layout":"IPY_MODEL_00d0afb531684ad990141e17d735fdde"}},"348eda1e99ed44b984fc42e11ceb5f22":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"35006683857e4f48adb0d9f6249b666a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"350fd98bf967435fae8823f2c084f869":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"LabelModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"LabelModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"LabelView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8949ccfc710348a6b8c59bbf49439253","placeholder":"ā€‹","style":"IPY_MODEL_4ad530c43d934970bad9576481fd947f","value":"Login 
successful"}},"354412e9314d4a4992d69960614eea98":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_0e14af45a68a440094ecfeb4c81369fd","IPY_MODEL_816eafcbfb7a4a9aa24510e8600c4306","IPY_MODEL_c9866613b8064082a2bd893b42842239"],"layout":"IPY_MODEL_51ed2ccdb17e4fa786a28f2457311c21"}},"36a3dff77fd14e55a9ca2343bf1a0662":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_edd40704fe314f3e9b1b4d469de4007c","placeholder":"ā€‹","style":"IPY_MODEL_6c8e86e4dc684183950cb72ec93bc132","value":"special_tokens_map.json:ā€‡100%"}},"376d7493e3c947bfb522e9a6d34e40f6":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"37820e89028e450d9b3eb95614386930":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_656ca2910be84392b13462e3596954e2","placeholder":"ā€‹","style":"IPY_MODEL_5d57068353544e5dad585b56c75cf541","value":"

Copy a token from your Hugging Face\ntokens page and paste it below.
Immediately click login after copying\nyour token or it might be stored in plain text in this notebook file.
"}},"3985a7dac27942f2b37c1162238af035":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"39f1cb879cda4bc69a04e40d78bc34fb":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3a54317901864310830a65a2b38d3356":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ButtonModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ButtonModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ButtonView","button_style":"","description":"Login","disabled":false,"icon":"","layout":"IPY_MODEL_8eb8aa6a63944c66b71956fe70338e95","style":"IPY_MODEL_1787c966e863494ba89d3150c008bcab","tooltip":""}},"3aff9c20986845fdaa38eb8fe8affddd":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3b81c55fb6c443dc96d8fc6342ceda84":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3eb70a74d6644e60909daa3cbe6fa141":{"model_module":"@jupyter-widgets/controls","m
odel_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"41690a7cd102433094de092ffb50d71c":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"43a136e1e1304b638bd70f8a4329cdb1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3264339479b2488998eb1a95db90dc7b","max":80,"min":0,"orientation":"horizontal","style":"IPY_MODEL_72ce3640cd81450791cf9a79a1998b70","value":80}},"43ae71619843486e8f2ec6ac5d1274b9":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"LabelModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"LabelModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"LabelView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8695bb0563b648a188829e2e49560feb","placeholder":"ā€‹","style":"IPY_MODEL_6ef9fe1c9e9846d6a28ec690b4ccecb5","value":"Your token has been saved in your configured git credential helpers 
(store)."}},"4586772f17524dee97f7b6c45f20667f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"48224db4e593453a96129ce3b0aecf04":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"4ad530c43d934970bad9576481fd947f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4ca494bf09284ce8925260eccb037538":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4d0995487f764920a21c87d6d30708ae":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4f6cc0f0c6f841c58f0075c749392328":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":nul
l,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4fc26c7d321749ed92bc408d61483a46":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1b9110b967264538b0d2bf70f9290469","placeholder":"ā€‹","style":"IPY_MODEL_3eb70a74d6644e60909daa3cbe6fa141","value":"Evaluating:ā€‡100%"}},"4fc6c199cbcc45b8a1bd37d8f9fbbeed":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_5910fc3156bc4195807785dea70d0561","max":107,"min":0,"orientation":"horizontal","style":"IPY_MODEL_99830a97df904732a0bc416633b3d9d6","value":107}},"51ed2ccdb17e4fa786a28f2457311c21":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"53b15312701e45d8be3873ec90d2612f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_97e66c7442d544c4a650111b3f534cd2","max":1340612432,"min":0,"orientation":"horizontal","style":"IPY_MODEL_8e0e6b9d1acf43ebbfb644f83d1ef1d0","value":1340612432}},"542e25298f9c4dad842d513e580729f0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5442dc5b03f34c72ba8210f8fba28799":{"model_module":"@jupyter-
widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5526033b838949e7a20aab86444d5089","placeholder":"ā€‹","style":"IPY_MODEL_0ed6af7c19cf4a2dbfa0b90ecefd49bf","value":"ā€‡252/252ā€‡[00:00<00:00,ā€‡18.2kB/s]"}},"5526033b838949e7a20aab86444d5089":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5546d729b514472e9afb9eb9033244c6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"589b87ab91b64706aa322f3a8ad2b75b":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5910fc3156bc4195807785dea70d0561":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null
,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5992ffca6d004d93a36f27b1151e6cd7":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_9bd4c21f75664bbbb3bb7a2924a1696f","IPY_MODEL_aaa254dd30834039876d374499f2d5cd","IPY_MODEL_f785472a2e764df79c4bbe152b6b5c5f"],"layout":"IPY_MODEL_fa832662420b495c8ea2db00a2c76a21"}},"59b7c58882e34c9eb838c5cfce10eb80":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4ca494bf09284ce8925260eccb037538","placeholder":"ā€‹","style":"IPY_MODEL_5546d729b514472e9afb9eb9033244c6","value":"ā€‡349/349ā€‡[00:00<00:00,ā€‡23.1kB/s]"}},"5a7645ce55ac470ba893e84c68e80bdf":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ed4328bc291248bda181f59ecef041c9","IPY_MODEL_c5272bdccaef4fd2883893c757de3de0","IPY_MODEL_708bddc0849d4cf0b4b805d116175184"],"layout":"IPY_MODEL_aeb0c62dbdc244f39c1640702cde4349"}},"5bc2ccc11a33479da7b493da95447885":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5d3fee5286ae4b2e8b5f9584b3d8da9a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_hei
ght":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5d57068353544e5dad585b56c75cf541":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5ecae354be0f426291c81f19e2bfa1e1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_811eee9c6b1f40b0931cd979bdcc836b","placeholder":"ā€‹","style":"IPY_MODEL_542e25298f9c4dad842d513e580729f0","value":"ā€‡107/107ā€‡[00:00<00:00,ā€‡6.96kB/s]"}},"5f2b4972fe224537b6a488d7f1401bca":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"62a62a7f977e4e2db40caf04bae315e9":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6331d98114864e74b14c279a4fd1df6f":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_posit
ion":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6402cdd0e7b34f84b8c7fef34cff51d3":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"VBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"VBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"VBoxView","box_style":"","children":["IPY_MODEL_00afb28cca0243cab2cf00fed4ffb6e7","IPY_MODEL_43ae71619843486e8f2ec6ac5d1274b9","IPY_MODEL_b0d0f04d53674701bbda50dd8b11ba84","IPY_MODEL_350fd98bf967435fae8823f2c084f869"],"layout":"IPY_MODEL_13e502c9d36c4651a4b8b778f3abf147"}},"656ca2910be84392b13462e3596954e2":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"66201b27864c48a5ad4503f44aaf9952":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6689997c4e8a49b8acc7f1c8d9517be8":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3985a7dac27942f2b37c1162238af035","max":231508,"min":0,"orientation":"horizontal","style":"IPY_MODEL_95a6b04d532c4cca91b60c87440c808e","value":231508}},"696625b37847449790476516c37c72a9":{"model_mod
ule":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6aeb3dd342ee4678adf99dde015aeb7e":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6c8e86e4dc684183950cb72ec93bc132":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6cf62d35e3804c9cbc8a75e7b3acf190":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ea76a465b6c243269398134dbd294613","IPY_MODEL_ec02a444a9cf473284521bec097b9af3","IPY_MODEL_6e10b4829e3546bd926381c15ba8845a"],"layout":"IPY_MODEL_e83c6cb32f0b4c07953dfc068475b605"}},"6d167890f49042618bebe73a926ce6f2":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6d6f3c08bc6d4a7c922ce66d39500574":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6e10b4829e3546bd926381c15ba8845a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","
_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_da69983a92ab46329250fe027115cde1","placeholder":"ā€‹","style":"IPY_MODEL_9e8338c59c13450ca03b2b6c2fbb5263","value":"ā€‡1.34G/1.34Gā€‡[00:06<00:00,ā€‡241MB/s]"}},"6ef9fe1c9e9846d6a28ec690b4ccecb5":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6f16cf03eeeb464eb527db0e7b325d66":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2a3888c2c35548198bd6283d239553a0","placeholder":"ā€‹","style":"IPY_MODEL_6d167890f49042618bebe73a926ce6f2","value":"ā€‡80/80ā€‡[00:55<00:00,ā€‡ā€‡5.04s/it]"}},"708bddc0849d4cf0b4b805d116175184":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bc3e87e68ab743d9ac18d4aaea17d932","placeholder":"ā€‹","style":"IPY_MODEL_b944421a47d1473492378bd4b85294a5","value":"ā€‡1.38k/1.38kā€‡[00:00<00:00,ā€‡67.0kB/s]"}},"72ce3640cd81450791cf9a79a1998b70":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"792391b12f3b4cb39bba2cd04fe46adc":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"7a2cdeee19bb4e4dbeb7b0145e767a0a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"","description":"","description_tooltip":null,"layout":"IPY_MODEL_0b62e88a471b4f32911ec1d68311b04b","max":116,"min":0,"orientation":"horizontal","style":"IPY_MODEL_fdd0d11accee42a3b7d01f6135cf7c05","value":116}},"7e1918b2ea1a4a0eaf2af9eccbaba5fd":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widget
s/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"7ed4c1746eed4c0ab62c56486c047534":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"811eee9c6b1f40b0931cd979bdcc836b":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8154fb8e60f54e35b1951edfb94aeec0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_589b87ab91b64706aa322f3a8ad2b75b","placeholder":"ā€‹","style":"IPY_MODEL_f8bc69007ae54fb0a2eccfc93c83f41b","value":"ā€‡704/704ā€‡[00:00<00:00,ā€‡38.4kB/s]"}},"816eafcbfb7a4a9aa24510e8600c4306":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_66201b27864c48a5ad4503f44aaf9952","max":20,"min":0,"orientation":"horizontal","style":"IPY_MODEL_792391b12f3b4cb39bba2cd04fe46adc","value":20}},"838e5eca5cc64bae88398
106e45bdc6e":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8695bb0563b648a188829e2e49560feb":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8759483ad1d94043ad524111b2e8be8f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"88739c574a6e402e90e11619f7359a4a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"88f58fdaf0184861949eabf842662ed0":{"model_module":"@j
upyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"894077a1eae9487a9f4a34964099dfbb":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_348eda1e99ed44b984fc42e11ceb5f22","placeholder":"ā€‹","style":"IPY_MODEL_6d6f3c08bc6d4a7c922ce66d39500574","value":"ā€‡232k/232kā€‡[00:00<00:00,ā€‡466kB/s]"}},"8949ccfc710348a6b8c59bbf49439253":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"89b6159fa92947a282f25d23fa64933f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_063b2fb3c1264175839c320cb20540e0","IPY_MODEL_2b5c7fdd9284455d850756a0c7e5a0ac","IPY_MODEL_8154fb8e60f54e35b1951edfb94aeec0"],"layout":"IPY_MODEL_12d52438ca3a45cd9b0b3906005309ea"}},"89df8c2b347e4ea9aaebe076ee6de362":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_838e5eca5cc64bae88398106e45bdc6e","placeholder":"ā€‹","style":"IPY_MODEL_f7ba980eac9242149908255d2ad11452","value":"ā€‡115/116ā€‡[00:06<00:00,ā€‡11.27it/s]"}},"8e0e6b9d1acf43ebbfb644f83d1ef1d0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","
_view_name":"StyleView","bar_color":null,"description_width":""}},"8e3ced6e76274954a192160741786b4f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"8eb8aa6a63944c66b71956fe70338e95":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"916a9a3a43e544a5b2eb68ee2eed680b":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_696625b37847449790476516c37c72a9","placeholder":"ā€‹","style":"IPY_MODEL_88f58fdaf0184861949eabf842662ed0","value":"ā€‡712k/712kā€‡[00:00<00:00,ā€‡1.43MB/s]"}},"9214bf7d49d44c52a40d05cd8fb7587d":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"939f82c6638548eda8f92c7bc8164957":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_ite
ms":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9440a0e2c13f4102adf4c95da3a4ab77":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"95a6b04d532c4cca91b60c87440c808e":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"97a14f355075450fa5978b6f9c729419":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"97b7b8bac3724273b559f807f3e4b63a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"wid
th":null}},"97e66c7442d544c4a650111b3f534cd2":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"98ccdf3dcfe74921ba5d58b52f4dcc9d":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"993af9b12eeb465097dd6ad2ec8f541a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_fd6b07049ffd4baea48d98bab00ae91e","IPY_MODEL_6689997c4e8a49b8acc7f1c8d9517be8","IPY_MODEL_894077a1eae9487a9f4a34964099dfbb"],"layout":"IPY_MODEL_13491622b3de4df880aabcf273d6e313"}},"993afbad55b0429fba66680948fc3cc8":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"99830a97df904732a0bc416633b3d9d6":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"99e3b3d042fd4b9aabd0c2f93801d644":{"mo
del_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_28bbd69ad66b447a8fb29b5f5133d8b1","placeholder":"ā€‹","style":"IPY_MODEL_62a62a7f977e4e2db40caf04bae315e9","value":"tokenizer.json:ā€‡100%"}},"9bd4c21f75664bbbb3bb7a2924a1696f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_88739c574a6e402e90e11619f7359a4a","placeholder":"ā€‹","style":"IPY_MODEL_35006683857e4f48adb0d9f6249b666a","value":"1_Pooling/config.json:ā€‡100%"}},"9c5c7ecc51c84c86b7c212a501b9fac5":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9e8338c59c13450ca03b2b6c2fbb5263":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9f0bedc74ee2481fb88b093eb23a1458":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"PasswordModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"PasswordModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"PasswordView","continuous_update":true,"description":"Token:","description_tooltip":null,"disabled":false,"layout":"IPY_MODEL_97a14f355075450fa5978b6f9c729419","placeholder":"ā€‹","style":"IPY_MODEL_7e1918b2ea1a4a0eaf2af9eccbaba5fd","value":""}},"a312b44e0bb24934ad9e2e8ec7909b25":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_e89e9331e2534620bd83c71e653052ca","max":711649,"min":0,"orientation":"horizontal","style":"IPY_MODEL_a752d32fcf0046afa6f59d9aaeb19bea","value":711649}},"a35b5d59d9974d459ad3477f4faaba7f":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0
","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a35cc9dcd1944e0ba97f6b1369264cd1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_29d141be53b548ee81aede4ebe290928","placeholder":"ā€‹","style":"IPY_MODEL_acb7913c428942d2a58f138bfe4978c5","value":"ā€‡1.34G/1.34Gā€‡[01:21<00:00,ā€‡17.0MB/s]"}},"a418a3e6152b48ddbde6931ed96a704e":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a45be662c59d4faf892e6980b86deaba":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a4f06a72f6794a8b8d6d15aec09148c0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a649144966694ecf90f465e6e3814532":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"H
TMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6331d98114864e74b14c279a4fd1df6f","placeholder":"ā€‹","style":"IPY_MODEL_39f1cb879cda4bc69a04e40d78bc34fb","value":"model.safetensors:ā€‡100%"}},"a752d32fcf0046afa6f59d9aaeb19bea":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a98c940d9c164284a475129af41a3886":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"aaa254dd30834039876d374499f2d5cd":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_5d3fee5286ae4b2e8b5f9584b3d8da9a","max":297,"min":0,"orientation":"horizontal","style":"IPY_MODEL_48224db4e593453a96129ce3b0aecf04","value":297}},"acb7913c428942d2a58f138bfe4978c5":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ad76dfdb5a6148f9aed2927225cd53e1":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"adff481f63e74e7d860be13e7457cea7":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,
"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"aeb0c62dbdc244f39c1640702cde4349":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b03f7f7e26ac47e2aa4af084c5e3e3bb":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_fd7703d8bead4df285305942d7feeecb","max":84541,"min":0,"orientation":"horizontal","style":"IPY_MODEL_9440a0e2c13f4102adf4c95da3a4ab77","value":84541}},"b0d0f04d53674701bbda50dd8b11ba84":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"LabelModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"LabelModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"LabelView","description":"","description_tooltip":null,"layout":"IPY_MODEL_9214bf7d49d44c52a40d05cd8fb7587d","placeholder":"ā€‹","style":"IPY_MODEL_0c6657312b014603824252f540fde744","value":"Your token has been saved to 
/root/.cache/huggingface/token"}},"b4109464f1834a7dae43f0116f6569fe":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b5b3fafd5f97403c8d8a473da84e9163":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b6e988e0b67046d79bc7d096c3f941aa":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ff44ce74e8a54a5fb6ca61606e6ae6f2","IPY_MODEL_b03f7f7e26ac47e2aa4af084c5e3e3bb","IPY_MODEL_1b556db40ab8459d88c689be257900d6"],"layout":"IPY_MODEL_fdc3c61e672a4de89d33b206acf5320b"}},"b8671fe776264827be1db6ab7ee4f4e4":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_f3269734f9414c13a2fa9723c2f35790","max":252,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b91018e2a75d4b029c61e611f8f364cb","value":252}},"b91018e2a75d4b029c61e611f8f364cb":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b944421a47d1473492378bd4b85294a5":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b959b621e
d974f7680cb91cb4119ae14":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1d2b5d5c680b41438f407102c6bc3283","placeholder":"ā€‹","style":"IPY_MODEL_205fefe263fa471d8d6037d509dd6488","value":"ā€‡80/80ā€‡[00:46<00:00,ā€‡ā€‡2.37s/it]"}},"ba9cb2ccc31941d8ad3e150949c17398":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bb6d31017c334cd995fd41e75eeb37fb":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bc3e87e68ab743d9ac18d4aaea17d932":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object
_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c486ad7a260640d5a1c45f518c4e0469":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c517665a69554416ac467b4b2867dedd":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c5272bdccaef4fd2883893c757de3de0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_fb505e3d077341e18d59d8bcc3eb5fe9","max":1381,"min":0,"orientation":"horizontal","style":"IPY_MODEL_8e3ced6e76274954a192160741786b4f","value":1381}},"c56c5d295dff457595c92c6b44e03834":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f2d6b5ae78d9470998d5d4cbf386f481","IPY_MODEL_7a2cdeee19bb4e4dbeb7b0145e767a0a","IPY_MODEL_89df8c2b347e4ea9aaebe076ee6de362"],"layout":"IPY_MODEL_cbab2ec2689a40be9fcce5108bd19d48"}},"c73de0e825ed452b985bbdee42619f1c":{"model_module":"@jupyter-widgets/controls","model_mod
ule_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c9866613b8064082a2bd893b42842239":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f9f5ca2ad7fb470dad1aec1b10636936","placeholder":"ā€‹","style":"IPY_MODEL_4d0995487f764920a21c87d6d30708ae","value":"ā€‡20/20ā€‡[00:50<00:00,ā€‡ā€‡3.30s/it]"}},"cbab2ec2689a40be9fcce5108bd19d48":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":"hidden","width":null}},"cd1784eae89e451a8025750847ed6ef1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f5c2be5d46f04e6d824061b493d20b03","placeholder":"ā€‹","style":"IPY_MODEL_f5268fae9be749b7a05256473c9575c2","value":"sentence_bert_config.json:ā€‡100%"}},"cd8022aaa9404a10939337bd6e1c38bb":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_e7d91e070fbb4c59a7c83c1c16c89a3a","IPY_MODEL_099335f2c6cf4e7ca148121d38c42764","IPY_MODEL_59b7c58882e34c9eb838c5cfce10eb80"],"layout":"IPY_MODEL_ad76dfdb5a6148f9aed2927225cd53e1"}},"d8c3106084b54badb89e30c9b46f056c":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","descripti
on":"","description_tooltip":null,"layout":"IPY_MODEL_a35b5d59d9974d459ad3477f4faaba7f","placeholder":"ā€‹","style":"IPY_MODEL_0b7894e5c5d8482185a33cb72d69c041","value":"config_sentence_transformers.json:ā€‡100%"}},"da69983a92ab46329250fe027115cde1":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"db7636673d404cafb8de516908734ab9":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dc0460abcdb74448b491f23583ae4766":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dc88c7132d8e48eb9db8fd34a3b874f0":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_4fc26c7d321749ed92bc408d61483a46","IPY_MODEL_43a136e1e1304b638bd70f8a4329cdb1","IPY_MODEL_6f16cf03eeeb464eb527db0e7b325d66"],"layout":"IPY_MODEL_ba9cb2ccc31941d8ad3e150949c17398"}},"e622bf4802484e4180e47c25887d6826":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_2088a9863ab841008c09874e754ef8bd","IPY_MODEL_e6c325123e3d443d85cdbca446883658","IPY_MODEL_b959b621ed974f7680cb91cb4119ae14"],"layout":"IPY_MODEL_376d7493e3c947bfb522e9a6d34e40f6"}},"e6c325123e3d443d85cdbca446883658":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_
view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_adff481f63e74e7d860be13e7457cea7","max":80,"min":0,"orientation":"horizontal","style":"IPY_MODEL_a45be662c59d4faf892e6980b86deaba","value":80}},"e7d91e070fbb4c59a7c83c1c16c89a3a":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5f2b4972fe224537b6a488d7f1401bca","placeholder":"ā€‹","style":"IPY_MODEL_0ddef191449543a8983807ab48efa88b","value":"modules.json:ā€‡100%"}},"e83c6cb32f0b4c07953dfc068475b605":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e89e9331e2534620bd83c71e653052ca":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ea76a465b6c243269398134dbd294613":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a418a3e6152b48ddbde6931ed96a704e","placeholder":"ā€‹","style":"IPY_MODEL_8759483ad1d94043ad524111b2e
8be8f","value":"model.safetensors:ā€‡100%"}},"eaded34e0186406ea1fb991b31f25a08":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ebe4c25cd7d84b18b04b05a997c4fe18":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ebed5481ee7d4833af40c86c9176c397":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_97b7b8bac3724273b559f807f3e4b63a","placeholder":"ā€‹","style":"IPY_MODEL_a4f06a72f6794a8b8d6d15aec09148c0","value":"ā€‡695/695ā€‡[00:00<00:00,ā€‡48.3kB/s]"}},"ec02a444a9cf473284521bec097b9af3":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_7ed4c1746eed4c0ab62c56486c047534","max":1336413848,"min":0,"orientation":"horizontal","style":"IPY_MODEL_a98c940d9c164284a475129af41a3886","value":1336413848}},"ec1866ccf43b49ee81c67b05fbcdf92a":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base",
"_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ed4328bc291248bda181f59ecef041c9":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_c517665a69554416ac467b4b2867dedd","placeholder":"ā€‹","style":"IPY_MODEL_0ae4358a1bfc4c8bac06938b1cc5d677","value":"tokenizer_config.json:ā€‡100%"}},"edd40704fe314f3e9b1b4d469de4007c":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f1bf959afd114c0da5778504b3dfac3d":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f2d6b5ae78d9470998d5d4cbf386
f481":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_16658c9880b84e44bb6c3a57d7009749","placeholder":"ā€‹","style":"IPY_MODEL_1899322b98174c8ca8b37849dd1dd746","value":"embeddingā€‡nodes:ā€‡ā€‡99%"}},"f3269734f9414c13a2fa9723c2f35790":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f5268fae9be749b7a05256473c9575c2":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f5c2be5d46f04e6d824061b493d20b03":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f785472a2e764df79c4bbe152b6b5c5f":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_27458abe93644581b06bc2e4b80f580d","placeholder":"ā€‹","style":
"IPY_MODEL_3b81c55fb6c443dc96d8fc6342ceda84","value":"ā€‡297/297ā€‡[00:00<00:00,ā€‡21.8kB/s]"}},"f7ba980eac9242149908255d2ad11452":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f8bc69007ae54fb0a2eccfc93c83f41b":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"DescriptionStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f8d554b3702847f3a234a14ca0354bd0":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f9f5ca2ad7fb470dad1aec1b10636936":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fa832662420b495c8ea2db00a2c76a21":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_row
s":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fb505e3d077341e18d59d8bcc3eb5fe9":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fd6b07049ffd4baea48d98bab00ae91e":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_202ce0c76a614dbd82f0f64fc55618f8","placeholder":"ā€‹","style":"IPY_MODEL_b5b3fafd5f97403c8d8a473da84e9163","value":"vocab.txt:ā€‡100%"}},"fd7703d8bead4df285305942d7feeecb":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fdc3c61e672a4de89d33b206acf5320b":{"model_module":"@jupyter-widgets/base","model_module_version":"1.2.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":
null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"fdd0d11accee42a3b7d01f6135cf7c05":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ff44ce74e8a54a5fb6ca61606e6ae6f2":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3aff9c20986845fdaa38eb8fe8affddd","placeholder":"ā€‹","style":"IPY_MODEL_4586772f17524dee97f7b6c45f20667f","value":"README.md:ā€‡100%"}},"ff6e1c52ad0c406da08c604a934e7aa1":{"model_module":"@jupyter-widgets/controls","model_module_version":"1.5.0","model_name":"CheckboxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"CheckboxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"CheckboxView","description":"Add token as git credential?","description_tooltip":null,"disabled":false,"indent":true,"layout":"IPY_MODEL_c486ad7a260640d5a1c45f518c4e0469","style":"IPY_MODEL_9c5c7ecc51c84c86b7c212a501b9fac5","value":true}}}}},"nbformat":4,"nbformat_minor":0} diff --git a/Tasks/Task 4/task4-finetuning-os-embed.ipynb b/Tasks/Task 4/task4-finetuning-os-embed.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..6f33c3a79159d26108fbd48e79ecbf8b6977aa29 --- /dev/null +++ b/Tasks/Task 4/task4-finetuning-os-embed.ipynb @@ -0,0 +1,2032 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " \u001b[1;31merror\u001b[0m: \u001b[1msubprocess-exited-with-error\u001b[0m\n", + " \n", + " \u001b[31mƗ\u001b[0m \u001b[32mBuilding wheel for grpcio-tools \u001b[0m\u001b[1;32m(\u001b[0m\u001b[32mpyproject.toml\u001b[0m\u001b[1;32m)\u001b[0m did not run successfully.\n", + " \u001b[31mā”‚\u001b[0m exit code: \u001b[1;36m1\u001b[0m\n", + " \u001b[31mā•°ā”€>\u001b[0m \u001b[31m[1154 lines of output]\u001b[0m\n", + " \u001b[31m \u001b[0m :30: DeprecationWarning: pkg_resources is deprecated as an API. 
See https://setuptools.pypa.io/en/latest/pkg_resources.html\n",
[… elided: bdist_wheel/build_py output copying grpc_tools sources and google/protobuf .proto files into build/lib.macosx-14.0-arm64-cpython-311/, then build_ext compiling the 'grpc_tools._protoc_compiler' extension — dozens of near-identical clang++ invocations over grpc_root/ and third_party/protobuf/ sources, each batch followed by repeated -Wsign-compare warnings of the form "comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long')" …]
+ " grpc_tools/_protoc_compiler.cpp:8356:5: error: member access into incomplete type 'PyFrameObject' (aka '_frame')\n",
+ " __Pyx_PyFrame_SetLineNumber(py_frame, py_line);\n",
+ " grpc_tools/_protoc_compiler.cpp:652:62: note: expanded from macro '__Pyx_PyFrame_SetLineNumber'\n",
+ " #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)\n",
+ " /opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11/pytypedefs.h:22:16: note: forward declaration of '_frame'\n",
+ " typedef struct _frame PyFrameObject;\n",
[… elided: eighteen further errors of the form "no member named 'exc_type' / 'exc_traceback' in '_err_stackitem'" at grpc_tools/_protoc_compiler.cpp lines 9155–9525 …]
+ " fatal error: too many errors emitted, stopping now [-ferror-limit=]\n",
+ " 20 errors generated.\n",
[… elided: remaining clang++ invocations over the java/csharp/cpp generator and protobuf runtime sources, with further -Wsign-compare warnings in cpp_generator.cc, io/printer.h, and java_context.cc …]
+ " third_party/protobuf/src/google/protobuf/compiler/java/java_context.cc:154:21: warning: comparison of integers of
different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < fields.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message.cc:573:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < fields_by_number().size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message.cc:716:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < fields_by_number().size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message.cc:758:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < fields_by_number().size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m 2 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_enum_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_primitive_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_doc_comment.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_doc_comment.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_enum_lite.cc:88:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < canonical_values_.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_enum_lite.cc:111:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < aliases_.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_enum_lite.cc:172:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < canonical_values_.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_field_base.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_field_base.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/code_generator.cc:56:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < files.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/code_generator.cc:112:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < parts.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_extension.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_extension.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 2 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.cc:261:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < input.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.cc:737:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < filename.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_helpers.cc:1166:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < scc->descriptors.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_doc_comment.cc:126:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < lines.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_field_base.cc:309:20: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for(int i = 0; i < text.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_primitive_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_primitive_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 1 warning generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_name_resolver.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_name_resolver.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_extension.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_extension.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.cc:95:21: warning: comparison of integers of different signs: 'int' and 'unsigned long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < GOOGLE_ARRAYSIZE(kForbiddenWordList); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.cc:164:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < input.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.cc:562:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < text.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_file.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_file.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_string_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_string_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 1 warning generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_enum_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_enum_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder.cc:225:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < map_fields.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder.cc:250:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < map_fields.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_kotlin_generator.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_kotlin_generator.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_map_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_map_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_file.cc:93:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < fields.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_primitive_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_primitive_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/compiler/java/java_file.cc:45:\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.h:43:\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/io/printer.h:90:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < path.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_file.cc:696:53: note: in instantiation of member function 'google::protobuf::io::AnnotationProtoCollector::AddAnnotation' requested here\n", + " \u001b[31m \u001b[0m io::AnnotationProtoCollector annotation_collector(\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m 2 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder_lite.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder_lite.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:190:21: warning: comparison of integers of different signs: 'int' and 'size_t' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < num_words; i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:216:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < input.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:258:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int j = 0; j < value.length(); j++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:665:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < name.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:755:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < worker.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1113:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < lines.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1167:25: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int len = 0; len < input->size(); ++len) {\n", + " \u001b[31m \u001b[0m ~~~ ^ ~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1180:14: warning: comparison of integers of different signs: 'int' and 'const size_type' (aka 'const unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (offset != StringPiece::npos) {\n", + " \u001b[31m \u001b[0m ~~~~~~ ^ ~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1201:14: warning: comparison of integers of different signs: 'int' and 'const size_type' (aka 'const unsigned long') 
[-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (offset == StringPiece::npos) {\n", + " \u001b[31m \u001b[0m ~~~~~~ ^ ~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_service.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_service.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1398:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < files.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1606:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int y = 0; y < desired_output.size(); y++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1613:11: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (x >= input_for_decode.size()) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1625:9: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (x != input_for_decode.size()) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1911:14: warning: comparison of integers of different signs: 'int' and 'const size_type' (aka 'const unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (offset == StringPiece::npos) {\n", + " \u001b[31m \u001b[0m ~~~~~~ ^ ~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1922:16: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m while (start < proto_file_list.length()) {\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc:1924:16: warning: comparison of integers of different signs: 'int' and 'const size_type' (aka 'const unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (offset == StringPiece::npos) {\n", + " \u001b[31m \u001b[0m ~~~~~~ ^ ~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_extension.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_extension.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_string_field_lite.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_string_field_lite.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 2 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_generator.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/compiler/java/java_kotlin_generator.cc:34:\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.h:43:\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/io/printer.h:90:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < path.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_kotlin_generator.cc:120:51: note: in instantiation of member function 'google::protobuf::io::AnnotationProtoCollector::AddAnnotation' requested here\n", + " \u001b[31m \u001b[0m io::AnnotationProtoCollector annotation_collector(\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m 1 warning generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_map_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_map_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_message_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_message_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 16 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_map_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_map_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_primitive_field_lite.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_primitive_field_lite.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/js/js_generator.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_primitive_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_primitive_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/python/python_generator.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/python/python_generator.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_message.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:76:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < options.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:129:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < file_generators.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:131:25: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int j = 0; j < file_generators.size(); ++j) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:138:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < file_generators.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:176:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < file_generators.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:188:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < all_files.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:199:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 
0; i < all_annotations.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:42:\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.h:43:\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/io/printer.h:90:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < path.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc:156:53: note: in instantiation of member function 'google::protobuf::io::AnnotationProtoCollector::AddAnnotation' requested here\n", + " \u001b[31m \u001b[0m io::AnnotationProtoCollector annotation_collector(\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m 8 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/compiler/java/java_generator_factory.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/compiler/java/java_generator_factory.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc:265:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < input.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc:284:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < input.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc:299:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < words.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc:313:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < words.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc:330:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < input.size(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc:345:21: warning: 
comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n",
+        "      [build log truncated: while pip compiled the vendored protobuf/gRPC C++ sources (third_party/protobuf) for Python 3.11 on macOS 14 arm64, clang++ repeatedly emitted the same -Wsign-compare warning ('comparison of integers of different signs') across the cpp, java, js, objectivec, php, python, and ruby generator sources; each translation unit reported only warnings, no errors, and compilation continued through the remaining object files]\n",
+        "      \u001b[31m   \u001b[0m   clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/message.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/message.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/generated_message_bases.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/generated_message_bases.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/stubs/strutil.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/stubs/strutil.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/stubs/statusor.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/stubs/statusor.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/empty.pb.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/empty.pb.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/stubs/common.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/stubs/common.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/type.pb.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/type.pb.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/generated_message_tctable_full.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/generated_message_tctable_full.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/stubs/strutil.cc:506:11: warning: 'sprintf' is deprecated: This function is provided for compatibility reasons only. Due to security concerns inherent in the design of sprintf(3), it is highly recommended that you use snprintf(3) instead. [-Wdeprecated-declarations]\n", + " \u001b[31m \u001b[0m sprintf(dest + used, (use_hex ? \"\\\\x%02x\" : \"\\\\%03o\"),\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk/usr/include/stdio.h:180:1: note: 'sprintf' has been explicitly marked deprecated here\n", + " \u001b[31m \u001b[0m __deprecated_msg(\"This function is provided for compatibility reasons only. Due to security concerns inherent in the design of sprintf(3), it is highly recommended that you use snprintf(3) instead.\")\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk/usr/include/sys/cdefs.h:218:48: note: expanded from macro '__deprecated_msg'\n", + " \u001b[31m \u001b[0m #define __deprecated_msg(_msg) __attribute__((__deprecated__(_msg)))\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/field_mask_util.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/field_mask_util.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/repeated_ptr_field.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/repeated_ptr_field.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 1 warning generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/stubs/substitute.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/stubs/substitute.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/stubs/stringpiece.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/stubs/stringpiece.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/json_escaping.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/json_escaping.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/protostream_objectsource.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/protostream_objectsource.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/generated_message_tctable_lite.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/generated_message_tctable_lite.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/message_differencer.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/message_differencer.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/wrappers.pb.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/wrappers.pb.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/field_mask_util.cc:369:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < parts.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/field_mask_util.cc:375:12: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m i != parts.size() - 1)) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_escaping.cc:182:33: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m while (*num_left > 0 && index < str.size()) {\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_escaping.cc:328:16: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m } while (i < str.length()); // case iv\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_escaping.cc:352:21: warning: comparison of integers of different signs: 'int' and 'const size_t' (aka 'const unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < len; i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/stubs/time.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/stubs/time.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/stubs/stringprintf.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/stubs/stringprintf.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/unknown_field_set.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/unknown_field_set.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 2 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/datapiece.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.cc:31:\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.h:105:25: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 1; i < indent_string.length(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/stubs/stringprintf.cc:152:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < v.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/stubs/stringprintf.cc:155:28: warning: comparison of integers of different signs: 'int' and 'unsigned long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = v.size(); i < GOOGLE_ARRAYSIZE(cstr); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m 2 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/text_format.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/text_format.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/protostream_objectwriter.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/protostream_objectwriter.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 1 warning generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/delimited_message_util.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/delimited_message_util.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:57:36: warning: comparison of integers of different signs: 'unsigned int' and 'int' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m MathUtil::Sign(before) == MathUtil::Sign(after)) {\n", + " \u001b[31m \u001b[0m ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:75:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::ValidateNumberConversion' requested here\n", + " \u001b[31m \u001b[0m return ValidateNumberConversion(after, before);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:339:14: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::NumberConvertAndCheck' requested here\n", + " \u001b[31m \u001b[0m return NumberConvertAndCheck(u32_);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:122:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::DataPiece::GenericConvert' requested here\n", + " \u001b[31m \u001b[0m return GenericConvert();\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:56:13: warning: comparison of integers of different signs: 'int' and 'unsigned int' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (after == before &&\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:57:36: warning: comparison of integers of different signs: 'unsigned long long' and 'int' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m MathUtil::Sign(before) == MathUtil::Sign(after)) {\n", + " \u001b[31m \u001b[0m ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:75:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::ValidateNumberConversion' requested here\n", + " \u001b[31m \u001b[0m return 
ValidateNumberConversion(after, before);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:341:14: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::NumberConvertAndCheck' requested here\n", + " \u001b[31m \u001b[0m return NumberConvertAndCheck(u64_);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:122:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::DataPiece::GenericConvert' requested here\n", + " \u001b[31m \u001b[0m return GenericConvert();\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:56:13: warning: comparison of integers of different signs: 'int' and 'unsigned long long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (after == before &&\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:57:36: warning: comparison of integers of different signs: 'int' and 'unsigned int' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m MathUtil::Sign(before) == MathUtil::Sign(after)) {\n", + " \u001b[31m \u001b[0m ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:75:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::ValidateNumberConversion' requested here\n", + " \u001b[31m \u001b[0m return ValidateNumberConversion(after, before);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:335:14: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::NumberConvertAndCheck' requested here\n", + " \u001b[31m \u001b[0m return NumberConvertAndCheck(i32_);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:135:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::DataPiece::GenericConvert' requested here\n", + " \u001b[31m \u001b[0m return GenericConvert();\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:56:13: warning: comparison of integers of different signs: 'unsigned int' and 'int' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (after == before &&\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:57:36: warning: comparison of integers of different signs: 'unsigned long long' and 'long long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m MathUtil::Sign(before) == MathUtil::Sign(after)) {\n", + " \u001b[31m \u001b[0m ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:75:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::ValidateNumberConversion' requested here\n", + " \u001b[31m \u001b[0m return ValidateNumberConversion(after, before);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m 
third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:341:14: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::NumberConvertAndCheck' requested here\n", + " \u001b[31m \u001b[0m return NumberConvertAndCheck(u64_);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:148:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::DataPiece::GenericConvert' requested here\n", + " \u001b[31m \u001b[0m return GenericConvert();\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:56:13: warning: comparison of integers of different signs: 'long long' and 'unsigned long long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (after == before &&\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:57:36: warning: comparison of integers of different signs: 'int' and 'unsigned long long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m MathUtil::Sign(before) == MathUtil::Sign(after)) {\n", + " \u001b[31m \u001b[0m ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:75:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::ValidateNumberConversion' requested here\n", + " \u001b[31m \u001b[0m return ValidateNumberConversion(after, before);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:335:14: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::NumberConvertAndCheck' requested here\n", + " \u001b[31m \u001b[0m return NumberConvertAndCheck(i32_);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:161:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::DataPiece::GenericConvert' requested here\n", + " \u001b[31m \u001b[0m return GenericConvert();\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:56:13: warning: comparison of integers of different signs: 'unsigned long long' and 'int' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (after == before &&\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:57:36: warning: comparison of integers of different signs: 'long long' and 'unsigned long long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m MathUtil::Sign(before) == MathUtil::Sign(after)) {\n", + " \u001b[31m \u001b[0m ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:75:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::ValidateNumberConversion' requested here\n", + " \u001b[31m \u001b[0m return ValidateNumberConversion(after, before);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:337:14: note: in 
instantiation of function template specialization 'google::protobuf::util::converter::(anonymous namespace)::NumberConvertAndCheck' requested here\n", + " \u001b[31m \u001b[0m return NumberConvertAndCheck(i64_);\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:161:10: note: in instantiation of function template specialization 'google::protobuf::util::converter::DataPiece::GenericConvert' requested here\n", + " \u001b[31m \u001b[0m return GenericConvert();\n", + " \u001b[31m \u001b[0m ^\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/datapiece.cc:56:13: warning: comparison of integers of different signs: 'unsigned long long' and 'long long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (after == before &&\n", + " \u001b[31m \u001b[0m ~~~~~ ^ ~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc:87:23: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (n_valid_bytes == str.size()) {\n", + " \u001b[31m \u001b[0m ~~~~~~~~~~~~~ ^ ~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc:102:14: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (; len < input->size(); ++len) {\n", + " \u001b[31m \u001b[0m ~~~ ^ ~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc:117:14: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (; len < input->size(); ++len) {\n", + " \u001b[31m \u001b[0m ~~~ ^ ~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc:963:12: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (size >= kKeywordTrue.length() &&\n", + " \u001b[31m \u001b[0m ~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc:967:12: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (size >= kKeywordFalse.length() &&\n", + " \u001b[31m \u001b[0m ~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_stream_parser.cc:971:12: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (size >= kKeywordNull.length() &&\n", + " \u001b[31m \u001b[0m ~~~~ ^ ~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/protostream_objectwriter.cc:380:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < uninterpreted_events_.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/field_comparator.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/field_comparator.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 12 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 6 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/object_writer.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/object_writer.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/time_util.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/time_util.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 1 warning generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/type_info.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/type_info.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.cc:31:\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.h:157:25: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < children_.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.cc:315:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < children_.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.cc:392:21: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < children_.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/proto_writer.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/proto_writer.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/type_resolver_util.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/type_resolver_util.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/error_listener.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/error_listener.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/proto_writer.cc:410:16: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m while (i < name.size() &&\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/proto_writer.cc:413:22: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (i > 0 && i == name.size()) { // safe field name\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/utility.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/utility.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/internal/field_mask_utility.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/internal/field_mask_utility.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 2 warnings generated.\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/utility.cc:348:21: warning: comparison of integers of different signs: 'int' and 'unsigned long' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < GOOGLE_ARRAYSIZE(well_known_types_name_array_); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
-Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/wire_format.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/wire_format.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m 1 warning generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. -Igrpc_root -Igrpc_root/include -Ithird_party/protobuf/src -I/Users/jeevan/Documents/Learnings/ai-engineering-bootcamp/AIE4/mid-term/ai-safety-chatty/.venv/include -I/opt/homebrew/opt/python@3.11/Frameworks/Python.framework/Versions/3.11/include/python3.11 -c third_party/protobuf/src/google/protobuf/util/json_util.cc -o build/temp.macosx-14.0-arm64-cpython-311/third_party/protobuf/src/google/protobuf/util/json_util.o -std=c++14 -fno-wrapv -frtti\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/util/json_util.cc:37:\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/default_value_objectwriter.h:157:25: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 0; i < children_.size(); ++i) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m In file included from third_party/protobuf/src/google/protobuf/util/json_util.cc:39:\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/internal/json_objectwriter.h:105:25: warning: comparison of integers of different signs: 'int' and 'size_type' (aka 'unsigned long') [-Wsign-compare]\n", + " \u001b[31m \u001b[0m for (int i = 1; i < indent_string.length(); i++) {\n", + " \u001b[31m \u001b[0m ~ ^ ~~~~~~~~~~~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m third_party/protobuf/src/google/protobuf/util/json_util.cc:67:13: warning: comparison of integers of different signs: 'size_t' (aka 'unsigned long') and 'int' [-Wsign-compare]\n", + " \u001b[31m \u001b[0m if (len <= buffer_size_) {\n", + " \u001b[31m \u001b[0m ~~~ ^ ~~~~~~~~~~~~\n", + " \u001b[31m \u001b[0m 3 warnings generated.\n", + " \u001b[31m \u001b[0m clang++ -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -fwrapv -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX14.sdk -DHAVE_PTHREAD=1 -I. 
+ " \u001b[31m \u001b[0m error: command '/usr/bin/clang++' failed with exit code 1\n",
+ " \u001b[31m \u001b[0m \u001b[31m[end of output]\u001b[0m\n",
+ " \n",
+ " \u001b[1;35mnote\u001b[0m: This error originates from a subprocess, and is likely not a problem with pip.\n",
+ "\u001b[31m ERROR: Failed building wheel for grpcio-tools\u001b[0m\u001b[31m\n",
+ "\u001b[0m\u001b[31mERROR: ERROR: Failed to build installable wheels for some pyproject.toml based projects (grpcio-tools)\u001b[0m\u001b[31m\n",
+ "\u001b[0m\n",
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.1.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.2\u001b[0m\n",
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
+ "Note: you may need to restart the kernel to use updated packages.\n"
+ ]
+ }
+ ],
+ "source": [
+ "%pip install -qU \\\n",
+ "    langsmith==0.1.125 \\\n",
+ "    langchain_openai \\\n",
+ "    langchain_huggingface \\\n",
+ "    langchain-core==0.2.41 \\\n",
+ "    langchain \\\n",
+ "    langchain_community \\\n",
+ "    langchain-qdrant==0.1.4 \\\n",
+ "    langchain-text-splitters \\\n",
+ "    faiss-cpu \\\n",
+ "    langchain-experimental \\\n",
+ "    unstructured==0.15.7 \\\n",
+ "    python-pptx==1.0.2 \\\n",
+ "    nltk==3.9.1 \\\n",
+ "    PyMuPDF==1.24.10 \\\n",
+ "    ragas==0.1.18 \\\n",
+ "    protobuf==3.20.3 \\\n",
+ "    pyarrow==14.0.1 \\\n",
+ "    fsspec==2024.6.1 \\\n",
+ "    sentence_transformers \\\n",
+ "    datasets\n"
+ ]
+ },
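+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note on the build failure above: `grpcio-tools` had no usable prebuilt wheel for this interpreter, so pip tried to compile it (and its vendored protobuf sources) and clang++ failed. A possible workaround, sketched below but not verified for this exact environment, is to upgrade pip as the notice suggests and then ask it to prefer a prebuilt wheel rather than a source build."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Unverified workaround sketch for the grpcio-tools source-build failure above:\n",
+ "# upgrade pip first (newer wheel-tag support), then refuse source builds so pip\n",
+ "# either resolves a prebuilt macOS/arm64 wheel or fails fast instead of compiling.\n",
+ "%pip install -qU pip\n",
+ "%pip install -qU --only-binary :all: grpcio-tools"
+ ]
+ },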
Enum):\n", + " PYMUPDF = \"pymupdf\"\n", + "\n", + " def __init__(self, file_path: str | List[str] , loader_type: LoaderType = LoaderType.PYMUPDF):\n", + " self.file_path = file_path if isinstance(file_path, list) else [file_path]\n", + " self.loader_type = loader_type\n", + "\n", + " async def aload(self) -> List[Document]:\n", + " all_docs = []\n", + " for file_path in self.file_path:\n", + " if self.loader_type == self.LoaderType.PYMUPDF:\n", + " try:\n", + " loader = PyMuPDFLoader(file_path)\n", + " docs = await loader.aload()\n", + " all_docs.extend(docs)\n", + " except Exception as e:\n", + " print(f\"Error loading file {file_path}: {e}\")\n", + " continue\n", + " return all_docs\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n", + "NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n", + "SMALL_DOC = \"https://arxiv.org/pdf/1908.10084\"\n", + "documents_to_preload = [\n", + " BOR_FILE_PATH,\n", + " NIST_FILE_PATH\n", + " # SMALL_DOC\n", + "]\n", + "\n", + "pdf_loader = PDFLoaderWrapper(\n", + " documents_to_preload, PDFLoaderWrapper.LoaderType.PYMUPDF\n", + ")\n", + "documents = await pdf_loader.aload()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "\n", + "text_splitter = RecursiveCharacterTextSplitter(\n", + " chunk_size = 1024,\n", + " chunk_overlap = 50,\n", + " length_function = len\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "training_documents = text_splitter.split_documents(documents.load())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import uuid\n", + "\n", + "id_set = set()\n", + "\n", + "for document in training_documents:\n", + " id = str(uuid.uuid4())\n", + " while id in id_set:\n", + " id = uuid.uuid4()\n", + " id_set.add(id)\n", + " document.metadata[\"id\"] = id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import random\n", + "\n", + "total_documents = len(training_documents)\n", + "\n", + "# Define the split percentages\n", + "train_percent = 0.75 # 75% for training\n", + "val_percent = 0.125 # 12.5% for validation\n", + "test_percent = 0.125 # 12.5% for testing\n", + "\n", + "# Shuffle the documents\n", + "random.shuffle(training_documents)\n", + "\n", + "# Calculate the split indices\n", + "train_split = int(total_documents * train_percent)\n", + "val_split = int(total_documents * (train_percent + val_percent))\n", + "\n", + "# Split the documents\n", + "training_split_documents = training_documents[:train_split]\n", + "val_split_documents = training_documents[train_split:val_split]\n", + "test_split_documents = training_documents[val_split:]\n", + "\n", + "print(f\"Training set: {len(training_split_documents)} documents\")\n", + "print(f\"Validation set: {len(val_split_documents)} documents\")\n", + "print(f\"Test set: {len(test_split_documents)} documents\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Constructing a Fine-Tuning dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
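+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick optional sanity check on the split (a small added sketch, relying on the `id` metadata assigned earlier): the three splits should partition the shuffled chunks exactly, with no chunk shared between them."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional sanity check: the three splits must cover every chunk exactly once.\n",
+ "assert len(training_split_documents) + len(val_split_documents) + len(test_split_documents) == total_documents\n",
+ "\n",
+ "# No chunk id may appear in more than one split.\n",
+ "train_ids = {doc.metadata[\"id\"] for doc in training_split_documents}\n",
+ "val_ids = {doc.metadata[\"id\"] for doc in val_split_documents}\n",
+ "test_ids = {doc.metadata[\"id\"] for doc in test_split_documents}\n",
+ "assert train_ids.isdisjoint(val_ids) and train_ids.isdisjoint(test_ids) and val_ids.isdisjoint(test_ids)"
+ ]
+ },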
"source": [ + "from langchain_openai import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "\n", + "qa_chat_model = ChatOpenAI(\n", + " model=\"gpt-4o\",\n", + " temperature=0\n", + ")\n", + "\n", + "qa_prompt = \"\"\"\\\n", + "Given the following context, you must generate questions based on only the provided context.\n", + "Check internet the question that you generate is realistic and asked by online users and \n", + "include only such questions in the output to be realistic.\n", + "You are to generate {n_questions} questions which should be provided in the following format:\n", + "\n", + "1. QUESTION #1\n", + "2. QUESTION #2\n", + "...\n", + "\n", + "Context:\n", + "{context}\n", + "\"\"\"\n", + "\n", + "qa_prompt_template = ChatPromptTemplate.from_template(qa_prompt)\n", + "\n", + "question_generation_chain = qa_prompt_template | qa_chat_model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tqdm\n", + "def create_questions(documents, n_questions):\n", + " questions = {}\n", + " relevant_docs = {}\n", + " for document in tqdm.tqdm(documents):\n", + " document_content = {\"context\" : document.page_content, \"questions\" : []}\n", + " questions_generated = question_generation_chain.invoke({\"context\": document.page_content, \"n_questions\": n_questions})\n", + " for question in questions_generated.content.split(\"\\n\"):\n", + " question_id = str(uuid.uuid4())\n", + " questions[question_id] = \"\".join(question.split(\".\")[1:]).strip()\n", + " relevant_docs[question_id] = [document.metadata[\"id\"]]\n", + " return questions, relevant_docs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "training_questions, training_relevant_contexts = create_questions(training_split_documents,10)\n", + "len(training_questions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "val_questions, val_relevant_contexts = create_questions(val_split_documents,5)\n", + "len(val_questions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_questions, test_relevant_contexts = create_questions(test_split_documents,5)\n", + "len(test_questions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "training_corpus = {train_item.metadata[\"id\"] : train_item.page_content for train_item in training_split_documents}\n", + "\n", + "train_dataset = {\n", + " \"questions\" : training_questions,\n", + " \"relevant_contexts\" : training_relevant_contexts,\n", + " \"corpus\" : training_corpus\n", + "}\n", + "\n", + "with open(\"training_dataset.jsonl\", \"w\") as f:\n", + " json.dump(train_dataset, f)\n", + "\n", + "val_corpus = {val_item.metadata[\"id\"] : val_item.page_content for val_item in val_split_documents}\n", + "\n", + "val_dataset = {\n", + " \"questions\" : val_questions,\n", + " \"relevant_contexts\" : val_relevant_contexts,\n", + " \"corpus\" : val_corpus\n", + "}\n", + "\n", + "with open(\"val_dataset.jsonl\", \"w\") as f:\n", + " json.dump(val_dataset, f)\n", + "\n", + "train_corpus = {test_item.metadata[\"id\"] : test_item.page_content for test_item in test_split_documents}\n", + "\n", + "test_dataset = {\n", + " \"questions\" : test_questions,\n", + " \"relevant_contexts\" : test_relevant_contexts,\n", + " \"corpus\" 
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Fine-tuning `Snowflake/snowflake-arctic-embed-l`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sentence_transformers import SentenceTransformer, InputExample\n",
+ "from torch.utils.data import DataLoader\n",
+ "\n",
+ "model_id = \"Snowflake/snowflake-arctic-embed-l\"\n",
+ "model = SentenceTransformer(model_id)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss\n",
+ "\n",
+ "corpus = train_dataset['corpus']\n",
+ "queries = train_dataset['questions']\n",
+ "relevant_docs = train_dataset['relevant_contexts']\n",
+ "\n",
+ "examples = []\n",
+ "for query_id, query in queries.items():\n",
+ "    doc_id = relevant_docs[query_id][0]\n",
+ "    text = corpus[doc_id]\n",
+ "    example = InputExample(texts=[query, text])\n",
+ "    examples.append(example)\n",
+ "\n",
+ "# torch DataLoader; batch_size must be > 1 so MultipleNegativesRankingLoss has\n",
+ "# in-batch negatives to rank against (10 is an arbitrary but workable choice)\n",
+ "loader = DataLoader(\n",
+ "    examples, shuffle=True, batch_size=10\n",
+ ")\n",
+ "\n",
+ "# Using MultipleNegativesRankingLoss wrapped in MatryoshkaLoss for training\n",
+ "\n",
+ "matryoshka_dimensions = [768, 512, 256, 128, 64]\n",
+ "inner_train_loss = MultipleNegativesRankingLoss(model)\n",
+ "train_loss = MatryoshkaLoss(\n",
+ "    model, inner_train_loss, matryoshka_dims=matryoshka_dimensions\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from sentence_transformers.evaluation import InformationRetrievalEvaluator\n",
+ "\n",
+ "corpus = val_dataset['corpus']\n",
+ "queries = val_dataset['questions']\n",
+ "relevant_docs = val_dataset['relevant_contexts']\n",
+ "\n",
+ "evaluator = InformationRetrievalEvaluator(queries, corpus, relevant_docs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "EPOCHS = 5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "warmup_steps = int(len(loader) * EPOCHS * 0.1)\n",
+ "\n",
+ "model.fit(\n",
+ "    train_objectives=[(loader, train_loss)],\n",
+ "    epochs=EPOCHS,\n",
+ "    warmup_steps=warmup_steps,\n",
+ "    output_path='finetuned_arctic',\n",
+ "    show_progress_bar=True,\n",
+ "    evaluator=evaluator,\n",
+ "    evaluation_steps=50,\n",
+ ")"
+ ]
+ },
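+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "MatryoshkaLoss trains the model so that prefixes of each embedding stay useful on their own. A minimal sketch of what that buys at inference time, assuming the `model` fine-tuned above (the query string is illustrative only):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Matryoshka-trained embeddings can be truncated to a leading prefix (e.g. the\n",
+ "# first 256 of 1024 dims) with limited quality loss, cutting storage and\n",
+ "# similarity-search cost. The query below is illustrative, not from the datasets.\n",
+ "full = model.encode([\"What is the AI Bill of Rights?\"])\n",
+ "truncated = full[:, :256]  # 256 is one of matryoshka_dimensions\n",
+ "print(full.shape, truncated.shape)"
+ ]
+ },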
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "from langchain_community.vectorstores import FAISS\n",
+ "from langchain_openai.embeddings import OpenAIEmbeddings\n",
+ "from langchain_core.documents import Document\n",
+ "\n",
+ "def evaluate_hit_rate(\n",
+ "    dataset,\n",
+ "    embed_model,\n",
+ "    top_k=5,\n",
+ "):\n",
+ "    corpus = dataset['corpus']\n",
+ "    questions = dataset['questions']\n",
+ "    relevant_docs = dataset['relevant_contexts']\n",
+ "    documents = [Document(page_content=content, metadata={\"id\": doc_id}) for doc_id, content in corpus.items()]\n",
+ "    vectorstore = FAISS.from_documents(documents, embed_model)\n",
+ "\n",
+ "    retriever = vectorstore.as_retriever(search_kwargs={\"k\": top_k})\n",
+ "\n",
+ "    eval_results = []\n",
+ "    for question_id, question in tqdm.tqdm(questions.items()):\n",
+ "        retrieved_nodes = retriever.invoke(question)\n",
+ "        retrieved_ids = [node.metadata[\"id\"] for node in retrieved_nodes]\n",
+ "        expected_id = relevant_docs[question_id][0]\n",
+ "        # A \"hit\" means the chunk this question was generated from appears in the top_k retrieved\n",
+ "        is_hit = expected_id in retrieved_ids\n",
+ "        eval_results.append({\"id\": question_id, \"question\": question, \"expected_id\": expected_id, \"is_hit\": is_hit})\n",
+ "\n",
+ "    return eval_results"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We compare three embedding models:\n",
+ "1. text-embedding-3-small\n",
+ "2. Snowflake/snowflake-arctic-embed-l (base)\n",
+ "3. Snowflake/snowflake-arctic-embed-l (fine-tuned)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Evaluating `text-embedding-3-small`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "te3_openai = OpenAIEmbeddings(model=\"text-embedding-3-small\")\n",
+ "te3_results = evaluate_hit_rate(test_dataset, te3_openai)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "te3_results_df = pd.DataFrame(te3_results)\n",
+ "te3_hit_rate = te3_results_df[\"is_hit\"].mean()\n",
+ "te3_hit_rate"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Evaluating `Snowflake/snowflake-arctic-embed-l`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
+ "\n",
+ "huggingface_embeddings = HuggingFaceEmbeddings(model_name=\"Snowflake/snowflake-arctic-embed-l\")\n",
+ "arctic_embed_m_results = evaluate_hit_rate(test_dataset, huggingface_embeddings)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "arctic_embed_m_results_df = pd.DataFrame(arctic_embed_m_results)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "arctic_embed_m_hit_rate = arctic_embed_m_results_df[\"is_hit\"].mean()\n",
+ "arctic_embed_m_hit_rate"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Evaluating `Snowflake/snowflake-arctic-embed-l` (fine-tuned)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "finetune_embeddings = HuggingFaceEmbeddings(model_name=\"finetuned_arctic\")\n",
+ "finetune_results = evaluate_hit_rate(test_dataset, finetune_embeddings)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "finetune_results_df = pd.DataFrame(finetune_results)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "finetune_hit_rate = finetune_results_df[\"is_hit\"].mean()\n",
+ "finetune_hit_rate"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Checking the models with RAG"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_community.vectorstores import FAISS\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from operator import itemgetter\n",
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n",
+ "\n",
+ "# using the training_documents, which are already loaded as split 
documents\n", + "def create_rag_chain(huggingface_embeddings):\n", + "\n", + " vectorstore = FAISS.from_documents(training_documents, huggingface_embeddings)\n", + " retriever = vectorstore.as_retriever(search_kwargs={\"k\": 6})\n", + "\n", + " RAG_PROMPT = \"\"\"\\\n", + " Given a provided context and a question, you must answer the question. If you do not know the answer, you must state that you do not know.\n", + "\n", + " Context:\n", + " {context}\n", + "\n", + " Question:\n", + " {question}\n", + "\n", + " Answer:\n", + " \"\"\"\n", + "\n", + " rag_prompt_template = ChatPromptTemplate.from_template(RAG_PROMPT)\n", + "\n", + " rag_llm = ChatOpenAI(\n", + " model=\"gpt-4o\",\n", + " temperature=0\n", + " )\n", + "\n", + " rag_chain = (\n", + " {\"context\": itemgetter(\"question\") | retriever, \"question\": itemgetter(\"question\")}\n", + " | RunnablePassthrough.assign(context=itemgetter(\"context\"))\n", + " | {\"response\": rag_prompt_template | rag_llm | StrOutputParser(), \"context\": itemgetter(\"context\")}\n", + " )\n", + " return rag_chain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_rag_chain = create_rag_chain(huggingface_embeddings)\n", + "fine_tuned_rag_chain = create_rag_chain(finetune_embeddings)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Try some questions here" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RAGAS Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from ragas.testset.generator import TestsetGenerator\n", + "from ragas.testset.evolutions import simple, reasoning, multi_context\n", + "from langchain_openai import OpenAIEmbeddings\n", + "\n", + "generator_llm = ChatOpenAI(model=\"gpt-4o\")\n", + "critic_llm = ChatOpenAI(model=\"gpt-4o\")\n", + "embeddings = OpenAIEmbeddings()\n", + "\n", + "generator = TestsetGenerator.from_langchain(\n", + " generator_llm,\n", + " critic_llm,\n", + " embeddings\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# We are going to use the test_split_documents that we created earlier\n", + "testset = generator.generate_with_langchain_docs(test_split_documents, test_size=20, distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "testset.to_pandas().head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from datasets import Dataset\n", + "\n", + "def generate_answers(chain, testset):\n", + " answers = []\n", + " contexts = []\n", + " questions = testset.to_pandas()[\"question\"].values.tolist()\n", + " ground_truths = testset.to_pandas()[\"ground_truth\"].values.tolist()\n", + "\n", + " for question in tqdm.tqdm(questions):\n", + " answer = chain.invoke({\"question\" : question})\n", + " answers.append(answer[\"response\"])\n", + " contexts.append([context.page_content for context in answer[\"context\"]])\n", + "\n", + " return Dataset.from_dict({\n", + " \"question\" : questions,\n", + " \"answer\" : answers,\n", + " \"contexts\" : contexts,\n", + " \"ground_truth\" : ground_truths\n", + " })" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
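"# Optional smoke test (a minimal sketch): run one illustrative question through\n",
+ "# the base chain before generating answers for the whole testset. The question\n",
+ "# text below is an assumption, not taken from the generated testset.\n",
+ "sample = base_rag_chain.invoke({\"question\": \"What is the AI Bill of Rights?\"})\n",
+ "print(sample[\"response\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [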
"base_dataset = generate_answers(base_rag_chain, testset)\n", + "finetune_dataset = generate_answers(fine_tuned_rag_chain, testset)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from ragas.metrics import (\n", + " faithfulness,\n", + " answer_relevancy,\n", + " context_recall,\n", + " context_precision,\n", + ")\n", + "from ragas import evaluate\n", + "\n", + "base_result = evaluate(\n", + " base_dataset,\n", + " metrics=[\n", + " faithfulness,\n", + " answer_relevancy,\n", + " context_recall,\n", + " context_precision,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "base_result.to_pandas().head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fine_tuned_result = evaluate(\n", + " finetune_dataset,\n", + " metrics=[\n", + " faithfulness,\n", + " answer_relevancy,\n", + " context_recall,\n", + " context_precision,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fine_tuned_result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fine_tuned_result.to_pandas().head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Merge base_result and fine_tuned_result to compare the results\n", + "merged_result = base_result.merge(fine_tuned_result, on=\"question\")\n", + "\n", + "merged_result.to_pandas().head()\n", + "\n", + "# Display graphs for the merged results\n", + "merged_result.plot()\n", + "\n", + "\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/Task 4/test_dataset (2).jsonl b/Tasks/Task 4/test_dataset (2).jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9630134677c51d49b277e44b21e1f72820d2f256 --- /dev/null +++ b/Tasks/Task 4/test_dataset (2).jsonl @@ -0,0 +1 @@ +{"questions": {"a188ff31-68db-46bc-b419-ac0fdecc6b1b": "What is data poisoning in the context of GAI and how does it affect model outputs?", "c9f79a82-8775-4461-ac2f-b7b4e8734e96": "How do GAI systems pose intellectual property risks related to copyrighted material in their training data?", "80b370f2-6d21-4ff6-bc9b-763317af5321": "How can governance structures help mitigate bias in human-based systems?", "d4e20011-f4f9-4de6-8453-8144d77ba3a7": "What are some common concerns associated with human-based systems in terms of bias and effectiveness?", "12534c5f-eb68-41cb-a879-b908e95a65d0": "What are the best practices for establishing policies for data collection and retention to mitigate risks such as data privacy breaches and harmful biases?", "11951856-b35d-4123-ab15-80c38cd82195": "How can organizations effectively implement policies to protect third-party intellectual property and training data?", "869c7e52-2b77-4144-af5f-d5beebc138f9": "What are the 
key safeguards that should be included in automated systems to protect the public from harm?", "a310a614-4bd4-4302-ab42-37b6faa7d838": "How can early-stage public consultation improve the safety and effectiveness of automated systems?", "468bff4b-cdbb-489a-9cbd-95d9062f93dc": "What are some of the key investigative projects that Surya Mattu has worked on at The Markup?", "3e5b4545-4ceb-424f-b519-16c3870dd543": "How has Mariah Montgomery's role as National Campaign Director at the Partnership for Working Families impacted labor rights and policies?", "0f0eea1d-f05e-458e-b31a-e955629c7e44": "How can organizations effectively integrate pre- and post-deployment feedback into their monitoring processes for GAI models?", "6cc0bcc5-9c82-49a6-b118-3e333e70ee9e": "What are the benefits of using AI red-teaming in the pre-deployment testing phase for capturing external feedback?", "4441faa1-8f27-4fc7-bea0-847caa1c1505": "What are the potential negative impacts of automated systems on individuals and communities?", "0db8fdee-99cf-47c6-9d8d-a85f3b294826": "How can confirmation bias affect the effectiveness of safety mechanisms in technology?", "57db460e-0123-4edf-b7df-87a967a60c26": "What are the key safety metrics used to evaluate AI system reliability and robustness?", "48589831-4f3c-4bf6-9cb4-bc4277c489dd": "How can AI systems be designed to fail safely when operating beyond their knowledge limits?", "1df11168-7aa5-4b43-91df-c14c32f01440": "What are the risks associated with data brokers collecting consumer data without permission?", "2127b35f-68cd-4e5f-a669-a6a4bb532fa8": "How does the use of surveillance technologies in schools and workplaces impact mental health?", "afefb290-48ec-450c-b530-5fe1b6c5340b": "What is ballot curing and how does it impact the election process?", "eecbf085-2f16-45c4-ba65-35813ca84568": "How do different states handle signature discrepancies in mail-in ballots?", "43b6add5-244e-4c11-be3b-0944fecfa6b9": "What are the best practices for detecting and mitigating algorithmic bias according to the Brookings Report?", "37bbd6b6-d24a-4b73-a4f4-f532d8c1793a": "How can public agencies implement Algorithmic Impact Assessments to ensure accountability, as suggested by the AI Now Institute Report?", "318fe73a-0591-41e8-b65e-925c71b2caab": "How is the federal government addressing discrimination in mortgage lending through the Department of Justice's nationwide initiative?", "56664bc2-0933-4e58-8d03-5c06b9d06c04": "What role do federal agencies like the Consumer Financial Protection Bureau play in the Action Plan to Advance Property Appraisal and Valuation Equity?", "7f8b418c-6e85-4ab0-83db-b7ed7dc49a45": "What are the best practices for updating due diligence processes to include intellectual property and data privacy for GAI acquisitions?", "e81617a3-9609-4012-ba46-caa374c306de": "How can organizations effectively monitor and assess third-party GAI risks in real-time?", "054e5797-d024-41bd-8de9-983d038a8797": "What are the best practices for performing disparity testing and making the results public?", "fdb81ad2-acf2-4aa4-b551-fe766d22f273": "How can organizations effectively mitigate disparities identified through testing?", "09a4ef32-a01e-4ca9-9bf6-4704e328ccef": "How can people protect themselves from being tracked by devices originally meant for finding lost items?", "e314f460-f6e2-4d11-b612-d51529a9dee6": "What are the potential issues with using algorithms to deploy police in neighborhoods?", "741f5989-422f-4bc5-9f72-0f3b22bb4f25": "What are the mental health impacts of NCII 
on women and sexual minorities?", "19592c9a-0621-4629-bdfc-8a08f0d396b4": "How can GAI training datasets be protected from including CSAM and NCII?", "f95100da-f55f-4402-909d-fdde5cf17d25": "What are the key challenges in designing non-discriminatory AI technology discussed in the panel?", "bb3e7970-5b1e-4e98-87ad-b30d33ff6a89": "How can community participation enhance human-computer interaction in AI systems?", "796ffa10-1532-4fa1-b832-d8ee058d410d": "What are the potential sociotechnical harms of algorithmic systems as discussed by Shelby et al (2023)?", "2c38117e-4b2d-4553-b319-f4ba3997996e": "How does training on generated data affect AI models according to Shumailov et al (2023)?", "3b9c9379-cc75-4b9d-a68a-dc6b0a48fd9c": "What are the key suggested actions for managing GAI risks according to the AI RMF 10 and Playbook?", "1a02235f-7bf0-4e7e-8149-ab610eacb769": "How do the suggested actions for managing GAI risks vary depending on the stage of the GAI lifecycle?", "08cbf993-d60b-4982-bf84-140c29d30450": "How can organizations ensure that consent practices do not allow for abusive surveillance practices?", "e253b5ac-feb7-4116-9e68-d2c817da36a5": "What are the best practices for re-acquiring consent if the use case of data changes or if data is transferred to another entity?", "0260750e-4f7d-4c1b-b0b7-ae4c36cc8fc3": "What are the key principles outlined in the AI Bill of Rights?", "39863570-2d41-4d21-bde1-1afc78c157b0": "How does the AI Bill of Rights address algorithmic discrimination?", "8b4fd9d7-e1d4-472e-bd34-35fa98299c07": "How can we effectively track and document instances of anthropomorphization in GAI system interfaces?", "1f34befe-4432-419f-8465-066a0d82ff77": "What are the best practices for verifying the provenance of GAI system training data and TEVV data?", "3e35db8c-c1b3-4b9f-b6c0-a3fd6e52d2b0": "What is the importance of having demographically and interdisciplinarily diverse AI red teams in pre-deployment contexts?", "0c3b35ca-f421-41e4-b016-8f367561acbe": "How can general public involvement in AI red-teaming contribute to identifying flaws in AI models?", "d73655e4-93f0-41c5-b69e-814ff8189db8": "QUESTION #1: How are major universities using race as a predictor of student success?", "bb8b9729-d1b6-407d-9f0c-aa1bd62a8d78": "QUESTION #2: What concerns do students and professors have about using race as a predictor in education?", "3f1dec42-4087-4e06-9e7e-491c96cdee67": "How can AI-enabled systems contribute to building better and more innovative infrastructure?", "9a94cfb2-25b9-4aa8-94c5-987c53fa42bf": "What lessons from urban planning can be applied to the integration of AI technologies in communities?", "f445449e-b75f-44ee-a819-018ad630bd35": "What are the benefits of having a human alternative to automated systems?", "58fd202f-0791-411a-9124-09381dbbad11": "How can one ensure timely human consideration and remedy when an automated system fails?", "26ee0f55-a947-440f-b4bc-4b7def4e3545": "What are the main findings of the Department of Justice's report on the risk assessment tool for predicting recidivism?", "dcb01564-a34f-42a7-ac6c-13764525a7d2": "How is the Department of Justice addressing the disparities in the risk assessment tool for predicting recidivism among different groups of color?", "8e29d29a-fc98-4a6f-b42b-580fc084dd71": "What is the Executive Order on Advancing Racial Equity and Support for Underserved Communities Through the Federal Government?", "4bbc6d4b-6b67-4831-8bca-853eb46aec3a": "What were President Biden's remarks on the Supreme Court decision to 
overturn Roe v Wade?", "919fdd1d-2abb-472e-ac8d-bde9df2bb391": "What are the best practices for re-assessing model risks after implementing fine-tuning or retrieval-augmented generation?", "a795e873-419b-454b-8598-fb0c49a7e5cc": "How can organizations effectively review and manage training data to prevent the reproduction of intellectual property or CBRN information in AI outputs?", "242b750e-1236-41f7-a1cc-eedef8f0427d": "What are some common examples of AI incidents that organizations should be aware of?", "c506e557-776f-42ed-99f9-c752ac2bb94b": "How can organizations effectively track and document the provenance of datasets to identify AI-generated data issues?", "426616e2-6297-47c3-89c7-71ec1186cdba": "What is the role of the American Civil Liberties Union in protecting digital privacy?", "db43af55-434d-441f-8dc7-acc8ff3f8432": "How does the Center for Democracy & Technology advocate for internet freedom and security?", "f2913868-28a6-4558-904a-0486fbfc1f6e": "How can organizations ensure the accuracy of predictions or recommendations generated by automated systems?", "5a4faa70-0364-4fd0-9c98-b26fb63f7786": "What are the best practices for implementing ongoing monitoring procedures for automated systems?", "3ad57490-e4f7-4fd2-bff4-93211043ec13": "What are the key considerations for implementing automated systems in sensitive domains like criminal justice and health?", "3a05d7ba-2e46-406b-aeb9-51b33efff15f": "How can organizations ensure meaningful oversight and human consideration in high-risk automated decision-making systems?", "6ae09ea8-3090-401b-9f1e-4ce5270152cd": "What are the privacy expectations for automated systems handling sensitive data?", "207207ff-faab-4342-b76f-ef0c6fac88c9": "How should consent be managed for automated systems collecting sensitive data?", "d15a10aa-36cb-4f3a-9f9e-2c0416ce1084": "What is the contact information for inquiries related to NIST AI publications?", "c9f4fb11-9365-4354-aa94-7cc93efcafb5": "Where can I find additional information about NIST AI publications?", "4aebac20-11d4-42c8-be6a-f7ac4e43cbbc": "How can organizations effectively combat automation bias in automated systems?", "71537f88-7e77-4720-9cc0-bca516b4721f": "What are the best practices for training individuals to properly interpret outputs from automated systems?", "e004e796-65d5-4109-89bd-472cae5b6c75": "What were the FTC's findings in the case against Everalbum Inc?", "290cd0b2-456b-41bc-bf0e-3ea3e32f480d": "How did the FTC address privacy concerns in the case against Weight Watchers and Kurbo?", "1c416614-5e28-45f4-9e8e-937971dcff9a": "What are the potential harms of GAI related to misinformation, disinformation, and deepfakes?", "d83ab93d-9be0-488d-94fd-8e58074a3388": "How should organizations disclose the use of GAI to end users to mitigate risks?", "bfc45e93-d073-4348-8fb1-03dfaf4e73f3": "What measures can designers and developers take to prevent algorithmic discrimination?", "4819bdb4-1724-4318-855c-9c4f680c0655": "How does algorithmic discrimination impact different protected classes such as race, gender, and disability?", "a8a96840-d387-42d9-9b56-f05b73027f5c": "What are some innovative solutions provided by the industry to mitigate risks to the safety and efficacy of AI systems?", "7fdb6c15-c3f8-4327-b2fe-0169c08ce375": "How does the Office of Management and Budget (OMB) suggest expanding opportunities for stakeholder engagement in program design?", "3509c40f-7af0-49a5-bd16-c7da584b3980": "What are the nine principles outlined in Executive Order 13960 for the use of AI in 
the federal government?", "a86eba64-72a8-4afa-a7f5-8c50c3b0c660": "How can laws and policies ensure that AI systems are accurate, reliable, and effective in real-life applications?", "9eee9d68-6e0f-4430-989f-cb569677d74c": "How can we distinguish between fact and opinion in the content generated by AI systems?", "6fba0797-2aaa-4686-9325-999b5396f47b": "What are the risks associated with the anthropomorphization of General AI (GAI) systems?", "449ab90b-3762-4d3e-99ea-899bd340c42b": "What are confabulations in the context of text-based outputs?", "1c57be24-8e1d-4a3a-a29e-1d153c019510": "How do legal confabulations manifest in state-of-the-art language models?", "6b30e12e-cecf-4cd7-936e-84468c950a36": "What is the purpose of the Executive Order on Advancing Racial Equity and Support for Underserved Communities Through the Federal Government?", "5547bf9b-ceae-4386-a486-7708637ab6a1": "What role do Navigators play according to HealthCaregov?", "a520c4cc-f2f6-4dd8-bd3a-a1a750440209": "What are the key principles outlined in the ISO/IEC Guide 71:2014 for addressing accessibility in standards?", "d72c0d17-abee-470b-8725-abf4aad59b3f": "How do the Web Content Accessibility Guidelines (WCAG) 20 impact web development practices?", "8e31e286-3ac3-488f-a211-4575fd663a17": "What are the key expectations for automated systems to ensure data privacy and protection from unchecked surveillance?", "76f71eb0-f3b8-425d-8772-65a5d214634f": "How can heightened oversight of surveillance systems prevent algorithmic discrimination based on community membership?", "eb21dff3-4dd0-47af-a449-b9b525386911": "What are the key considerations for ensuring equitable outcomes in fallback and escalation systems for automated systems?", "645a1801-9128-4977-947d-5437b8933966": "How can organizations ensure that human consideration and fallback mechanisms are conducted in a timely manner for automated systems?", "50321a04-5130-43ab-9305-cc1d548da8e0": "What are the extra protections for data related to sensitive domains like health and personal finance?", "32f6e506-6e82-41f7-b80c-f0702a537ca2": "How do technological developments impact the sensitivity of data domains and the need for enhanced data protections?", "7a80ac97-319d-452b-a900-e739da72ab44": "What are some benchmarks used to quantify systemic bias in GAI system outputs?", "bedb600e-b951-4e89-9442-24b971ff1b21": "How can fairness assessments help measure systemic bias in GAI systems?", "5fa67d29-3be8-4c81-a7c9-1a4d5dfa0ba7": "What are the potential biases in hiring tools that learn from a company's predominantly male employee base?", "868637a7-88fa-4891-bcfd-da1d37772744": "How do predictive models that use race as a factor affect Black students' academic guidance and major selection?", "6410524d-24f8-4aaf-8b70-5dcfc8272cd0": "What measures are being taken to prevent the misuse of Apple AirTags for stalking and harassment?", "d0990582-29f1-41c2-90e1-89c4efc58153": "How does crime prediction software perpetuate biases despite promises of being free from them?", "cbc06d96-6605-45f4-8067-0342ab04aac4": "What are the key elements to consider when incorporating GAI systems into an AI system inventory?", "b16b4f6b-0ec2-49bc-9453-9bbf1a8feea5": "How should organizations handle inventory exemptions for GAI systems embedded into application software?", "b1da9e4e-62f7-4d08-ac87-2b196fa9114e": "What measures can be taken to ensure automated systems protect against algorithmic discrimination?", "aa279423-ea2b-4fa2-beb1-7a6e1400c36f": "How can independent evaluations of automated 
systems be conducted without compromising individual privacy?", "8a9ae766-2f74-4272-bd84-e95787e5e943": "What are the best practices for determining data origin and content lineage in AI systems?", "2567081e-89ba-4d98-a746-eaf8503e5c5d": "How can test and evaluation processes be instituted for data and content flows within an AI system?", "fa585f44-6fb6-443b-983e-6304d9c2f5e1": "What are the expectations for automated systems in high-risk settings like criminal justice?", "f51b4b1e-689f-4a47-82a2-d9a9a0d30ab7": "How should the level of risk influence the design of explanatory mechanisms in automated systems?", "ecb05eb6-335e-4451-bf2e-4c8ad8e800bf": "What are the current methods for reporting AI incidents?", "51d512f0-8849-48de-a188-5aab8ddee724": "How do publicly available databases decide which AI incidents to track?", "80d2e492-0668-4e82-b83e-d1cef2355444": "What is the NIST AI 600-1 framework about?", "b7e8353e-ffb6-41a5-a321-d6b5521a03d5": "How does the NIST Trustworthy and Responsible AI framework address generative artificial intelligence risks?", "acbfd37b-65e1-440b-b4b1-9b3ee9a15fac": "What are the best practices for establishing acceptable use policies for GAI in human-AI teaming settings?", "c86085f1-bc71-4d66-8869-d5335b328ec7": "How can organizations effectively implement synthetic content detection and labeling tools?"}, "relevant_contexts": {"a188ff31-68db-46bc-b419-ac0fdecc6b1b": ["1eebe549-0cfa-4adf-84b0-ed9a06656695"], "c9f79a82-8775-4461-ac2f-b7b4e8734e96": ["1eebe549-0cfa-4adf-84b0-ed9a06656695"], "80b370f2-6d21-4ff6-bc9b-763317af5321": ["bd7c4ee6-636c-4e73-8669-68ae8df8a0e8"], "d4e20011-f4f9-4de6-8453-8144d77ba3a7": ["bd7c4ee6-636c-4e73-8669-68ae8df8a0e8"], "12534c5f-eb68-41cb-a879-b908e95a65d0": ["96206509-2450-4808-b3db-0ad36b187bf3"], "11951856-b35d-4123-ab15-80c38cd82195": ["96206509-2450-4808-b3db-0ad36b187bf3"], "869c7e52-2b77-4144-af5f-d5beebc138f9": ["5b799f01-f51b-4867-8554-833805f3ab80"], "a310a614-4bd4-4302-ab42-37b6faa7d838": ["5b799f01-f51b-4867-8554-833805f3ab80"], "468bff4b-cdbb-489a-9cbd-95d9062f93dc": ["43309aea-4c65-4a8b-9dbb-ad2c5402ed13"], "3e5b4545-4ceb-424f-b519-16c3870dd543": ["43309aea-4c65-4a8b-9dbb-ad2c5402ed13"], "0f0eea1d-f05e-458e-b31a-e955629c7e44": ["4c75b2c9-d74b-46ad-b25f-e5b2bbba9a2f"], "6cc0bcc5-9c82-49a6-b118-3e333e70ee9e": ["4c75b2c9-d74b-46ad-b25f-e5b2bbba9a2f"], "4441faa1-8f27-4fc7-bea0-847caa1c1505": ["ca9ae4fc-a936-4dda-acea-192bc0206464"], "0db8fdee-99cf-47c6-9d8d-a85f3b294826": ["ca9ae4fc-a936-4dda-acea-192bc0206464"], "57db460e-0123-4edf-b7df-87a967a60c26": ["4b00025b-f3dc-41ec-b5e2-b4f77272ad81"], "48589831-4f3c-4bf6-9cb4-bc4277c489dd": ["4b00025b-f3dc-41ec-b5e2-b4f77272ad81"], "1df11168-7aa5-4b43-91df-c14c32f01440": ["054c9a30-d999-4ec7-a07e-200e0ac42d1f"], "2127b35f-68cd-4e5f-a669-a6a4bb532fa8": ["054c9a30-d999-4ec7-a07e-200e0ac42d1f"], "afefb290-48ec-450c-b530-5fe1b6c5340b": ["1b4221a5-1a5d-4193-b4c3-d0927768a090"], "eecbf085-2f16-45c4-ba65-35813ca84568": ["1b4221a5-1a5d-4193-b4c3-d0927768a090"], "43b6add5-244e-4c11-be3b-0944fecfa6b9": ["e2a458cd-3f14-4aad-ad1d-0efcae5d686c"], "37bbd6b6-d24a-4b73-a4f4-f532d8c1793a": ["e2a458cd-3f14-4aad-ad1d-0efcae5d686c"], "318fe73a-0591-41e8-b65e-925c71b2caab": ["380e7d12-ea58-4f2f-bc0c-4e04c176047d"], "56664bc2-0933-4e58-8d03-5c06b9d06c04": ["380e7d12-ea58-4f2f-bc0c-4e04c176047d"], "7f8b418c-6e85-4ab0-83db-b7ed7dc49a45": ["b73c4e8f-15b1-48df-b5d3-0dc244b5e44d"], "e81617a3-9609-4012-ba46-caa374c306de": ["b73c4e8f-15b1-48df-b5d3-0dc244b5e44d"], "054e5797-d024-41bd-8de9-983d038a8797": 
["fac21c98-5e09-4073-8499-737a13a0eb2d"], "fdb81ad2-acf2-4aa4-b551-fe766d22f273": ["fac21c98-5e09-4073-8499-737a13a0eb2d"], "09a4ef32-a01e-4ca9-9bf6-4704e328ccef": ["0026669e-4953-4d6a-b1d9-ecfa12faec64"], "e314f460-f6e2-4d11-b612-d51529a9dee6": ["0026669e-4953-4d6a-b1d9-ecfa12faec64"], "741f5989-422f-4bc5-9f72-0f3b22bb4f25": ["1171bb5d-18a9-429e-8122-da09f3a0d9f2"], "19592c9a-0621-4629-bdfc-8a08f0d396b4": ["1171bb5d-18a9-429e-8122-da09f3a0d9f2"], "f95100da-f55f-4402-909d-fdde5cf17d25": ["d96f7e82-cc68-47f6-86d2-85aa141a8c9e"], "bb3e7970-5b1e-4e98-87ad-b30d33ff6a89": ["d96f7e82-cc68-47f6-86d2-85aa141a8c9e"], "796ffa10-1532-4fa1-b832-d8ee058d410d": ["d44e9dcd-c607-44be-8995-10b21aae83a5"], "2c38117e-4b2d-4553-b319-f4ba3997996e": ["d44e9dcd-c607-44be-8995-10b21aae83a5"], "3b9c9379-cc75-4b9d-a68a-dc6b0a48fd9c": ["a9851a96-2f0d-44d3-bc00-c23aaa41be72"], "1a02235f-7bf0-4e7e-8149-ab610eacb769": ["a9851a96-2f0d-44d3-bc00-c23aaa41be72"], "08cbf993-d60b-4982-bf84-140c29d30450": ["394ba34f-5572-41aa-9636-d1f9f550d321"], "e253b5ac-feb7-4116-9e68-d2c817da36a5": ["394ba34f-5572-41aa-9636-d1f9f550d321"], "0260750e-4f7d-4c1b-b0b7-ae4c36cc8fc3": ["1c921767-4d8e-42c2-b1b7-f1eef6154d6f"], "39863570-2d41-4d21-bde1-1afc78c157b0": ["1c921767-4d8e-42c2-b1b7-f1eef6154d6f"], "8b4fd9d7-e1d4-472e-bd34-35fa98299c07": ["e4a13b31-217a-46da-a63d-97fb166719a8"], "1f34befe-4432-419f-8465-066a0d82ff77": ["e4a13b31-217a-46da-a63d-97fb166719a8"], "3e35db8c-c1b3-4b9f-b6c0-a3fd6e52d2b0": ["64bede83-602b-4ecc-9aa8-b7e66674fcbf"], "0c3b35ca-f421-41e4-b016-8f367561acbe": ["64bede83-602b-4ecc-9aa8-b7e66674fcbf"], "d73655e4-93f0-41c5-b69e-814ff8189db8": ["9d624f3e-302d-4fcf-9a0e-5e84ce69a0e6"], "bb8b9729-d1b6-407d-9f0c-aa1bd62a8d78": ["9d624f3e-302d-4fcf-9a0e-5e84ce69a0e6"], "3f1dec42-4087-4e06-9e7e-491c96cdee67": ["24ba513e-4acb-465b-be49-00cb67405123"], "9a94cfb2-25b9-4aa8-94c5-987c53fa42bf": ["24ba513e-4acb-465b-be49-00cb67405123"], "f445449e-b75f-44ee-a819-018ad630bd35": ["fb71dcec-b23f-4f60-a695-56ecd3f315ac"], "58fd202f-0791-411a-9124-09381dbbad11": ["fb71dcec-b23f-4f60-a695-56ecd3f315ac"], "26ee0f55-a947-440f-b4bc-4b7def4e3545": ["ee208f32-1e0d-4e1e-a351-3417bbd87afb"], "dcb01564-a34f-42a7-ac6c-13764525a7d2": ["ee208f32-1e0d-4e1e-a351-3417bbd87afb"], "8e29d29a-fc98-4a6f-b42b-580fc084dd71": ["193dbafa-5c73-4b7a-9b65-0df439acb9d8"], "4bbc6d4b-6b67-4831-8bca-853eb46aec3a": ["193dbafa-5c73-4b7a-9b65-0df439acb9d8"], "919fdd1d-2abb-472e-ac8d-bde9df2bb391": ["b115198f-f69a-4ce2-aebb-b3842c8f5271"], "a795e873-419b-454b-8598-fb0c49a7e5cc": ["b115198f-f69a-4ce2-aebb-b3842c8f5271"], "242b750e-1236-41f7-a1cc-eedef8f0427d": ["ad125822-a8be-416c-904e-df009ec77b21"], "c506e557-776f-42ed-99f9-c752ac2bb94b": ["ad125822-a8be-416c-904e-df009ec77b21"], "426616e2-6297-47c3-89c7-71ec1186cdba": ["e44738ee-74b6-4246-bc14-d817afb94e83"], "db43af55-434d-441f-8dc7-acc8ff3f8432": ["e44738ee-74b6-4246-bc14-d817afb94e83"], "f2913868-28a6-4558-904a-0486fbfc1f6e": ["68ce524c-132f-488c-adcf-6d6b0fd3ee28"], "5a4faa70-0364-4fd0-9c98-b26fb63f7786": ["68ce524c-132f-488c-adcf-6d6b0fd3ee28"], "3ad57490-e4f7-4fd2-bff4-93211043ec13": ["ed722cdb-468f-4721-a373-d1ca5a35c1f9"], "3a05d7ba-2e46-406b-aeb9-51b33efff15f": ["ed722cdb-468f-4721-a373-d1ca5a35c1f9"], "6ae09ea8-3090-401b-9f1e-4ce5270152cd": ["4097f22e-c5bf-4c18-8078-c3a2899b5bfb"], "207207ff-faab-4342-b76f-ef0c6fac88c9": ["4097f22e-c5bf-4c18-8078-c3a2899b5bfb"], "d15a10aa-36cb-4f3a-9f9e-2c0416ce1084": ["72d14b3e-b07e-43bd-9020-1a2c23f4ef52"], "c9f4fb11-9365-4354-aa94-7cc93efcafb5": 
["72d14b3e-b07e-43bd-9020-1a2c23f4ef52"], "4aebac20-11d4-42c8-be6a-f7ac4e43cbbc": ["db18094e-cd82-4e21-8d23-3a29d290999b"], "71537f88-7e77-4720-9cc0-bca516b4721f": ["db18094e-cd82-4e21-8d23-3a29d290999b"], "e004e796-65d5-4109-89bd-472cae5b6c75": ["094c20fa-14b1-497b-b40e-5b99c32cf2fc"], "290cd0b2-456b-41bc-bf0e-3ea3e32f480d": ["094c20fa-14b1-497b-b40e-5b99c32cf2fc"], "1c416614-5e28-45f4-9e8e-937971dcff9a": ["f33bc6b2-858a-46bd-ba56-b6410ce7b11b"], "d83ab93d-9be0-488d-94fd-8e58074a3388": ["f33bc6b2-858a-46bd-ba56-b6410ce7b11b"], "bfc45e93-d073-4348-8fb1-03dfaf4e73f3": ["ea01c2f2-4936-4233-8845-855c033c5a09"], "4819bdb4-1724-4318-855c-9c4f680c0655": ["ea01c2f2-4936-4233-8845-855c033c5a09"], "a8a96840-d387-42d9-9b56-f05b73027f5c": ["641dd569-3b6d-49b4-ab74-5b743949ed5d"], "7fdb6c15-c3f8-4327-b2fe-0169c08ce375": ["641dd569-3b6d-49b4-ab74-5b743949ed5d"], "3509c40f-7af0-49a5-bd16-c7da584b3980": ["ea99d79c-dacc-4993-a145-2146a1469e05"], "a86eba64-72a8-4afa-a7f5-8c50c3b0c660": ["ea99d79c-dacc-4993-a145-2146a1469e05"], "9eee9d68-6e0f-4430-989f-cb569677d74c": ["e8a4ecfe-f6e5-4984-8f0c-694996adfb03"], "6fba0797-2aaa-4686-9325-999b5396f47b": ["e8a4ecfe-f6e5-4984-8f0c-694996adfb03"], "449ab90b-3762-4d3e-99ea-899bd340c42b": ["a7b25bc5-d04c-4ce5-b11d-18080ed7322b"], "1c57be24-8e1d-4a3a-a29e-1d153c019510": ["a7b25bc5-d04c-4ce5-b11d-18080ed7322b"], "6b30e12e-cecf-4cd7-936e-84468c950a36": ["0422346b-f47b-48ad-890e-93045e292363"], "5547bf9b-ceae-4386-a486-7708637ab6a1": ["0422346b-f47b-48ad-890e-93045e292363"], "a520c4cc-f2f6-4dd8-bd3a-a1a750440209": ["d444272b-84db-47b2-8e39-d070bef54d11"], "d72c0d17-abee-470b-8725-abf4aad59b3f": ["d444272b-84db-47b2-8e39-d070bef54d11"], "8e31e286-3ac3-488f-a211-4575fd663a17": ["84e5065a-6f26-49c3-aeb8-31a8102a856b"], "76f71eb0-f3b8-425d-8772-65a5d214634f": ["84e5065a-6f26-49c3-aeb8-31a8102a856b"], "eb21dff3-4dd0-47af-a449-b9b525386911": ["3976a13c-4484-47bc-8b1d-0fcb75a19b95"], "645a1801-9128-4977-947d-5437b8933966": ["3976a13c-4484-47bc-8b1d-0fcb75a19b95"], "50321a04-5130-43ab-9305-cc1d548da8e0": ["88018024-6cf6-4719-ad61-61f79483bb74"], "32f6e506-6e82-41f7-b80c-f0702a537ca2": ["88018024-6cf6-4719-ad61-61f79483bb74"], "7a80ac97-319d-452b-a900-e739da72ab44": ["641be3b7-f879-4cc0-bc16-d9cb27069618"], "bedb600e-b951-4e89-9442-24b971ff1b21": ["641be3b7-f879-4cc0-bc16-d9cb27069618"], "5fa67d29-3be8-4c81-a7c9-1a4d5dfa0ba7": ["f12b5467-1c94-4938-98a8-5e0e4e6fff77"], "868637a7-88fa-4891-bcfd-da1d37772744": ["f12b5467-1c94-4938-98a8-5e0e4e6fff77"], "6410524d-24f8-4aaf-8b70-5dcfc8272cd0": ["380caf5a-f592-4a9d-8e55-905836b69ded"], "d0990582-29f1-41c2-90e1-89c4efc58153": ["380caf5a-f592-4a9d-8e55-905836b69ded"], "cbc06d96-6605-45f4-8067-0342ab04aac4": ["5b9ba636-3418-4270-a189-27f4e5b95ae0"], "b16b4f6b-0ec2-49bc-9453-9bbf1a8feea5": ["5b9ba636-3418-4270-a189-27f4e5b95ae0"], "b1da9e4e-62f7-4d08-ac87-2b196fa9114e": ["c3f7bcbe-0afe-4e8b-a6c2-8266ee6bec0a"], "aa279423-ea2b-4fa2-beb1-7a6e1400c36f": ["c3f7bcbe-0afe-4e8b-a6c2-8266ee6bec0a"], "8a9ae766-2f74-4272-bd84-e95787e5e943": ["f78abfc0-dc1b-4904-b10f-45b2d75bdffa"], "2567081e-89ba-4d98-a746-eaf8503e5c5d": ["f78abfc0-dc1b-4904-b10f-45b2d75bdffa"], "fa585f44-6fb6-443b-983e-6304d9c2f5e1": ["e88db2aa-0248-4c41-9ff5-f64b062d93ad"], "f51b4b1e-689f-4a47-82a2-d9a9a0d30ab7": ["e88db2aa-0248-4c41-9ff5-f64b062d93ad"], "ecb05eb6-335e-4451-bf2e-4c8ad8e800bf": ["481dbfa9-e17c-4a32-bfda-547eb5403563"], "51d512f0-8849-48de-a188-5aab8ddee724": ["481dbfa9-e17c-4a32-bfda-547eb5403563"], "80d2e492-0668-4e82-b83e-d1cef2355444": 
["60edd255-562c-403c-b6b1-20d1d828e53f"], "b7e8353e-ffb6-41a5-a321-d6b5521a03d5": ["60edd255-562c-403c-b6b1-20d1d828e53f"], "acbfd37b-65e1-440b-b4b1-9b3ee9a15fac": ["810d4e10-aa6e-4399-aee2-0740c4dc03c4"], "c86085f1-bc71-4d66-8869-d5335b328ec7": ["810d4e10-aa6e-4399-aee2-0740c4dc03c4"]}, "corpus": {"1eebe549-0cfa-4adf-84b0-ed9a06656695": "Another cybersecurity risk to GAI is data poisoning, in which an adversary compromises a training \ndataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts \nof the model could exacerbate risks associated with GAI system outputs. \nTrustworthy AI Characteristics: Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n2.10. \nIntellectual Property \nIntellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair \nuse under the fair use doctrine. If a GAI system\u2019s training data included copyrighted material, GAI \noutputs displaying instances of training data memorization (see Data Privacy above) could infringe on \ncopyright. \nHow GAI relates to copyright, including the status of generated content that is similar to but does not \nstrictly copy work protected by copyright, is currently being debated in legal fora. Similar discussions are", "bd7c4ee6-636c-4e73-8669-68ae8df8a0e8": "or lead to algorithmic discrimination. \nOversight. Human-based systems have the potential for bias, including automation bias, as well as other \nconcerns that may limit their effectiveness. The results of assessments of the efficacy and potential bias of \nsuch human-based systems should be overseen by governance structures that have the potential to update the \noperation of the human-based system in order to mitigate these effects. \n50", "96206509-2450-4808-b3db-0ad36b187bf3": "Intellectual Property; Data Privacy; \nObscene, Degrading, and/or \nAbusive Content \nMP-4.1-005 \nEstablish policies for collection, retention, and minimum quality of data, in \nconsideration of the following risks: Disclosure of inappropriate CBRN information; \nUse of Illegal or dangerous content; O\ufb00ensive cyber capabilities; Training data \nimbalances that could give rise to harmful biases; Leak of personally identi\ufb01able \ninformation, including facial likenesses of individuals. \nCBRN Information or Capabilities; \nIntellectual Property; Information \nSecurity; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-006 Implement policies and practices de\ufb01ning how third-party intellectual property and \ntraining data will be used, stored, and protected. \nIntellectual Property; Value Chain \nand Component Integration \nMP-4.1-007 Re-evaluate models that were \ufb01ne-tuned or enhanced on top of third-party \nmodels. \nValue Chain and Component \nIntegration \nMP-4.1-008", "5b799f01-f51b-4867-8554-833805f3ab80": "SAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. 
These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a", "43309aea-4c65-4a8b-9dbb-ad2c5402ed13": "\u2022\nSurya Mattu, Senior Data Engineer and Investigative Data Journalist, The Markup\n\u2022\nMariah Montgomery, National Campaign Director, Partnership for Working Families\n55", "4c75b2c9-d74b-46ad-b25f-e5b2bbba9a2f": "While indirect feedback methods such as automated error collection systems are useful, they often lack \nthe context and depth that direct input from end users can provide. Organizations can leverage feedback \napproaches described in the Pre-Deployment Testing section to capture input from external sources such \nas through AI red-teaming. \nIntegrating pre- and post-deployment external feedback into the monitoring process for GAI models and \ncorresponding applications can help enhance awareness of performance changes and mitigate potential \nrisks and harms from outputs. There are many ways to capture and make use of user feedback \u2013 before \nand after GAI systems and digital content transparency approaches are deployed \u2013 to gain insights about \nauthentication e\ufb03cacy and vulnerabilities, impacts of adversarial threats on techniques, and unintended \nconsequences resulting from the utilization of content provenance approaches on users and", "ca9ae4fc-a936-4dda-acea-192bc0206464": "technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of", "4b00025b-f3dc-41ec-b5e2-b4f77272ad81": "32 \nMEASURE 2.6: The AI system is evaluated regularly for safety risks \u2013 as identi\ufb01ed in the MAP function. The AI system to be \ndeployed is demonstrated to be safe, its residual negative risk does not exceed the risk tolerance, and it can fail safely, particularly if \nmade to operate beyond its knowledge limits. Safety metrics re\ufb02ect system reliability and robustness, real-time monitoring, and \nresponse times for AI system failures. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.6-001 \nAssess adverse impacts, including health and wellbeing impacts for value chain \nor other AI Actors that are exposed to sexually explicit, o\ufb00ensive, or violent \ninformation during GAI training and maintenance. 
\nHuman-AI Con\ufb01guration; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; \nDangerous, Violent, or Hateful \nContent \nMS-2.6-002 \nAssess existence or levels of harmful bias, intellectual property infringement,", "054c9a30-d999-4ec7-a07e-200e0ac42d1f": "into other automated systems that directly impact people\u2019s lives. Federal law has not grown to address the expanding \nscale of private data collection, or of the ability of governments at all levels to access that data and leverage the means \nof private collection. \nMeanwhile, members of the American public are often unable to access their personal data or make critical decisions \nabout its collection and use. Data brokers frequently collect consumer data from numerous sources without \nconsumers\u2019 permission or knowledge.60 Moreover, there is a risk that inaccurate and faulty data can be used to \nmake decisions about their lives, such as whether they will qualify for a loan or get a job. Use of surveillance \ntechnologies has increased in schools and workplaces, and, when coupled with consequential management and \nevaluation decisions, it is leading to mental health harms such as lowered self-confidence, anxiety, depression, and", "1b4221a5-1a5d-4193-b4c3-d0927768a090": "110. Rachel Orey and Owen Bacskai. The Low Down on Ballot Curing. Nov. 04, 2020. https://\nbipartisanpolicy.org/blog/the-low-down-on-ballot-curing/; Zahavah Levine and Thea Raymond-\nSeidel. Mail Voting Litigation in 2020, Part IV: Verifying Mail Ballots. Oct. 29, 2020.\nhttps://www.lawfareblog.com/mail-voting-litigation-2020-part-iv-verifying-mail-ballots\n111. National Conference of State Legislatures. Table 15: States With Signature Cure Processes. Jan. 18,\n2022.\nhttps://www.ncsl.org/research/elections-and-campaigns/vopp-table-15-states-that-permit-voters-to\u00ad\ncorrect-signature-discrepancies.aspx\n112. White House Office of Science and Technology Policy. Join the Effort to Create A Bill of Rights for\nan Automated Society. Nov. 10, 2021.\nhttps://www.whitehouse.gov/ostp/news-updates/2021/11/10/join-the-effort-to-create-a-bill-of\u00ad\nrights-for-an-automated-society/\n113. White House Office of Science and Technology Policy. Notice of Request for Information (RFI) on", "e2a458cd-3f14-4aad-ad1d-0efcae5d686c": "Research Institute Report. June 29, 2021. https://datasociety.net/library/assembling-accountability\u00ad\nalgorithmic-impact-assessment-for-the-public-interest/; Nicol Turner Lee, Paul Resnick, and Genie\nBarton. Algorithmic bias detection and mitigation: Best practices and policies to reduce consumer harms.\nBrookings Report. May 22, 2019.\nhttps://www.brookings.edu/research/algorithmic-bias-detection-and-mitigation-best-practices-and\u00ad\npolicies-to-reduce-consumer-harms/; Andrew D. Selbst. An Institutional View Of Algorithmic Impact\nAssessments. Harvard Journal of Law & Technology. June 15, 2021. https://ssrn.com/abstract=3867634;\nDillon Reisman, Jason Schultz, Kate Crawford, and Meredith Whittaker. Algorithmic Impact\nAssessments: A Practical Framework for Public Agency Accountability. AI Now Institute Report. April\n2018. https://ainowinstitute.org/aiareport2018.pdf\n51. Department of Justice. Justice Department Announces New Initiative to Combat Redlining. Oct. 
22,", "380e7d12-ea58-4f2f-bc0c-4e04c176047d": "HOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe federal government is working to combat discrimination in mortgage lending. The Depart\u00ad\nment of Justice has launched a nationwide initiative to combat redlining, which includes reviewing how \nlenders who may be avoiding serving communities of color are conducting targeted marketing and advertising.51 \nThis initiative will draw upon strong partnerships across federal agencies, including the Consumer Financial \nProtection Bureau and prudential regulators. The Action Plan to Advance Property Appraisal and Valuation \nEquity includes a commitment from the agencies that oversee mortgage lending to include a \nnondiscrimination standard in the proposed rules for Automated Valuation Models.52", "b73c4e8f-15b1-48df-b5d3-0dc244b5e44d": "Intellectual Property \nGV-6.1-009 \nUpdate and integrate due diligence processes for GAI acquisition and \nprocurement vendor assessments to include intellectual property, data privacy, \nsecurity, and other risks. For example, update processes to: Address solutions that \nmay rely on embedded GAI technologies; Address ongoing monitoring, \nassessments, and alerting, dynamic risk assessments, and real-time reporting \ntools for monitoring third-party GAI risks; Consider policy adjustments across GAI \nmodeling libraries, tools and APIs, \ufb01ne-tuned models, and embedded tools; \nAssess GAI vendors, open-source or proprietary GAI tools, or GAI service \nproviders against incident or vulnerability databases. \nData Privacy; Human-AI \nCon\ufb01guration; Information \nSecurity; Intellectual Property; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nGV-6.1-010 \nUpdate GAI acceptable use policies to address proprietary and open-source GAI", "fac21c98-5e09-4073-8499-737a13a0eb2d": "disparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5", "0026669e-4953-4d6a-b1d9-ecfa12faec64": "\u2022\nA device originally developed to help people track and find lost items has been used as a tool by stalkers to track\nvictims\u2019 locations in violation of their privacy and safety. The device manufacturer took steps after release to\nprotect people from unwanted tracking by alerting people on their phones when a device is found to be moving\nwith them over time and also by having the device make an occasional noise, but not all phones are able\nto receive the notification and the devices remain a safety concern due to their misuse.8 \n\u2022\nAn algorithm used to deploy police was found to repeatedly send police to neighborhoods they regularly visit,\neven if those neighborhoods were not the ones with the highest crime rates. These incorrect crime predictions\nwere the result of a feedback loop generated from the reuse of data from previous arrests and algorithm\npredictions.9\n16", "1171bb5d-18a9-429e-8122-da09f3a0d9f2": "the creation and spread of NCII disproportionately impacts women and sexual minorities, and can have \nsubsequent negative consequences including decline in overall mental health, substance abuse, and \neven suicidal thoughts. \nData used for training GAI models may unintentionally include CSAM and NCII. 
A recent report noted \nthat several commonly used GAI training datasets were found to contain hundreds of known images of", "d96f7e82-cc68-47f6-86d2-85aa141a8c9e": "APPENDIX\nPanel 4: Artificial Intelligence and Democratic Values. This event examined challenges and opportunities in \nthe design of technology that can help support a democratic vision for AI. It included discussion of the \ntechnical aspects \nof \ndesigning \nnon-discriminatory \ntechnology, \nexplainable \nAI, \nhuman-computer \ninteraction with an emphasis on community participation, and privacy-aware design. \nWelcome:\n\u2022\nSorelle Friedler, Assistant Director for Data and Democracy, White House Office of Science and\nTechnology Policy\n\u2022\nJ. Bob Alotta, Vice President for Global Programs, Mozilla Foundation\n\u2022\nNavrina Singh, Board Member, Mozilla Foundation\nModerator: Kathy Pham Evans, Deputy Chief Technology Officer for Product and Engineering, U.S \nFederal Trade Commission. \nPanelists: \n\u2022\nLiz O\u2019Sullivan, CEO, Parity AI\n\u2022\nTimnit Gebru, Independent Scholar\n\u2022\nJennifer Wortman Vaughan, Senior Principal Researcher, Microsoft Research, New York City\n\u2022", "d44e9dcd-c607-44be-8995-10b21aae83a5": "58 \nSatariano, A. et al. (2023) The People Onscreen Are Fake. The Disinformation Is Real. New York Times. \nhttps://www.nytimes.com/2023/02/07/technology/arti\ufb01cial-intelligence-training-deepfake.html \nSchaul, K. et al. (2024) Inside the secret list of websites that make AI like ChatGPT sound smart. \nWashington Post. https://www.washingtonpost.com/technology/interactive/2023/ai-chatbot-learning/ \nScheurer, J. et al. (2023) Technical report: Large language models can strategically deceive their users \nwhen put under pressure. arXiv. https://arxiv.org/abs/2311.07590 \nShelby, R. et al. (2023) Sociotechnical Harms of Algorithmic Systems: Scoping a Taxonomy for Harm \nReduction. arXiv. https://arxiv.org/pdf/2210.05791 \nShevlane, T. et al. (2023) Model evaluation for extreme risks. arXiv. https://arxiv.org/pdf/2305.15324 \nShumailov, I. et al. (2023) The curse of recursion: training on generated data makes models forget. arXiv. \nhttps://arxiv.org/pdf/2305.17493v2", "a9851a96-2f0d-44d3-bc00-c23aaa41be72": "In addition to the suggested actions below, AI risk management activities and actions set forth in the AI \nRMF 1.0 and Playbook are already applicable for managing GAI risks. Organizations are encouraged to \napply the activities suggested in the AI RMF and its Playbook when managing the risk of GAI systems. \nImplementation of the suggested actions will vary depending on the type of risk, characteristics of GAI \nsystems, stage of the GAI lifecycle, and relevant AI actors involved. \nSuggested actions to manage GAI risks can be found in the tables below: \n\u2022 \nThe suggested actions are organized by relevant AI RMF subcategories to streamline these \nactivities alongside implementation of the AI RMF. \n\u2022 \nNot every subcategory of the AI RMF is included in this document.13 Suggested actions are \nlisted for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG e\ufb00orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.", "394ba34f-5572-41aa-9636-d1f9f550d321": "Provide the public with mechanisms for appropriate and meaningful consent, access, and \ncontrol over their data \nUse-specific consent. Consent practices should not allow for abusive surveillance practices. 
Where data \ncollectors or automated systems seek consent, they should seek it for specific, narrow use contexts, for specif\u00ad\nic time durations, and for use by specific entities. Consent should not extend if any of these conditions change; \nconsent should be re-acquired before using data if the use case changes, a time limit elapses, or data is trans\u00ad\nferred to another entity (including being shared or sold). Consent requested should be limited in scope and \nshould not request consent beyond what is required. Refusal to provide consent should be allowed, without \nadverse effects, to the greatest extent possible based on the needs of the use case. \nBrief and direct consent requests. When seeking consent from users short, plain language consent", "1c921767-4d8e-42c2-b1b7-f1eef6154d6f": "TABLE OF CONTENTS\nFROM PRINCIPLES TO PRACTICE: A TECHNICAL COMPANION TO THE BLUEPRINT \nFOR AN AI BILL OF RIGHTS \n \nUSING THIS TECHNICAL COMPANION\n \nSAFE AND EFFECTIVE SYSTEMS\n \nALGORITHMIC DISCRIMINATION PROTECTIONS\n \nDATA PRIVACY\n \nNOTICE AND EXPLANATION\n \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nAPPENDIX\n \nEXAMPLES OF AUTOMATED SYSTEMS\n \nLISTENING TO THE AMERICAN PEOPLE\nENDNOTES \n12\n14\n15\n23\n30\n40\n46\n53\n53\n55\n63\n13", "e4a13b31-217a-46da-a63d-97fb166719a8": "Human-AI Con\ufb01guration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Con\ufb01guration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that \ufb01ne-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. \nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV", "64bede83-602b-4ecc-9aa8-b7e66674fcbf": "public; this section focuses on red-teaming in pre-deployment contexts. \nThe quality of AI red-teaming outputs is related to the background and expertise of the AI red team \nitself. Demographically and interdisciplinarily diverse AI red teams can be used to identify \ufb02aws in the \nvarying contexts where GAI will be used. For best results, AI red teams should demonstrate domain \nexpertise, and awareness of socio-cultural aspects within the deployment context. AI red-teaming results \nshould be given additional analysis before they are incorporated into organizational governance and \ndecision making, policy and procedural updates, and AI risk management e\ufb00orts. \nVarious types of AI red-teaming may be appropriate, depending on the use case: \n\u2022 \nGeneral Public: Performed by general users (not necessarily AI or technical experts) who are \nexpected to use the model or interact with its outputs, and who bring their own lived \nexperiences and perspectives to the task of AI red-teaming. These individuals may have been", "9d624f3e-302d-4fcf-9a0e-5e84ce69a0e6": "34. Todd Feathers. 
Major Universities Are Using Race as a \u201cHigh Impact Predictor\u201d of Student Success:\nStudents, professors, and education experts worry that that\u2019s pushing Black students in particular out of math\nand science. The Markup. Mar. 2, 2021. https://themarkup.org/machine-learning/2021/03/02/major\u00ad\nuniversities-are-using-race-as-a-high-impact-predictor-of-student-success\n65", "24ba513e-4acb-465b-be49-00cb67405123": "APPENDIX\nPanelists discussed the benefits of AI-enabled systems and their potential to build better and more \ninnovative infrastructure. They individually noted that while AI technologies may be new, the process of \ntechnological diffusion is not, and that it was critical to have thoughtful and responsible development and \nintegration of technology within communities. Some panelists suggested that the integration of technology \ncould benefit from examining how technological diffusion has worked in the realm of urban planning: \nlessons learned from successes and failures there include the importance of balancing ownership rights, use \nrights, and community health, safety and welfare, as well ensuring better representation of all voices, \nespecially those traditionally marginalized by technological advances. Some panelists also raised the issue of \npower structures \u2013 providing examples of how strong transparency requirements in smart city projects", "fb71dcec-b23f-4f60-a695-56ecd3f315ac": "SECTION TITLE\nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nYou should be able to opt out, where appropriate, and have access to a person who can quickly \nconsider and remedy problems you encounter. You should be able to opt out from automated systems in \nfavor of a human alternative, where appropriate. Appropriateness should be determined based on reasonable \nexpectations in a given context and with a focus on ensuring broad accessibility and protecting the public from \nespecially harmful impacts. In some cases, a human or other alternative may be required by law. You should have \naccess to timely human consideration and remedy by a fallback and escalation process if an automated system \nfails, it produces an error, or you would like to appeal or contest its impacts on you. Human consideration and \nfallback should be accessible, equitable, effective, maintained, accompanied by appropriate operator training, and", "ee208f32-1e0d-4e1e-a351-3417bbd87afb": "\u2022\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24", "193dbafa-5c73-4b7a-9b65-0df439acb9d8": "ENDNOTES\n1.The Executive Order On Advancing Racial Equity and Support for Underserved Communities Through the\nFederal\u00a0Government. https://www.whitehouse.gov/briefing-room/presidential-actions/2021/01/20/executive\norder-advancing-racial-equity-and-support-for-underserved-communities-through-the-federal-government/\n2. The White House. Remarks by President Biden on the Supreme Court Decision to Overturn Roe v. Wade. Jun.\n24, 2022. 
https://www.whitehouse.gov/briefing-room/speeches-remarks/2022/06/24/remarks-by-president\u00ad\nbiden-on-the-supreme-court-decision-to-overturn-roe-v-wade/\n3. The White House. Join the Effort to Create A Bill of Rights for an Automated Society. Nov. 10, 2021. https://\nwww.whitehouse.gov/ostp/news-updates/2021/11/10/join-the-effort-to-create-a-bill-of-rights-for-an\u00ad\nautomated-society/\n4. U.S. Dept. of Health, Educ. & Welfare, Report of the Sec\u2019y\u2019s Advisory Comm. on Automated Pers. Data Sys.,", "b115198f-f69a-4ce2-aebb-b3842c8f5271": "Value Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nMG-3.1-003 \nRe-assess model risks after \ufb01ne-tuning or retrieval-augmented generation \nimplementation and for any third-party GAI models deployed for applications \nand/or use cases that were not evaluated in initial testing. \nValue Chain and Component \nIntegration \nMG-3.1-004 \nTake reasonable measures to review training data for CBRN information, and \nintellectual property, and where appropriate, remove it. Implement reasonable \nmeasures to prevent, \ufb02ag, or take other action in response to outputs that \nreproduce particular training data (e.g., plagiarized, trademarked, patented, \nlicensed content or trade secret material). \nIntellectual Property; CBRN \nInformation or Capabilities", "ad125822-a8be-416c-904e-df009ec77b21": "communities. Furthermore, organizations can track and document the provenance of datasets to identify \ninstances in which AI-generated data is a potential root cause of performance issues with the GAI \nsystem. \nA.1.8. Incident Disclosure \nOverview \nAI incidents can be de\ufb01ned as an \u201cevent, circumstance, or series of events where the development, use, \nor malfunction of one or more AI systems directly or indirectly contributes to one of the following harms: \ninjury or harm to the health of a person or groups of people (including psychological harms and harms to \nmental health); disruption of the management and operation of critical infrastructure; violations of \nhuman rights or a breach of obligations under applicable law intended to protect fundamental, labor, \nand intellectual property rights; or harm to property, communities, or the environment.\u201d AI incidents can \noccur in the aggregate (i.e., for systemic discrimination) or acutely (i.e., for one individual). \nState of AI Incident Tracking and Disclosure", "e44738ee-74b6-4246-bc14-d817afb94e83": "American Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L. 
Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium \nat Computing Research Association \nConnected Health Initiative", "68ce524c-132f-488c-adcf-6d6b0fd3ee28": "ing should take into account the performance of both technical system components (the algorithm as well as \nany hardware components, data inputs, etc.) and human operators. It should include mechanisms for testing \nthe actual accuracy of any predictions or recommendations generated by a system, not just a human operator\u2019s \ndetermination of their accuracy. Ongoing monitoring procedures should include manual, human-led monitor\u00ad\ning as a check in the event there are shortcomings in automated monitoring systems. These monitoring proce\u00ad\ndures should be in place for the lifespan of the deployed automated system. \nClear organizational oversight. Entities responsible for the development or use of automated systems \nshould lay out clear governance structures and procedures. This includes clearly-stated governance proce\u00ad\ndures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing", "ed722cdb-468f-4721-a373-d1ca5a35c1f9": "should not impose an unreasonable burden on the public. Automated systems with an intended use within sensi\u00ad\ntive domains, including, but not limited to, criminal justice, employment, education, and health, should additional\u00ad\nly be tailored to the purpose, provide meaningful access for oversight, include training for any people interacting \nwith the system, and incorporate human consideration for adverse or high-risk decisions. Reporting that includes \na description of these human governance processes and assessment of their timeliness, accessibility, outcomes, \nand effectiveness should be made public whenever possible. \nDefinitions for key terms in The Blueprint for an AI Bill of Rights can be found in Applying the Blueprint for an AI Bill of Rights. \nAccompanying analysis and tools for actualizing each principle can be found in the Technical Companion. \n7", "4097f22e-c5bf-4c18-8078-c3a2899b5bfb": "DATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \u00ad\u00ad\u00ad\u00ad\u00ad\u00ad\nIn addition to the privacy expectations above for general non-sensitive data, any system collecting, using, shar-\ning, or storing sensitive data should meet the expectations below. Depending on the technological use case and \nbased on an ethical assessment, consent for sensitive data may need to be acquired from a guardian and/or child. \nProvide enhanced protections for data related to sensitive domains \nNecessary functions only. 
Sensitive data should only be used for functions strictly necessary for that \ndomain or for functions that are required for administrative reasons (e.g., school attendance records), unless \nconsent is acquired, if appropriate, and the additional expectations in this section are met. Consent for non-", "72d14b3e-b07e-43bd-9020-1a2c23f4ef52": "researchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identi\ufb01ed in this document in \norder to adequately describe an experimental procedure or concept. Such identi\ufb01cation is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it", "db18094e-cd82-4e21-8d23-3a29d290999b": "should be maintained and supported as long as the relevant automated system continues to be in use. \nInstitute training, assessment, and oversight to combat automation bias and ensure any \nhuman-based components of a system are effective. \nTraining and assessment. Anyone administering, interacting with, or interpreting the outputs of an auto\u00ad\nmated system should receive training in that system, including how to properly interpret outputs of a system \nin light of its intended purpose and in how to mitigate the effects of automation bias. The training should reoc\u00ad\ncur regularly to ensure it is up to date with the system and to ensure the system is used appropriately. Assess\u00ad\nment should be ongoing to ensure that the use of the system with human involvement provides for appropri\u00ad\nate results, i.e., that the involvement of people does not invalidate the system's assessment as safe and effective \nor lead to algorithmic discrimination.", "094c20fa-14b1-497b-b40e-5b99c32cf2fc": "(https://www.ftc.gov/legal-library/browse/cases-proceedings/192-3172-everalbum-inc-matter), and\nagainst Weight Watchers and their subsidiary Kurbo\n(https://www.ftc.gov/legal-library/browse/cases-proceedings/1923228-weight-watchersww)\n69. See, e.g., HIPAA, Pub. L 104-191 (1996); Fair Debt Collection Practices Act (FDCPA), Pub. L. 95-109\n(1977); Family Educational Rights and Privacy Act (FERPA) (20 U.S.C. \u00a7 1232g), Children's Online\nPrivacy Protection Act of 1998, 15 U.S.C. 6501\u20136505, and Confidential Information Protection and\nStatistical Efficiency Act (CIPSEA) (116 Stat. 2899)\n70. Marshall Allen. You Snooze, You Lose: Insurers Make The Old Adage Literally True. ProPublica. Nov.\n21, 2018.\nhttps://www.propublica.org/article/you-snooze-you-lose-insurers-make-the-old-adage-literally-true\n71. Charles Duhigg. How Companies Learn Your Secrets. The New York Times. Feb. 
16, 2012.\nhttps://www.nytimes.com/2012/02/19/magazine/shopping-habits.html", "f33bc6b2-858a-46bd-ba56-b6410ce7b11b": "Security \nMP-5.1-002 \nIdentify potential content provenance harms of GAI, such as misinformation or \ndisinformation, deepfakes, including NCII, or tampered content. Enumerate and \nrank risks based on their likelihood and potential impact, and determine how well \nprovenance solutions address speci\ufb01c risks and/or harms. \nInformation Integrity; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMP-5.1-003 \nConsider disclosing use of GAI to end users in relevant contexts, while considering \nthe objective of disclosure, the context of use, the likelihood and magnitude of the \nrisk posed, the audience of the disclosure, as well as the frequency of the \ndisclosures. \nHuman-AI Con\ufb01guration \nMP-5.1-004 Prioritize GAI structured public feedback processes based on risk assessment \nestimates. \nInformation Integrity; CBRN \nInformation or Capabilities; \nDangerous, Violent, or Hateful \nContent; Harmful Bias and \nHomogenization", "ea01c2f2-4936-4233-8845-855c033c5a09": "\u00ad\u00ad\u00ad\u00ad\u00ad\u00ad\u00ad\nALGORITHMIC DISCRIMINATION Protections\nYou should not face discrimination by algorithms \nand systems should be used and designed in an \nequitable \nway. \nAlgorithmic \ndiscrimination \noccurs when \nautomated systems contribute to unjustified different treatment or \nimpacts disfavoring people based on their race, color, ethnicity, \nsex \n(including \npregnancy, \nchildbirth, \nand \nrelated \nmedical \nconditions, \ngender \nidentity, \nintersex \nstatus, \nand \nsexual \norientation), religion, age, national origin, disability, veteran status, \ngenetic infor-mation, or any other classification protected by law. \nDepending on the specific circumstances, such algorithmic \ndiscrimination may violate legal protections. Designers, developers, \nand deployers of automated systems should take proactive and \ncontinuous measures to protect individuals and communities \nfrom algorithmic discrimination and to use and design systems in \nan equitable way. This protection should include proactive equity", "641dd569-3b6d-49b4-ab74-5b743949ed5d": "requirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companies\u2019 reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. 
OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-", "ea99d79c-dacc-4993-a145-2146a1469e05": "SAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \u00ad\u00ad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principles\u2014while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AI\u2014require that AI is: (a) lawful and \nrespectful of our Nation\u2019s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d)", "e8a4ecfe-f6e5-4984-8f0c-694996adfb03": "systems. \n8. Information Integrity: Lowered barrier to entry to generate and support the exchange and \nconsumption of content which may not distinguish fact from opinion or \ufb01ction or acknowledge \nuncertainties, or could be leveraged for large-scale dis- and mis-information campaigns. \n9. Information Security: Lowered barriers for o\ufb00ensive cyber capabilities, including via automated \ndiscovery and exploitation of vulnerabilities to ease hacking, malware, phishing, o\ufb00ensive cyber \n \n \n6 Some commenters have noted that the terms \u201challucination\u201d and \u201cfabrication\u201d anthropomorphize GAI, which \nitself is a risk related to GAI systems as it can inappropriately attribute human characteristics to non-human \nentities. \n7 What is categorized as sensitive data or sensitive PII can be highly contextual based on the nature of the \ninformation, but examples of sensitive information include information that relates to an information subject\u2019s", "a7b25bc5-d04c-4ce5-b11d-18080ed7322b": "9 Confabulations of falsehoods are most commonly a problem for text-based outputs; for audio, image, or video \ncontent, creative generation of non-factual content can be a desired behavior. \n10 For example, legal confabulations have been shown to be pervasive in current state-of-the-art LLMs. See also, \ne.g.,", "0422346b-f47b-48ad-890e-93045e292363": "this document as well as in Executive Order on Advancing Racial Equity and Support for Underserved\nCommunities Through the Federal Government:\nhttps://www.whitehouse.gov/briefing-room/presidential-actions/2021/01/20/executive-order\u00ad\nadvancing-racial-equity-and-support-for-underserved-communities-through-the-federal-government/\n106. HealthCare.gov. Navigator - HealthCare.gov Glossary. Accessed May 2, 2022.\nhttps://www.healthcare.gov/glossary/navigator/\n72", "d444272b-84db-47b2-8e39-d070bef54d11": "ENDNOTES\n57. ISO Technical Management Board. ISO/IEC Guide 71:2014. Guide for addressing accessibility in\nstandards. International Standards Organization. 2021. https://www.iso.org/standard/57385.html\n58. World Wide Web Consortium. Web Content Accessibility Guidelines (WCAG) 2.0. Dec. 11, 2008.\nhttps://www.w3.org/TR/WCAG20/\n59. Reva Schwartz, Apostol Vassilev, Kristen Greene, Lori Perine, and Andrew Bert. NIST Special\nPublication 1270: Towards a Standard for Identifying and Managing Bias in Artificial Intelligence. 
The\nNational Institute of Standards and Technology. March, 2022. https://nvlpubs.nist.gov/nistpubs/\nSpecialPublications/NIST.SP.1270.pdf\n60. See, e.g., the 2014 Federal Trade Commission report \u201cData Brokers A Call for Transparency and\nAccountability\u201d. https://www.ftc.gov/system/files/documents/reports/data-brokers-call-transparency\u00ad\naccountability-report-federal-trade-commission-may-2014/140527databrokerreport.pdf", "84e5065a-6f26-49c3-aeb8-31a8102a856b": "DATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nProtect the public from unchecked surveillance \nHeightened oversight of surveillance. Surveillance or monitoring systems should be subject to \nheightened oversight that includes at a minimum assessment of potential harms during design (before deploy\u00ad\nment) and in an ongoing manner, to ensure that the American public\u2019s rights, opportunities, and access are \nprotected. This assessment should be done before deployment and should give special attention to ensure \nthere is not algorithmic discrimination, especially based on community membership, when deployed in a \nspecific real-world context. Such assessment should then be reaffirmed in an ongoing manner as long as the \nsystem is in use.", "3976a13c-4484-47bc-8b1d-0fcb75a19b95": "HUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEquitable. Consideration should be given to ensuring outcomes of the fallback and escalation system are \nequitable when compared to those of the automated system and such that the fallback and escalation \nsystem provides equitable access to underserved communities.105 \nTimely. Human consideration and fallback are only useful if they are conducted and concluded in a \ntimely manner. The determination of what is timely should be made relative to the specific automated \nsystem, and the review system should be staffed and regularly assessed to ensure it is providing timely \nconsideration and fallback. In time-critical systems, this mechanism should be immediately available or,", "88018024-6cf6-4719-ad61-61f79483bb74": "DATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\nSome domains, including health, employment, education, criminal justice, and personal finance, have long been \nsingled out as sensitive domains deserving of enhanced data protections. This is due to the intimate nature of these \ndomains as well as the inability of individuals to opt out of these domains in any meaningful way, and the \nhistorical discrimination that has often accompanied data knowledge.69 Domains understood by the public to be \nsensitive also change over time, including because of technological developments. Tracking and monitoring \ntechnologies, personal tracking devices, and our extensive data footprints are used and misused more than ever \nbefore; as such, the protections afforded by current legal guidelines may be inadequate. 
The American public \ndeserves assurances that data related to such sensitive domains is protected and used appropriately and only in", "641be3b7-f879-4cc0-bc16-d9cb27069618": "36 \nMEASURE 2.11: Fairness and bias \u2013 as identi\ufb01ed in the MAP function \u2013 are evaluated and results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.11-001 \nApply use-case appropriate benchmarks (e.g., Bias Benchmark Questions, Real \nHateful or Harmful Prompts, Winogender Schemas15) to quantify systemic bias, \nstereotyping, denigration, and hateful content in GAI system outputs; \nDocument assumptions and limitations of benchmarks, including any actual or \npossible training/test data cross contamination, relative to in-context \ndeployment environment. \nHarmful Bias and Homogenization \nMS-2.11-002 \nConduct fairness assessments to measure systemic bias. Measure GAI system \nperformance across demographic groups and subgroups, addressing both \nquality of service and any allocation of services and resources. Quantify harms \nusing: \ufb01eld testing with sub-group populations to determine likelihood of \nexposure to generated content exhibiting harmful bias, AI red-teaming with", "f12b5467-1c94-4938-98a8-5e0e4e6fff77": "than an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n\u2022\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women appli\u00ad\ncants for spurious and discriminatory reasons; resumes with the word \u201cwomen\u2019s,\u201d such as \u201cwomen\u2019s\nchess club captain,\u201d were penalized in the candidate ranking.33\n\u2022\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n\u2022", "380caf5a-f592-4a9d-8e55-905836b69ded": "zucked-users-say-they-get-blocked-racism-discussion/2859593002/\n8. See, e.g., Michael Levitt. AirTags are being used to track people and cars. Here's what is being done about it.\nNPR. Feb. 18, 2022. https://www.npr.org/2022/02/18/1080944193/apple-airtags-theft-stalking-privacy-tech;\nSamantha Cole. Police Records Show Women Are Being Stalked With Apple AirTags Across the Country.\nMotherboard. Apr. 6, 2022. https://www.vice.com/en/article/y3vj3y/apple-airtags-police-reports-stalking\u00ad\nharassment\n9. Kristian Lum and William Isaac. To Predict and Serve? Significance. Vol. 13, No. 5, p. 14-19. Oct. 7, 2016.\nhttps://rss.onlinelibrary.wiley.com/doi/full/10.1111/j.1740-9713.2016.00960.x; Aaron Sankin, Dhruv Mehrotra,\nSurya Mattu, and Annie Gilbertson. Crime Prediction Software Promised to Be Free of Biases. New Data Shows\nIt Perpetuates Them. The Markup and Gizmodo. Dec. 2, 2021. 
https://themarkup.org/prediction\u00ad\nbias/2021/12/02/crime-prediction-software-promised-to-be-free-of-biases-new-data-shows-it-perpetuates\u00ad", "5b9ba636-3418-4270-a189-27f4e5b95ae0": "GOVERN 1.6: Mechanisms are in place to inventory AI systems and are resourced according to organizational risk priorities. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.6-001 Enumerate organizational GAI systems for incorporation into AI system inventory \nand adjust AI system inventory requirements to account for GAI risks. \nInformation Security \nGV-1.6-002 De\ufb01ne any inventory exemptions in organizational policies for GAI systems \nembedded into application software. \nValue Chain and Component \nIntegration \nGV-1.6-003 \nIn addition to general model, governance, and risk information, consider the \nfollowing items in GAI system inventory entries: Data provenance information \n(e.g., source, signatures, versioning, watermarks); Known issues reported from \ninternal bug tracking or external information sharing resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor); Human oversight roles \nand responsibilities; Special rights and considerations for intellectual property,", "c3f7bcbe-0afe-4e8b-a6c2-8266ee6bec0a": "WHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy.", "f78abfc0-dc1b-4904-b10f-45b2d75bdffa": "24 \nMAP 2.1: The speci\ufb01c tasks and methods used to implement the tasks that the AI system will support are de\ufb01ned (e.g., classi\ufb01ers, \ngenerative models, recommenders). \nAction ID \nSuggested Action \nGAI Risks \nMP-2.1-001 \nEstablish known assumptions and practices for determining data origin and \ncontent lineage, for documentation and evaluation purposes. \nInformation Integrity \nMP-2.1-002 \nInstitute test and evaluation for data and content \ufb02ows within the GAI system, \nincluding but not limited to, original data sources, data transformations, and \ndecision-making criteria. \nIntellectual Property; Data Privacy \nAI Actor Tasks: TEVV \n \nMAP 2.2: Information about the AI system\u2019s knowledge limits and how system output may be utilized and overseen by humans is \ndocumented. Documentation provides su\ufb03cient information to assist relevant AI Actors when making decisions and taking \nsubsequent actions. \nAction ID \nSuggested Action \nGAI Risks \nMP-2.2-001", "e88db2aa-0248-4c41-9ff5-f64b062d93ad": "NOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. 
\nTailored to the level of risk. An assessment should be done to determine the level of risk of the auto\u00ad\nmated system. In settings where the consequences are high as determined by a risk assessment, or extensive \noversight is expected (e.g., in criminal justice or some public sector settings), explanatory mechanisms should \nbe built into the system design so that the system\u2019s full behavior can be explained in advance (i.e., only fully \ntransparent models should be used), rather than as an after-the-decision interpretation. In other settings, the \nextent of explanation provided should be tailored to the risk level. \nValid. The explanation provided by a system should accurately reflect the factors and the influences that led", "481dbfa9-e17c-4a32-bfda-547eb5403563": "State of AI Incident Tracking and Disclosure \nFormal channels do not currently exist to report and document AI incidents. However, a number of \npublicly available databases have been created to document their occurrence. These reporting channels \nmake decisions on an ad hoc basis about what kinds of incidents to track. Some, for example, track by \namount of media coverage.", "60edd255-562c-403c-b6b1-20d1d828e53f": "NIST Trustworthy and Responsible AI \nNIST AI 600-1 \nArtificial Intelligence Risk Management \nFramework: Generative Artificial \nIntelligence Profile \n \n \n \nThis publication is available free of charge from: \nhttps://doi.org/10.6028/NIST.AI.600-1 \n \nJuly 2024 \n \n \n \n \nU.S. Department of Commerce \nGina M. Raimondo, Secretary \nNational Institute of Standards and Technology \nLaurie E. Locascio, NIST Director and Under Secretary of Commerce for Standards and Technology", "810d4e10-aa6e-4399-aee2-0740c4dc03c4": "48 \n\u2022 Data protection \n\u2022 Data retention \n\u2022 Consistency in use of de\ufb01ning key terms \n\u2022 Decommissioning \n\u2022 Discouraging anonymous use \n\u2022 Education \n\u2022 Impact assessments \n\u2022 Incident response \n\u2022 Monitoring \n\u2022 Opt-outs \n\u2022 Risk-based controls \n\u2022 Risk mapping and measurement \n\u2022 Science-backed TEVV practices \n\u2022 Secure software development practices \n\u2022 Stakeholder engagement \n\u2022 Synthetic content detection and \nlabeling tools and techniques \n\u2022 Whistleblower protections \n\u2022 Workforce diversity and \ninterdisciplinary teams\nEstablishing acceptable use policies and guidance for the use of GAI in formal human-AI teaming settings \nas well as di\ufb00erent levels of human-AI con\ufb01gurations can help to decrease risks arising from misuse, \nabuse, inappropriate repurpose, and misalignment between systems and users. These practices are just \none example of adapting existing governance protocols for GAI contexts. \nA.1.3. 
Third-Party Considerations"}} \ No newline at end of file diff --git a/Tasks/Task 4/test_questions.json b/Tasks/Task 4/test_questions.json new file mode 100644 index 0000000000000000000000000000000000000000..b42fdc1accadf61f16d49ae708a138ffd8ed3176 --- /dev/null +++ b/Tasks/Task 4/test_questions.json @@ -0,0 +1 @@ +{"a188ff31-68db-46bc-b419-ac0fdecc6b1b": "What is data poisoning in the context of GAI and how does it affect model outputs?", "c9f79a82-8775-4461-ac2f-b7b4e8734e96": "How do GAI systems pose intellectual property risks related to copyrighted material in their training data?", "80b370f2-6d21-4ff6-bc9b-763317af5321": "How can governance structures help mitigate bias in human-based systems?", "d4e20011-f4f9-4de6-8453-8144d77ba3a7": "What are some common concerns associated with human-based systems in terms of bias and effectiveness?", "12534c5f-eb68-41cb-a879-b908e95a65d0": "What are the best practices for establishing policies for data collection and retention to mitigate risks such as data privacy breaches and harmful biases?", "11951856-b35d-4123-ab15-80c38cd82195": "How can organizations effectively implement policies to protect third-party intellectual property and training data?", "869c7e52-2b77-4144-af5f-d5beebc138f9": "What are the key safeguards that should be included in automated systems to protect the public from harm?", "a310a614-4bd4-4302-ab42-37b6faa7d838": "How can early-stage public consultation improve the safety and effectiveness of automated systems?", "468bff4b-cdbb-489a-9cbd-95d9062f93dc": "What are some of the key investigative projects that Surya Mattu has worked on at The Markup?", "3e5b4545-4ceb-424f-b519-16c3870dd543": "How has Mariah Montgomery's role as National Campaign Director at the Partnership for Working Families impacted labor rights and policies?", "0f0eea1d-f05e-458e-b31a-e955629c7e44": "How can organizations effectively integrate pre- and post-deployment feedback into their monitoring processes for GAI models?", "6cc0bcc5-9c82-49a6-b118-3e333e70ee9e": "What are the benefits of using AI red-teaming in the pre-deployment testing phase for capturing external feedback?", "4441faa1-8f27-4fc7-bea0-847caa1c1505": "What are the potential negative impacts of automated systems on individuals and communities?", "0db8fdee-99cf-47c6-9d8d-a85f3b294826": "How can confirmation bias affect the effectiveness of safety mechanisms in technology?", "57db460e-0123-4edf-b7df-87a967a60c26": "What are the key safety metrics used to evaluate AI system reliability and robustness?", "48589831-4f3c-4bf6-9cb4-bc4277c489dd": "How can AI systems be designed to fail safely when operating beyond their knowledge limits?", "1df11168-7aa5-4b43-91df-c14c32f01440": "What are the risks associated with data brokers collecting consumer data without permission?", "2127b35f-68cd-4e5f-a669-a6a4bb532fa8": "How does the use of surveillance technologies in schools and workplaces impact mental health?", "afefb290-48ec-450c-b530-5fe1b6c5340b": "What is ballot curing and how does it impact the election process?", "eecbf085-2f16-45c4-ba65-35813ca84568": "How do different states handle signature discrepancies in mail-in ballots?", "43b6add5-244e-4c11-be3b-0944fecfa6b9": "What are the best practices for detecting and mitigating algorithmic bias according to the Brookings Report?", "37bbd6b6-d24a-4b73-a4f4-f532d8c1793a": "How can public agencies implement Algorithmic Impact Assessments to ensure accountability, as suggested by the AI Now Institute Report?", "318fe73a-0591-41e8-b65e-925c71b2caab": 
"How is the federal government addressing discrimination in mortgage lending through the Department of Justice's nationwide initiative?", "56664bc2-0933-4e58-8d03-5c06b9d06c04": "What role do federal agencies like the Consumer Financial Protection Bureau play in the Action Plan to Advance Property Appraisal and Valuation Equity?", "7f8b418c-6e85-4ab0-83db-b7ed7dc49a45": "What are the best practices for updating due diligence processes to include intellectual property and data privacy for GAI acquisitions?", "e81617a3-9609-4012-ba46-caa374c306de": "How can organizations effectively monitor and assess third-party GAI risks in real-time?", "054e5797-d024-41bd-8de9-983d038a8797": "What are the best practices for performing disparity testing and making the results public?", "fdb81ad2-acf2-4aa4-b551-fe766d22f273": "How can organizations effectively mitigate disparities identified through testing?", "09a4ef32-a01e-4ca9-9bf6-4704e328ccef": "How can people protect themselves from being tracked by devices originally meant for finding lost items?", "e314f460-f6e2-4d11-b612-d51529a9dee6": "What are the potential issues with using algorithms to deploy police in neighborhoods?", "741f5989-422f-4bc5-9f72-0f3b22bb4f25": "What are the mental health impacts of NCII on women and sexual minorities?", "19592c9a-0621-4629-bdfc-8a08f0d396b4": "How can GAI training datasets be protected from including CSAM and NCII?", "f95100da-f55f-4402-909d-fdde5cf17d25": "What are the key challenges in designing non-discriminatory AI technology discussed in the panel?", "bb3e7970-5b1e-4e98-87ad-b30d33ff6a89": "How can community participation enhance human-computer interaction in AI systems?", "796ffa10-1532-4fa1-b832-d8ee058d410d": "What are the potential sociotechnical harms of algorithmic systems as discussed by Shelby et al (2023)?", "2c38117e-4b2d-4553-b319-f4ba3997996e": "How does training on generated data affect AI models according to Shumailov et al (2023)?", "3b9c9379-cc75-4b9d-a68a-dc6b0a48fd9c": "What are the key suggested actions for managing GAI risks according to the AI RMF 10 and Playbook?", "1a02235f-7bf0-4e7e-8149-ab610eacb769": "How do the suggested actions for managing GAI risks vary depending on the stage of the GAI lifecycle?", "08cbf993-d60b-4982-bf84-140c29d30450": "How can organizations ensure that consent practices do not allow for abusive surveillance practices?", "e253b5ac-feb7-4116-9e68-d2c817da36a5": "What are the best practices for re-acquiring consent if the use case of data changes or if data is transferred to another entity?", "0260750e-4f7d-4c1b-b0b7-ae4c36cc8fc3": "What are the key principles outlined in the AI Bill of Rights?", "39863570-2d41-4d21-bde1-1afc78c157b0": "How does the AI Bill of Rights address algorithmic discrimination?", "8b4fd9d7-e1d4-472e-bd34-35fa98299c07": "How can we effectively track and document instances of anthropomorphization in GAI system interfaces?", "1f34befe-4432-419f-8465-066a0d82ff77": "What are the best practices for verifying the provenance of GAI system training data and TEVV data?", "3e35db8c-c1b3-4b9f-b6c0-a3fd6e52d2b0": "What is the importance of having demographically and interdisciplinarily diverse AI red teams in pre-deployment contexts?", "0c3b35ca-f421-41e4-b016-8f367561acbe": "How can general public involvement in AI red-teaming contribute to identifying flaws in AI models?", "d73655e4-93f0-41c5-b69e-814ff8189db8": "QUESTION #1: How are major universities using race as a predictor of student success?", "bb8b9729-d1b6-407d-9f0c-aa1bd62a8d78": 
"QUESTION #2: What concerns do students and professors have about using race as a predictor in education?", "3f1dec42-4087-4e06-9e7e-491c96cdee67": "How can AI-enabled systems contribute to building better and more innovative infrastructure?", "9a94cfb2-25b9-4aa8-94c5-987c53fa42bf": "What lessons from urban planning can be applied to the integration of AI technologies in communities?", "f445449e-b75f-44ee-a819-018ad630bd35": "What are the benefits of having a human alternative to automated systems?", "58fd202f-0791-411a-9124-09381dbbad11": "How can one ensure timely human consideration and remedy when an automated system fails?", "26ee0f55-a947-440f-b4bc-4b7def4e3545": "What are the main findings of the Department of Justice's report on the risk assessment tool for predicting recidivism?", "dcb01564-a34f-42a7-ac6c-13764525a7d2": "How is the Department of Justice addressing the disparities in the risk assessment tool for predicting recidivism among different groups of color?", "8e29d29a-fc98-4a6f-b42b-580fc084dd71": "What is the Executive Order on Advancing Racial Equity and Support for Underserved Communities Through the Federal Government?", "4bbc6d4b-6b67-4831-8bca-853eb46aec3a": "What were President Biden's remarks on the Supreme Court decision to overturn Roe v Wade?", "919fdd1d-2abb-472e-ac8d-bde9df2bb391": "What are the best practices for re-assessing model risks after implementing fine-tuning or retrieval-augmented generation?", "a795e873-419b-454b-8598-fb0c49a7e5cc": "How can organizations effectively review and manage training data to prevent the reproduction of intellectual property or CBRN information in AI outputs?", "242b750e-1236-41f7-a1cc-eedef8f0427d": "What are some common examples of AI incidents that organizations should be aware of?", "c506e557-776f-42ed-99f9-c752ac2bb94b": "How can organizations effectively track and document the provenance of datasets to identify AI-generated data issues?", "426616e2-6297-47c3-89c7-71ec1186cdba": "What is the role of the American Civil Liberties Union in protecting digital privacy?", "db43af55-434d-441f-8dc7-acc8ff3f8432": "How does the Center for Democracy & Technology advocate for internet freedom and security?", "f2913868-28a6-4558-904a-0486fbfc1f6e": "How can organizations ensure the accuracy of predictions or recommendations generated by automated systems?", "5a4faa70-0364-4fd0-9c98-b26fb63f7786": "What are the best practices for implementing ongoing monitoring procedures for automated systems?", "3ad57490-e4f7-4fd2-bff4-93211043ec13": "What are the key considerations for implementing automated systems in sensitive domains like criminal justice and health?", "3a05d7ba-2e46-406b-aeb9-51b33efff15f": "How can organizations ensure meaningful oversight and human consideration in high-risk automated decision-making systems?", "6ae09ea8-3090-401b-9f1e-4ce5270152cd": "What are the privacy expectations for automated systems handling sensitive data?", "207207ff-faab-4342-b76f-ef0c6fac88c9": "How should consent be managed for automated systems collecting sensitive data?", "d15a10aa-36cb-4f3a-9f9e-2c0416ce1084": "What is the contact information for inquiries related to NIST AI publications?", "c9f4fb11-9365-4354-aa94-7cc93efcafb5": "Where can I find additional information about NIST AI publications?", "4aebac20-11d4-42c8-be6a-f7ac4e43cbbc": "How can organizations effectively combat automation bias in automated systems?", "71537f88-7e77-4720-9cc0-bca516b4721f": "What are the best practices for training individuals to properly interpret 
outputs from automated systems?", "e004e796-65d5-4109-89bd-472cae5b6c75": "What were the FTC's findings in the case against Everalbum Inc.?", "290cd0b2-456b-41bc-bf0e-3ea3e32f480d": "How did the FTC address privacy concerns in the case against Weight Watchers and Kurbo?", "1c416614-5e28-45f4-9e8e-937971dcff9a": "What are the potential harms of GAI related to misinformation, disinformation, and deepfakes?", "d83ab93d-9be0-488d-94fd-8e58074a3388": "How should organizations disclose the use of GAI to end users to mitigate risks?", "bfc45e93-d073-4348-8fb1-03dfaf4e73f3": "What measures can designers and developers take to prevent algorithmic discrimination?", "4819bdb4-1724-4318-855c-9c4f680c0655": "How does algorithmic discrimination impact different protected classes such as race, gender, and disability?", "a8a96840-d387-42d9-9b56-f05b73027f5c": "What are some innovative solutions provided by the industry to mitigate risks to the safety and efficacy of AI systems?", "7fdb6c15-c3f8-4327-b2fe-0169c08ce375": "How does the Office of Management and Budget (OMB) suggest expanding opportunities for stakeholder engagement in program design?", "3509c40f-7af0-49a5-bd16-c7da584b3980": "What are the nine principles outlined in Executive Order 13960 for the use of AI in the federal government?", "a86eba64-72a8-4afa-a7f5-8c50c3b0c660": "How can laws and policies ensure that AI systems are accurate, reliable, and effective in real-life applications?", "9eee9d68-6e0f-4430-989f-cb569677d74c": "How can we distinguish between fact and opinion in the content generated by AI systems?", "6fba0797-2aaa-4686-9325-999b5396f47b": "What are the risks associated with the anthropomorphization of generative AI (GAI) systems?", "449ab90b-3762-4d3e-99ea-899bd340c42b": "What are confabulations in the context of text-based outputs?", "1c57be24-8e1d-4a3a-a29e-1d153c019510": "How do legal confabulations manifest in state-of-the-art language models?", "6b30e12e-cecf-4cd7-936e-84468c950a36": "What is the purpose of the Executive Order on Advancing Racial Equity and Support for Underserved Communities Through the Federal Government?", "5547bf9b-ceae-4386-a486-7708637ab6a1": "What role do Navigators play according to HealthCare.gov?", "a520c4cc-f2f6-4dd8-bd3a-a1a750440209": "What are the key principles outlined in the ISO/IEC Guide 71:2014 for addressing accessibility in standards?", "d72c0d17-abee-470b-8725-abf4aad59b3f": "How do the Web Content Accessibility Guidelines (WCAG) 2.0 impact web development practices?", "8e31e286-3ac3-488f-a211-4575fd663a17": "What are the key expectations for automated systems to ensure data privacy and protection from unchecked surveillance?", "76f71eb0-f3b8-425d-8772-65a5d214634f": "How can heightened oversight of surveillance systems prevent algorithmic discrimination based on community membership?", "eb21dff3-4dd0-47af-a449-b9b525386911": "What are the key considerations for ensuring equitable outcomes in fallback and escalation systems for automated systems?", "645a1801-9128-4977-947d-5437b8933966": "How can organizations ensure that human consideration and fallback mechanisms are conducted in a timely manner for automated systems?", "50321a04-5130-43ab-9305-cc1d548da8e0": "What are the extra protections for data related to sensitive domains like health and personal finance?", "32f6e506-6e82-41f7-b80c-f0702a537ca2": "How do technological developments impact the sensitivity of data domains and the need for enhanced data protections?", "7a80ac97-319d-452b-a900-e739da72ab44": "What are some benchmarks 
used to quantify systemic bias in GAI system outputs?", "bedb600e-b951-4e89-9442-24b971ff1b21": "How can fairness assessments help measure systemic bias in GAI systems?", "5fa67d29-3be8-4c81-a7c9-1a4d5dfa0ba7": "What are the potential biases in hiring tools that learn from a company's predominantly male employee base?", "868637a7-88fa-4891-bcfd-da1d37772744": "How do predictive models that use race as a factor affect Black students' academic guidance and major selection?", "6410524d-24f8-4aaf-8b70-5dcfc8272cd0": "What measures are being taken to prevent the misuse of Apple AirTags for stalking and harassment?", "d0990582-29f1-41c2-90e1-89c4efc58153": "How does crime prediction software perpetuate biases despite promises of being free from them?", "cbc06d96-6605-45f4-8067-0342ab04aac4": "What are the key elements to consider when incorporating GAI systems into an AI system inventory?", "b16b4f6b-0ec2-49bc-9453-9bbf1a8feea5": "How should organizations handle inventory exemptions for GAI systems embedded into application software?", "b1da9e4e-62f7-4d08-ac87-2b196fa9114e": "What measures can be taken to ensure automated systems protect against algorithmic discrimination?", "aa279423-ea2b-4fa2-beb1-7a6e1400c36f": "How can independent evaluations of automated systems be conducted without compromising individual privacy?", "8a9ae766-2f74-4272-bd84-e95787e5e943": "What are the best practices for determining data origin and content lineage in AI systems?", "2567081e-89ba-4d98-a746-eaf8503e5c5d": "How can test and evaluation processes be instituted for data and content flows within an AI system?", "fa585f44-6fb6-443b-983e-6304d9c2f5e1": "What are the expectations for automated systems in high-risk settings like criminal justice?", "f51b4b1e-689f-4a47-82a2-d9a9a0d30ab7": "How should the level of risk influence the design of explanatory mechanisms in automated systems?", "ecb05eb6-335e-4451-bf2e-4c8ad8e800bf": "What are the current methods for reporting AI incidents?", "51d512f0-8849-48de-a188-5aab8ddee724": "How do publicly available databases decide which AI incidents to track?", "80d2e492-0668-4e82-b83e-d1cef2355444": "What is the NIST AI 600-1 framework about?", "b7e8353e-ffb6-41a5-a321-d6b5521a03d5": "How does the NIST Trustworthy and Responsible AI framework address generative artificial intelligence risks?", "acbfd37b-65e1-440b-b4b1-9b3ee9a15fac": "What are the best practices for establishing acceptable use policies for GAI in human-AI teaming settings?", "c86085f1-bc71-4d66-8869-d5335b328ec7": "How can organizations effectively implement synthetic content detection and labeling tools?"} \ No newline at end of file diff --git a/Tasks/Task 4/training_dataset (2).jsonl b/Tasks/Task 4/training_dataset (2).jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ce64d3ac42f979075538919414cded75dd021fb5 --- /dev/null +++ b/Tasks/Task 4/training_dataset (2).jsonl @@ -0,0 +1 @@ +{"questions": {"1d8a0cfc-df53-467f-88e8-cd378990da4b": "What are the key steps to obtain input from stakeholder communities to identify unacceptable use in AI systems?", "9ec9d981-9115-4672-bd74-23035ecc2e7f": "How can organizations maintain an updated hierarchy of identified and expected GAI risks?", "0605df28-3443-4e71-b065-c5e957b1a3be": "What are some examples of unacceptable uses of AI as identified by stakeholder communities?", "bf3d5106-092b-4dcd-84c8-a6f876550060": "How do harmful bias and homogenization impact AI systems?", "84fa4ea9-be18-4dd5-9e29-a6a5c02feb54": "What is the significance of 
reevaluating organizational risk tolerances in the context of AI and GAI?", "bb5dcf48-cb19-47b4-b313-265f9f7cb3c8": "What are the potential risks associated with model collapse and algorithmic monoculture in GAI systems?", "bc5f183f-8819-4d54-9853-b8f799e02906": "How can organizations address issues related to obscene, degrading, and/or abusive content in AI systems?", "8d9f5ceb-fce8-4d14-9811-6140c0e69900": "What strategies can be employed to mitigate dangerous, violent, or hateful content in AI applications?", "2f174ff7-4261-4c60-a49e-51bc536fd900": "How do immature safety or risk cultures affect the design, development, and deployment of AI and GAI systems?", "1cef392e-71ff-400b-a4bb-39f4c8d78dab": "What are the public information integrity risks associated with AI and GAI, and how can they be managed?", "b2fdeb09-6112-4cbd-a860-6b770d65c9ef": "What are the short, mid, and long-term impacts of AI in cybersecurity according to De Angelo (2024)?", "50a10f19-e2b0-4f53-bf39-c09e6b2adedb": "How do chatbots and generative AI affect mental health, based on the insights from De Freitas et al. (2023)?", "41d1c796-9d40-450d-bb90-58541f4dc294": "What is algorithm aversion and why do people avoid algorithms after seeing them err, as discussed by Dietvorst et al. (2014)?", "8ea4ce00-241b-4a28-9f10-127da59b19a3": "How do companies learn consumer secrets according to Duhigg (2012)?", "ee13b775-a27a-4b83-b019-8da1bfe33cd4": "How can images altered to trick machine vision influence humans, as explored by Elsayed et al. (2024)?", "0835eb5b-ff9b-4c95-98a1-c1aa96b695d1": "What are the key findings of the Harvard Business School study on the safety of generative AI in mental health?", "a25fa3f1-5fad-4e5f-8635-3c5d7564bc9d": "How does the New York Times article by Duhigg (2012) explain the methods companies use to track shopping habits?", "cad3f6f1-e1cf-428f-b6c0-986a72e27923": "What are the implications of the research by Google DeepMind on altered images and machine vision?", "7795698f-b343-44b2-90b7-333d3e874947": "How does the study by Dietvorst et al. (2014) contribute to our understanding of human interaction with algorithms?", "0563fa4d-b516-4410-ba89-1f0315067735": "What are the potential risks and benefits of AI in cybersecurity as outlined by Palo Alto Networks?", "32e11143-ad57-48c7-b0ce-e4922c8b1cc7": "What are the different risk response options mentioned in MANAGE 1.3 for high-priority AI risks?", "94e3e1d2-3500-48e6-97a6-8c83517b41e0": "How should organizations document trade-offs and decision processes for AI risks that do not surpass risk tolerance?", "abb3f98f-480d-4f2d-bf70-0bb22379dbae": "What is a staged release approach in the context of model release for AI systems?", "2a61da93-9e17-4be7-862f-ff9f941284a1": "How can organizations mitigate, transfer, or avoid AI risks that surpass their risk tolerances?", "6b6dc282-ed05-4387-ae3c-28fea9f6aa32": "What methods can be used to monitor the robustness and effectiveness of AI risk controls and mitigation plans?", "a435043d-540e-4134-9d59-e921db6866dd": "What is the role of red-teaming in assessing AI risk controls?", "c64396a8-0017-4d46-aea9-58f81191f6b9": "How can participatory engagements help in monitoring AI risk mitigation plans?", "90341a67-3948-47fe-acaa-32e9896acdd5": "Why is it important to consider the projected use cases of a model when planning its release?", "a93e9f47-5572-4f65-810c-e65f0c0cbc35": "What are some examples of performance assessments for AI risk controls?", "6b14b109-2e7e-4d40-b149-1eb05bc882a0": "How can user feedback 
mechanisms contribute to the effectiveness of AI risk mitigation plans?", "cde52e0d-45eb-41b7-89ac-0e8e28ac0457": "What methods can be used to trace the origin and modifications of digital content?", "0d6f47cc-2f11-4d1e-9d54-90b10f6bf2b6": "How can tools designed to analyze content provenance help in detecting data anomalies?", "ed8c6ae4-9a32-44a5-b820-0681f03fd325": "What are the best practices for verifying the authenticity of digital signatures?", "cf07e760-9b9f-4be0-aaf2-9ce158ee1c98": "How can patterns associated with misinformation or manipulation be identified?", "7a9b270d-4022-4dee-8044-c221948c7d25": "Why is it important to disaggregate evaluation metrics by demographic factors?", "93f3824c-e2a6-424d-a45b-f7a6e593c5d7": "What are the potential risks of harmful bias and homogenization in AI systems?", "c0441d12-0139-47e0-a780-13868a3b0469": "How can discrepancies in content provenance mechanisms across diverse populations be identified?", "7a65af06-b620-44fe-90b4-53bdcb7c8559": "What metrics can be used to evaluate structured public feedback exercises?", "3cc7ff9c-22a7-4d2c-a02f-353ba7ef9fe1": "How should risks or trustworthiness characteristics that cannot be measured be documented?", "daf97136-c510-4453-a3f3-dfdd41c8a7eb": "What are the most significant AI risks that should be prioritized for measurement?", "4f8ef4f7-9616-44aa-abfe-8cdaf1756b90": "What are the key points of the Executive Order on the Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence issued by the White House in 2023?", "6f46f00e-c286-4eb0-815e-32faf4967e59": "How does the 2022 Roadmap for Researchers on Priorities Related to Information Integrity Research and Development address misinformation?", "bf6d72d3-a20e-4589-8cbd-9e4fcf8ff4f9": "What were the findings of the Stanford Cyber Policy Center's investigation into AI image generation models trained on child abuse?", "1c142ec8-b4a3-456e-ba1c-a485944b6ca4": "How does the White House's 2023 Executive Order aim to ensure the ethical development of AI technologies?", "b39288aa-0a9d-4d94-82e2-35409659c7d1": "What are the main objectives outlined in the White House's 2022 Roadmap for Information Integrity Research and Development?", "21eaa61b-6208-40e7-b9a3-5a37038f9d48": "What measures are being proposed to secure AI development according to the 2023 Executive Order by the White House?", "1233bc76-0ce0-4740-bad7-5de774ca0fd7": "How does the 2022 Roadmap for Researchers contribute to combating disinformation and ensuring information integrity?", "988d7c42-6b16-42d7-8c3b-eca27423ec98": "What are the implications of the findings from the Stanford Cyber Policy Center regarding AI image generation models and child abuse?", "d0ee89a9-21ce-4521-a946-de628b905143": "How does the White House plan to address the challenges of AI safety and security as per the 2023 Executive Order?", "23394fb9-945c-4c8e-a361-a1cfd375abe0": "What steps are being taken to ensure trustworthy AI development according to the latest executive order from the White House?", "37aabec8-0539-4438-8489-8a52afbff96e": "What are the best practices for implementing real-time monitoring processes for content integrity?", "a8ce087b-ada7-4cca-8488-e90b57a28b9f": "How can machine learning models be used to flag illegal or violent content in GAI applications?", "607fa183-8c10-4e68-860f-41798c764cc8": "What are the challenges in detecting CSAM and NCII content using automated filters?", "f9c3b55d-7f98-4fca-a8b3-2276e0cf58d9": "How can rule-based filters be effectively combined with machine 
learning models to identify harmful content?", "f8fcd7dd-4077-4dd8-8a19-cea03ba0bda5": "What measures can be taken to ensure information integrity in generated content?", "21f56f77-f14f-4eb0-ae84-a15ddf44c478": "How can real-time monitoring help in identifying deviations from content standards?", "c1c10c23-aba8-48a1-a47f-0ea7b9ee4058": "What are the key characteristics to monitor for ensuring content trustworthiness?", "bb1d1592-7123-4163-a963-913da8006be9": "How can human intervention be effectively triggered by automated alerts in content monitoring systems?", "07ea05d4-6b90-4936-9dae-b67d0e70640d": "What are the potential biases that can arise in machine learning models used for content filtering?", "2f84ffbd-1a21-475b-a55e-ed7b70ad40b5": "How can the homogenization of content be prevented in GAI applications?", "9e4c34f4-3d3e-45f8-b44c-86725048d92b": "What are the key findings of the study \"Dissecting racial bias in an algorithm used to manage the health of populations\" by Ziad Obermeyer and colleagues?", "64f82a67-4768-4e6b-a43d-f04086818e85": "How does the Data & Trust Alliance propose to safeguard against algorithmic bias in the workforce?", "7f45b7a7-9fcf-4391-89e9-8cc77c095f2a": "What are the main IT accessibility laws and policies outlined on Section508.gov?", "78c93a0c-adfa-4c7e-a126-d47d3e13352b": "Can you summarize the 2019 Science article on racial bias in health management algorithms?", "f9d25a5e-23c8-45ee-aef2-d7db0c12b767": "What measures are recommended by the Data & Trust Alliance to ensure fairness in workforce algorithms?", "86604d73-f71a-46b4-b444-0f78f434d3a8": "How does Section508.gov help organizations comply with IT accessibility requirements?", "511742cc-dc64-43a3-a033-bd545ce33e57": "What are the implications of the findings by Obermeyer et al on racial bias in health algorithms for healthcare providers?", "04a0b08c-92b8-44bb-886c-252f86746d2c": "What is the purpose of the Algorithmic Bias Safeguards for Workforce document by the Data & Trust Alliance?", "3e71278b-e893-4a19-955e-1505554c8686": "How can organizations implement the guidelines provided by Section508.gov for IT accessibility?", "04516eef-32ce-4fcc-8b77-263ca9de6e10": "What are the potential impacts of algorithmic bias in workforce management as discussed by the Data & Trust Alliance?", "a0c0459c-054c-42ad-bffd-3e69e7a03bbc": "What are the potential impacts of automated systems on data privacy?", "b159acfb-35e3-4cbb-8158-d4d89ccc5870": "How can we ensure that automated systems do not use data inappropriately?", "391ca451-b0e7-43cc-afcc-32b7d1fa8da7": "What measures can be taken to protect against the compounded harm of data reuse in automated systems?", "b15c0ca5-79fe-4cb6-971d-891ab63966bb": "Why is independent evaluation important for the safety and effectiveness of automated systems?", "f8ed278b-212e-45c9-ae9f-5fe98bbd00fb": "What steps should be taken to mitigate potential harms of automated systems?", "c034102c-8cc6-4f9f-aa97-54c4123b05b8": "How can the results of independent evaluations of automated systems be made public?", "19a80a1d-4c42-4b4a-a9f1-8c58f96c948b": "What are the foreseeable uses of automated systems in various industries?", "84b94c19-44d8-4b0e-a9ea-21d1b4e745a9": "How can we ensure transparency in the development and deployment of automated systems?", "08b5ee06-991e-4f06-9629-71d3500295ab": "What are the best practices for reporting the safety and effectiveness of automated systems?", "4328f04f-39f0-472e-a479-26bcd1915133": "How can we protect individuals from the misuse of
data by automated systems?", "03a9fe91-d78e-473a-a2f8-3106648d7314": "What are black-box credit models and why is the CFPB acting to protect the public from them?", "c5f06a48-24b4-4bf0-85f8-836e9d96099d": "What does California's AB 701 law entail for warehouse employers regarding quotas?", "2335610a-fe1a-4ccd-b194-0d8b2734a4d1": "How does the National Institute of Standards and Technology (NIST) contribute to AI explainability research?", "fe0c5f11-3d4c-44d7-8f27-6a2da2f8536e": "What is the significance of explainable artificial intelligence (XAI) according to DARPA?", "5342b516-b61d-4ea4-a2da-9a05638d3918": "How might complex algorithms in credit models impact consumer finance?", "f43fcf7f-345c-4942-b43d-e75917a9d53a": "What are the potential benefits and challenges of implementing AB 701 in California warehouses?", "4a15e20a-7144-40e1-93e3-114864d97085": "Why is explainability important in artificial intelligence research?", "32cfcd73-0b8c-4469-91c1-5f5209f15d84": "What are some examples of black-box models in the context of credit scoring?", "85538c5c-826d-45bc-8de8-297408b1897a": "How does DARPA's XAI program aim to improve the transparency of AI systems?", "08be8570-a2e0-4e56-af80-157c2427baf9": "What steps can employers take to comply with California's AB 701 law on warehouse quotas?", "24c87258-bf5b-4144-b1cf-149daa5a5c29": "What is the NIST framework for AI technologies and systems?", "1f1e3f39-8772-4eb1-b7f0-12ff89060503": "How does the NIST framework address robustness in AI systems?", "69e2d6b3-12b3-47d1-a249-65657ba34d64": "What measures does the NIST framework propose for ensuring the safety of AI technologies?", "f139859c-a3ce-4700-a971-5548dc026253": "How does the NIST framework aim to enhance the security and resilience of AI systems?", "04bebb3a-8ecf-4543-8736-671e1a4585be": "What strategies does the NIST framework suggest for mitigating unintended and harmful biases in AI?", "df7b1cdc-bb0a-43b5-a10c-7189eb63373e": "How does the NIST framework promote transparency in the development and deployment of AI technologies?", "7cb4296e-4abe-4bd0-a8ff-edaf4a243ffd": "What accountability mechanisms are included in the NIST framework for AI systems?", "12b8555f-380e-4b4a-adc7-418d59a61297": "How does the NIST framework ensure fairness during the pre-design and design phases of AI development?", "3aef7413-5f60-4e9b-a6ac-191b6972651c": "What are the key principles of the NIST framework for AI testing and evaluation?", "65303757-3ce8-4416-96eb-99d843d040d1": "When is the NIST framework for AI technologies expected to be released?", "a7a1d3dc-f420-48c3-a3cb-1e0a676e9e7b": "What are the key considerations for using AI in sensitive domains like criminal justice and health?", "24566fed-3d75-47f8-80b2-03f3d0e9e7d3": "How can meaningful access for oversight be ensured in AI systems used in employment and education?", "1cc3e51d-3cba-4b58-b525-f86babb21bfb": "What kind of training should be provided to people interacting with AI systems in sensitive areas?", "e0f9ad8a-c33a-4717-88ac-d8559f6c974a": "Why is it important to incorporate human consideration in high-risk AI decisions?", "d4342127-5df0-405f-bbe8-dc24711944be": "How can organizations report on the human governance processes of their AI systems?", "65bf43cf-81c8-4872-ae3b-81bf21751ba3": "What are the best practices for assessing the timeliness and effectiveness of AI systems in sensitive domains?", "2f3abec1-74b5-405f-b734-843ecc211a3a": "How can transparency be maintained when using AI in areas like criminal justice and health?", 
"a4768a2a-62c0-471c-ab81-e9e0d81d83c0": "What are the potential risks of not including human oversight in AI systems used in employment?", "d68eeb41-7bcf-4744-80e9-7614c92438a8": "How can adverse decisions made by AI systems be mitigated in sensitive fields?", "5d143734-c64d-4ee5-9600-544de3dc380b": "What role does public reporting play in the governance of AI systems in education and health?", "2aac2f6a-920b-4393-b61e-98aa36d35f27": "What is the role of NIST in the research on explainable AI systems?", "6de62d16-19cb-45d2-949b-92609186983f": "What are the core tenets of explainable AI that NIST aims to support?", "8d9912fd-4615-4118-8134-a893b80920c1": "How does the Defense Advanced Research Projects Agency contribute to explainable AI?", "8e48c274-2976-4927-b9ab-6bef0546c6e1": "What are the goals of the Defense Advanced Research Projects Agency's program on Explainable Artificial Intelligence?", "7c6ecd5b-5475-43a4-8259-af3a5f014d39": "What is the importance of explainable AI in machine learning models?", "cfab0915-6e9f-412a-a432-bc0a06148923": "How do explainable AI systems help human users?", "eaa0dca7-f7a9-4bac-a8f3-dc0e410b9a94": "What are the potential adverse employment actions for failing to meet a defined quota?", "67312adb-b7a3-4c16-84b9-4e4697e002e8": "What kind of research is being conducted across federal agencies on explainable AI?", "d1c669d5-4d91-4300-8e70-5a49b9b7b517": "What are the best practices for implementing explainable AI according to NIST?", "a44278d2-1d80-4211-93b9-785f091ee871": "How does explainable AI maintain a high level of learning performance while being understandable to users?", "670b6be9-bff2-4e3f-9ddb-48503a59925b": "What are the key components of effective governance procedures for automated business processes?", "966409cb-250a-4100-9cb5-4cc606c16452": "How can organizations ensure that stakeholders are adequately involved in the governance of automated systems?", "42ce5b4d-fe04-4d95-87e8-bd0a8f2a1cf3": "What level of organizational responsibility is necessary for prompt decision-making in resource allocation and incident response?", "5a9cd608-2ac4-4bfa-8cfa-ca357e9841ac": "Why is it important to consider risk mitigation objectives against competing concerns in automated systems?", "a18b2932-8fa5-4635-9302-7ee0cbaf194a": "How should organizations handle use cases that have a significant impact on people's rights and opportunities?", "379549a5-d7e5-47a9-bafb-a7408746aca0": "When is it appropriate to conduct an independent ethics review before deploying an automated system?", "03540efe-c7b8-4bd6-8835-c6f7bb4e30b5": "What are the risks associated with the use of inappropriate or low-quality data in automated systems?", "09f74024-8869-4f32-b626-2138e3564a74": "How can organizations prevent the compounded harm of reusing low-quality or irrelevant data?", "d8dc0a3b-18dc-42ae-93f3-7614e9b55cfa": "What procedures should be in place for risk identification in automated business processes?", "ecdf8fe5-f149-44ac-b465-3fbacb40865e": "How can organizations balance the need for automation with the potential ethical implications for stakeholders?", "f5606d22-27dc-4b36-9ace-72d6cc48660e": "What are the best practices for engaging diverse communities when introducing a new automated system?", "ae491447-4cf4-4991-b43e-08454411a5a6": "How can we ensure that the concerns of disproportionately impacted communities are adequately addressed?", "7ee78a3e-1ea7-4da5-875a-f65aa6f20054": "What types of experts should be consulted during the development phase of an automated 
system?", "2078e8f7-9bea-4153-bc28-2944699c7a6e": "How can privacy and civil liberties be protected when implementing large changes in automated systems?", "7b087c9c-e491-4d32-b692-81313a1e6afe": "What are the challenges of maintaining confidentiality in private sector consultations before product launch?", "208c1297-47eb-4aca-9561-f53fdb900d7b": "How should government applications balance the need for confidentiality with stakeholder engagement?", "44b1a320-c340-4a37-bcf5-c93a6963e2e0": "What are the specific considerations for law enforcement applications of automated systems?", "c223bce2-8a81-489c-9230-31400072a3fc": "How do preexisting oversight laws impact the consultation process for new automated systems?", "8b96696a-d368-4159-b644-3e82a3269da4": "What strategies can be used to engage sector-specific experts effectively in the consultation process?", "e0ca9dfa-fbe1-41a0-b064-9e4655a76477": "How can the risks unique to certain communities be identified and mitigated during system development?", "ee3ed9e9-e13a-43ae-8468-ceb8ffcf49b6": "What are some current uses of technology that impact equity of opportunity in employment?", "56c869aa-9f69-476f-bb2c-ff1cb0080445": "How is technology influencing equal opportunities in education?", "ecc2c5eb-78a6-4d19-b33b-19b08c0fdbfe": "What role does the White House Office of Science and Technology Policy play in promoting civil justice?", "db262b36-e3f4-48a0-bddc-dc36a289a41e": "How can technology be leveraged to ensure fair housing opportunities?", "6d61f78a-7ebf-45f4-a5e6-4881a583f4c8": "What insights did Christo Wilson provide regarding technology and employment equity?", "d1b8703a-3114-4088-9309-0c02821f5a37": "How does Pymetrics, led by Frida Polli, use technology to promote equal opportunities?", "80cdf547-f019-49d5-b954-fc44fb501eef": "What are some emerging technologies that could impact civil justice, according to Karen Levy?", "074222ac-fda2-47de-b57c-5ddc1063b7ba": "How does Upturn, directed by Natasha Duarte, address issues of technology and equity?", "55189c9a-9917-4b08-9b39-7f0a6c48256e": "What legal perspectives did Elana Zeide offer on technology's role in civil justice?", "50e8150e-4538-45b0-a17d-be191d1fa248": "How does Fabian Rogers' work with NY State Senator Jabari Brisport's office relate to technology and community advocacy?", "eb5cb844-864a-468b-8c58-527d3531343d": "What are the best practices for involving national security professionals in AI system risk management?", "62d2b5a7-b986-4048-bb97-766d7b98ce5e": "How can organizations effectively map and measure national security risks associated with AI systems?", "f72b3652-6c80-4c83-8aeb-9eee8ef96c3e": "What mechanisms can be implemented to protect whistleblowers in AI governance?", "afb866f7-aa0b-4116-9378-f258a4bf12ad": "How should organizations handle CBRN information or capabilities in AI systems?", "a1ddee9f-3320-4745-8c67-624c95b262a7": "What steps can be taken to manage dangerous, violent, or hateful content in AI applications?", "099e3b3a-218d-4c72-8460-0c899484ecd1": "What are the key components of an effective whistleblower protection mechanism in AI governance?", "b923df73-4a58-4d22-8b55-1a01145c5ac7": "How can AI systems be designed to comply with information security requirements?", "1464f6bf-4914-46ea-a181-13cbb9e7058b": "What role do national security professionals play in the oversight of AI systems?", "4e333af2-62cb-4a18-9737-5e42d27891af": "How can organizations ensure that they are not violating laws related to public safety with their AI systems?", 
"44cf4f5a-ee5c-4173-8c24-e6a9084636d9": "What are the challenges in providing protections for whistleblowers in the context of AI governance?", "732e3e58-5725-48ab-a40c-961aea398141": "What are some methods to evaluate gender bias in coreference resolution systems?", "c0e18063-83e5-462a-b99d-ca04e73f72f1": "How can gender bias in NLP coreference resolution systems be detected?", "ab81895e-d428-4fd5-bed3-030440a61e22": "What datasets are available for assessing gender bias in coreference resolution?", "33ec5775-e26d-4365-a8e5-69cfded28cf2": "Are there specific metrics to measure gender bias in coreference resolution systems?", "595f4527-0e21-40a7-b9ec-6a757d80f511": "How does gender bias impact the performance of coreference resolution systems?", "1bc870b7-1589-44a0-ae78-e41f637ce93b": "Can you recommend tools for evaluating gender bias in NLP coreference resolution?", "25ab951c-e59e-43dc-9315-79f04297d81b": "What are the challenges in evaluating gender bias in coreference resolution systems?", "c8004ebf-4187-4dab-b1fe-1c725fa9d4f4": "How can we mitigate gender bias in coreference resolution models?", "99062576-8040-45bb-ba14-33e7d06843c4": "What role do pronouns play in gender bias within coreference resolution systems?", "f631fe4b-8511-4605-b562-6b32a5fc4ce3": "Are there any case studies on gender bias in coreference resolution systems?", "e909abea-82ea-425e-9eb4-e7fc5cfd1afa": "What are the key technical and governance interventions needed to protect against the harms of emerging technologies?", "8f2b141e-7e25-48c4-8d6e-c7649407098e": "How can transparency and data collection help in mitigating the negative impacts of new technologies?", "2504edde-ca0f-4ab0-b316-072c94ed4fd7": "Why is flexible and reactive policy development important in the context of technological advancements?", "ee944244-c6e4-4f82-81c9-f965ee4f8f7c": "What role do clear guidelines play in fostering a consistent environment for innovation in technology companies?", "fddee110-d5c1-4e71-a392-a18885c4d603": "How can principles and guardrails contribute to responsible innovation in the tech industry?", "0a8ddcee-d0d1-41e1-a6c9-246be5b5ae5f": "What are the current and emergent uses of technology in the criminal justice system?", "c51a2c1a-9f87-488a-9d42-41968bf4cab1": "How do technological advancements in the criminal justice system impact public safety?", "95c6ba82-ae2a-411b-8572-31a3d48cc6a7": "In what ways can technology undermine justice and democratic values within the criminal justice system?", "0c97280b-063a-46a8-bbd5-f47e888354e5": "What are the potential benefits of using technology in the criminal justice system?", "fa8d6dc4-7eba-45d5-a66a-eb6b8ab2ecad": "Who is Suresh Venkatasubramanian and what is his role in the White House Office of Science and Technology Policy?", "21ceca0c-ce44-43e3-8f77-a9dc1f5fbcc7": "How has facial recognition technology led to wrongful arrests?", "d3a9c980-e575-44e6-bab5-07aaca0e2886": "What are the implications of AI bias in law enforcement?", "7f116d0d-2c08-40d1-817d-6f4c1826f047": "How many Black men have been wrongfully arrested due to facial recognition errors?", "380fee90-ebb8-4757-8fa4-7bb4a3b50dfd": "What steps are being taken to prevent wrongful arrests based on AI?", "f0b6fb64-5d64-4512-97bb-0a674ad3b42f": "How did the wrongful arrest affect the Jersey man accused of shoplifting?", "e2b27dbd-6b32-4fba-8260-c9b54e2ac8de": "What are the consequences of AI bias in recruitment, as seen with Amazon's tool?", "dbe0bf87-a716-4127-9f80-f9e325332d95": "How can AI systems be improved to 
avoid racial and gender biases?", "b8b6fbea-dea8-487e-bf33-6235d3a607e2": "What is educational redlining, and how does it affect student borrowers?", "a0a96c91-6c5a-4fd0-8127-b7c974303838": "How did the wrongful arrests based on AI impact the lives of the three men mentioned?", "dbd41bb0-0c09-4495-b286-dc9622de9e86": "What measures are being implemented to address AI bias in various sectors?", "0e1301c3-0b83-4603-9d39-8c374b37340d": "What are the benefits of documenting and reporting GAI incidents?", "c00fe054-52c2-4561-80e1-4b27a535ac03": "How can standardization of GAI incident reporting improve risk management?", "fd45462e-12d0-40d0-a428-90041a7dff99": "What roles do AI Actors play in reporting AI incidents?", "a531e2af-3989-43fb-8d5a-5808c6ec17bc": "Why is it important for organizations to develop guidelines for incident reporting?", "69db45ba-aef9-4c24-b15a-824ead34c0cf": "How can guidelines help AI system operators identify GAI incidents?", "c8828f01-879b-4d90-bc2c-f97dde5bf765": "What should be included in the guidelines for publicly available incident reporting?", "ce0d270a-d64e-4526-97cf-96ef519cf24a": "Why is the documentation of third-party inputs and plugins crucial for GAI systems?", "b8747453-8022-467b-b1b5-5145d44ff203": "How can greater awareness of GAI incident reporting promote transparency?", "ddd8c6c0-1296-4762-b557-374a51ff3add": "What measures can organizations implement to prevent future AI incidents?", "14fc80ec-eef8-48bb-9d01-33069ee9b8fe": "How can AI Actors trace the impacts of GAI incidents to their source?", "df8094ac-8e28-4a78-838d-606526447758": "What are the different applications of GAI systems in organizational settings?", "4b8eb074-1b7f-448a-ac6d-be4d1ddaed05": "How can organizations restrict AI applications that exceed risk tolerances?", "3710b3fa-3a13-4381-b514-9fc7f8dea69b": "What governance tools can be applied to GAI systems?", "8aa6ab37-2653-45b2-a725-a7735747f754": "How is content moderation handled by AI technology?", "4c0876f0-776a-4fac-a0e2-a3148db5b6f9": "What are the qualifications required for AI actors interacting with GAI systems?", "288d8ca3-7d48-4d33-8b24-d9e7bc4d94a8": "How can AI technology be used for code generation and review?", "fe80034d-0f1d-4bd0-8551-8f0d79001f23": "What protocols are involved in auditing and assessing GAI systems?", "23dc8c4c-f982-4955-b9bd-af9b54a6b57e": "How do organizations ensure AI applications align with their values?", "90900ded-0b53-44a3-9a22-13f8a5988204": "What are the change-management controls for GAI systems?", "0ca2f759-ebbe-4a9b-817b-1c16c7d1e376": "How can AI technology assist in data labeling and preparation?", "40801caf-7abe-479e-9818-e654ea44c389": "What are the key differences between AI risks and traditional software risks according to the National Institute of Standards and Technology (NIST)?", "0943156a-41b4-4d6c-8add-0bb5010e1936": "How does the NIST AI Risk Management Framework (AI RMF) address AI-specific risks?", "10a26c62-5c72-487a-b041-044cc2f6e7e7": "What resources are available in the NIST AI RMF Playbook for managing AI risks?", "66155b2d-caf6-421b-86c6-ca18b2579570": "How does NIST suggest framing risk in the context of AI?", "120752de-fe52-4f54-9e4a-49a9641d77ff": "What terms are included in NIST's glossary for trustworthy AI?", "28130de3-a5d1-4a75-aef6-60681684793e": "What steps does NIST recommend for identifying and managing bias in artificial intelligence?", "a74132c1-3fa1-462f-88d0-d418f51aaebf": "How can the NIST AI RMF Playbook be utilized by organizations to improve AI 
risk management?", "66be8191-b960-4fed-b0ba-c92018e56838": "What foundational information does NIST provide for understanding AI risks?", "edbb2f46-b7fe-4483-98f0-ebc5eef57da8": "How does NIST's 2022 publication contribute to the standardization of bias management in AI?", "cb87b719-4b5f-41e2-af94-c5ee3c301e18": "What are the main components of the NIST AI Risk Management Framework?", "02e280c0-6258-47f4-bb62-1324b012cd32": "What are the key challenges in ensuring automated systems work as intended?", "57a1c358-afc7-4547-8390-0545a91d0b6f": "How can automated systems be protected from unintended harmful outcomes?", "9ec63ba3-4a78-4919-9641-06406a26d3df": "What are the consequences of 'alert fatigue' in medical settings?", "30f03404-8efd-4a10-8b15-4563ba490e24": "How can the accuracy of predictive models in healthcare be improved?", "5911ed64-f3fb-4bbc-a354-710e9eb4a6c8": "What measures can be taken to prevent automated moderation systems from silencing counter speech?", "0a2e4e30-4e96-4313-8701-66fb74fbaa6e": "How do automated systems impact the safety and well-being of the public?", "754bdb70-fd0d-42db-b514-ed3e792c9c66": "What are the ethical considerations in designing automated systems for social media moderation?", "2b123ed9-5e26-4f5b-8bac-71dde9f51ca1": "How can hospitals mitigate the negative effects of false alerts in predictive models?", "a72510fb-3627-482b-bc59-e8a9d2d9c7fe": "What role do independent studies play in validating the performance of automated systems?", "77f146ae-3822-4a25-b535-3110c58c0759": "How can automated systems be designed to better distinguish between harmful and non-harmful content?", "b2d56c7c-fa94-47b2-9b32-0c4729fb2f12": "What are the primary information security risks associated with Generative AI (GAI)?", "31cf8128-b127-4673-88f6-875bd9d671be": "How can GAI lower the barriers for offensive cybersecurity capabilities?", "4c7d4d1b-256a-408a-8d84-d8a691961a20": "What types of attacks are GAI systems vulnerable to?", "b5d4dcd0-59de-49a8-97e5-1bc4c4de0c99": "How might GAI expand the available attack surface in cybersecurity?", "43f7aac1-4168-4700-b4c2-ba575419b046": "In what ways can GAI augment traditional cybersecurity attacks like hacking, malware, and phishing?", "b5e47640-3a12-4f25-80db-01036d1872b5": "Are there reports indicating that Large Language Models (LLMs) can discover system vulnerabilities?", "032a7d8e-cbd4-4005-96ab-461828a13663": "How can sophisticated threat actors use GAI-powered security co-pilots in cyber attacks?", "7ab5047e-51ca-40c0-8cc4-48fd68fcb7df": "What role can GAI play in helping attackers evade threat detection?", "0338aa3a-47c1-4a51-acce-90bfd9a2dcf2": "How might GAI assist attackers in escalating privileges after gaining system access?", "5001a4f7-f1ac-4bf1-9152-b893c1f45412": "What measures are necessary to maintain the availability of GAI systems in the context of information security?", "06e02bbe-ec13-46b6-a82a-c3eedbe9df94": "What is the significance of a waiver of sovereign immunity in legal terms?", "c5af22e4-d127-4a04-a563-47eaad15d6e0": "How does sovereign immunity protect the United States and its entities?", "d073d233-f4cd-4e5a-a3af-b8fa829b6a27": "What is the difference between substantive and procedural defenses?", "f810f097-05b7-43e9-b7e2-fa7d21b5c53b": "Can a work of the United States Government be copyrighted?", "b56876d6-1fb9-40ab-86b9-a78a889bbbec": "What does 17 USC \u00a7105 state about government works and public domain?", "73e2f0c1-f0cf-49da-906d-fc8510921e3f": "How can one enforce a legal defense 
against a US government agency?", "d8c94105-67dc-479c-a469-65e8104efca1": "What are the implications of a document being in the public domain?", "1a7e6b55-7f76-4439-9567-12cc8d835c96": "What legal protections do officers, employees, or agents of the US government have?", "c0aa67d6-de29-4bf6-8b18-04bbaf49240a": "How does the concept of equity differ from law in legal proceedings?", "88df5351-74b0-465c-9de9-b5807548c0dd": "What are the limitations of enforcing legal claims against the United States government?", "168b2e48-4e05-44a6-b944-aa2feb95f9e4": "What are the potential impacts of surveillance technologies on individual rights and opportunities?", "b72701b8-aef7-496e-8747-79854e65fcda": "How can one ensure that their data decisions are respected in automated systems?", "68af651a-6b8b-41bb-8f3e-14604272284d": "Why is it important to have access to reporting on the use of surveillance technologies?", "7faa544a-97f1-4d9c-804c-36a978652159": "What kind of documentation should designers and developers of automated systems provide?", "3d27b603-05bb-4a64-bc55-e9add2c9f314": "How can automated systems limit access to education, work, and housing?", "63d1952d-d789-4fd2-b18a-c7ddf8ce8cc1": "What should be included in the notice provided by automated systems?", "44065a66-2c8a-4d0f-9ebb-8dcfbdefe195": "Why is plain language documentation important for automated systems?", "fa9aff28-f81d-4034-acb6-19b295cae5fa": "Who is responsible for providing explanations of outcomes in automated systems?", "7e79602f-7d17-47d4-8874-50337e61fdc9": "How can individuals understand the role of automation in systems that impact them?", "eba5ba0f-95a3-4429-9437-7d0be9b5e0b8": "What measures can be taken to ensure transparency in the use of automated systems?", "b3486f90-735d-49df-86bc-739ba6403a31": "What are some top progressive companies building ethical AI in 2021?", "d5195aba-7870-4c55-961d-ed0ffe276376": "What methods are being used to assess equity according to the Office of Management and Budget's 2021 report?", "f0365668-2d3e-4a22-aac1-d25a64683ef8": "What is the AI Risk Management Framework by the National Institute of Standards and Technology?", "ce4365ac-9fdf-43ce-b139-503ec9ad37a4": "What is the purpose of the US Department of Energy's Artificial Intelligence Advancement Council?", "6737307a-5bbc-4ab4-b524-9437eee812f1": "When was the US Department of Energy's Artificial Intelligence Advancement Council established?", "e209d21e-f9c1-41cd-bbf5-14d112a1db70": "What are the key points of the US Department of Defense's Responsible Artificial Intelligence Strategy?", "139ad9ad-384f-497d-af84-dc9b24d2dade": "How does the AI Risk Management Framework help in managing AI risks?", "a24c2df7-395d-4019-8e4c-feb9d16b19d6": "What are the main objectives of the US Department of Energy's Artificial Intelligence and Technology Office?", "93b1cd97-2fe1-4152-ab7c-a0efbf49a6b5": "How can companies ensure they are building ethical AI?", "9c028b3c-c92c-4ca5-b7c4-e8d1c358b6c6": "What are the latest developments in AI ethics according to the 2021 article by Disha Sinha?", "0bfaf653-4226-41e5-865c-a5d63578d4e7": "What are the main concerns associated with the expense of broadband service for telehealth?", "0fd4687b-e9e7-498f-acd7-9eb020850852": "How do privacy concerns impact the adoption of telehealth systems?", "ebdadd5c-1215-40f5-8b2b-f242fbd35451": "What are the equity issues related to the cost of health monitoring devices?", "fba46c57-4104-40f5-bc91-fb9b706c2ef2": "How can racial biases in technology-enhanced care perpetuate 
discrimination in medicine?", "dfef4df6-fa61-419f-82ae-6a148536db56": "Why is it important for medical technologies to be accountable to relevant stakeholders?", "0fa04860-d710-40b5-856d-1dbc9d3bc759": "How can the voices of those subjected to medical technologies be better heard?", "6275567f-707b-48df-93ce-787e58e407de": "What are the potential solutions to address the expense of broadband service for telehealth?", "5c0dd9dd-bd3d-4f2f-b1d3-e8d142665e33": "How can privacy concerns in telehealth systems be mitigated?", "be446845-3854-499c-886a-d08121ae6700": "What steps can be taken to reduce the cost of health monitoring devices?", "21abc9b0-3037-4ca1-bebe-3ebbb20cbd20": "How can the medical community ensure that technology-enhanced care does not perpetuate racial biases?", "735c4032-1519-4fd5-8cdb-0e9a0ce6da5c": "How can the principles of the AI Bill of Rights be implemented in real-life scenarios?", "9daf1259-d81a-4cc8-8d81-e60f2962b064": "What are some practical technical approaches to protecting rights and opportunities in AI?", "d927d226-242f-4436-8243-c62ee8339ab8": "How can sociotechnical approaches help in actualizing the AI Bill of Rights?", "920eaf59-a989-498c-8964-7ac0a342d7b2": "What role do laws and policies play in ensuring the AI Bill of Rights is respected?", "1ddaf3a2-3fbb-4e1f-8bea-3a5bd70e5f8d": "Can you provide examples of how industry and civil society can collaborate to protect AI rights?", "aa9fb7f7-6205-45a6-8e2c-e7713a8420fc": "What are some illustrative cases that show the implementation of the AI Bill of Rights?", "f738f637-ecf5-43fc-b89f-65431bac4cb2": "How important is the cooperation among researchers, policymakers, and technologists in implementing AI rights?", "9db348ec-94bf-435c-aec7-b1da0e3acad9": "What are the expectations about technologies in the context of the AI Bill of Rights?", "2af37117-da7d-4d16-afb2-b96ea0513e67": "How can the public contribute to the effective implementation of the AI Bill of Rights?", "2939615e-23fc-4214-966c-6ba4fe1b7335": "What are the challenges in moving the principles of the AI Bill of Rights into practice?", "a7c4e78f-9e12-4e77-80a9-1b6d0e378dc7": "What are the key findings of Karasavva et al (2021) regarding the predictors of non-consensual dissemination of intimate images?", "f9d93ab9-108e-49db-901e-3dc647847f97": "How does the study by Katzman et al (2023) contribute to understanding representational harms in image tagging?", "73c8f947-0282-4541-8b91-1746ecf82b5a": "What are the main components of the value chain analysis for generative AI as discussed by Khan et al (2024)?", "f28b1c3a-c692-41ea-8144-dd7b5230aab8": "What is the purpose of the watermark proposed by Kirchenbauer et al (2023) for large language models?", "14f8c882-d08f-4bd0-923e-5d8b8841fd45": "How does Kleinberg et al (2021) address the issue of algorithmic monoculture and its impact on social welfare?", "17eb019d-fa03-4024-90b1-435b97d565de": "What insights does Lakatos (2023) provide in the report \"A Revealing Picture\" published by Graphika?", "e0f19f85-0585-4ce1-b033-c658c72c6731": "How can the findings of Karasavva et al (2021) be applied to prevent the non-consensual dissemination of intimate images?", "ff276cd8-c1b4-4ac4-ad1a-fe49517a74c0": "What methodologies were used by Katzman et al (2023) to measure representational harms in image tagging?", "afaa6994-4806-44cf-958a-057084b24a97": "What are the implications of the value chain analysis by Khan et al (2024) for the future development of generative AI?", "b7b673c0-0b37-42fe-9b9a-93758490f9ef": 
"How effective is the watermarking technique proposed by Kirchenbauer et al (2023) in protecting large language models?", "98816e76-3c36-44a7-9f7b-e1c94ca23586": "How can active learning techniques be used to identify model failures or unexpected outputs?", "8fe3db0a-9fc1-49f4-b08a-ed387dd67784": "What are the benefits of sharing transparency reports with stakeholders regarding GAI system updates?", "62865470-d8f3-4ebf-bf2b-050f5fcbce3b": "How does tracking dataset modifications help in maintaining the provenance and integrity of information?", "fe41122b-c0e0-4a47-9424-b21efd0d8405": "What role do AI actors play in user research and experience?", "c5fabcc7-22e9-49c6-b965-c80316d5b4d8": "How can harmful bias and homogenization be mitigated in Human-AI configurations?", "8086f26a-3e53-45ca-8e09-adcaeb35b100": "What steps should be included in transparency reports to enhance accountability in AI systems?", "61e1e6a5-ba18-4763-9a90-5f6b278dcda0": "Why is it important to monitor data deletions and rectification requests in AI datasets?", "45090c7b-a6aa-4517-8b79-30964e872d62": "How can collaboration with AI actors improve content performance and impact?", "3b005aea-9582-4e7e-b2ed-86f7c16dffa6": "What are some effective methods for ensuring the veri\ufb01ability of content origins in AI systems?", "2bb3e489-4c81-4e1b-aa8a-149336e61c0f": "How does confabulation affect the transparency and accountability of AI systems?", "77aa993b-372c-4534-8252-ca9c4010c1cf": "What is predictive policing and how does it work?", "6a1dc5f7-53a2-431d-80bd-8ddfbb546084": "How can predictive policing systems impact individuals' privacy and rights?", "0b5870d7-28c3-4da6-82be-4f2dcc63e1bf": "What are the potential risks of using automated systems in law enforcement?", "673c25a5-a7a8-4bfc-82aa-81b7f1bda04f": "Why is transparency important in predictive policing systems?", "ea441231-122d-47f5-8f5c-11085f9fd1f4": "How can data entry errors in automated systems affect individuals' access to benefits?", "0ed1645c-4661-46cb-8443-35a4c8e8ce60": "What measures can be taken to ensure fairness in automated decision-making systems?", "11076db2-fd60-471a-aba9-62b355aed19e": "How can the public be informed about the criteria used by predictive policing systems?", "3ae32342-738f-4cb9-b5dc-16fcf2130ea2": "What are the ethical concerns surrounding the use of predictive policing?", "ff50e166-1288-43c1-a067-65b0cf902d06": "How can individuals challenge decisions made by automated systems?", "f12d83b4-be05-4565-86df-f93a0c64b324": "What role does public transparency play in the effectiveness of predictive policing systems?", "37dd7447-0e75-4e14-8e17-865d6555c74a": "What is the Labor-Management Reporting and Disclosure Act (LMRDA) and what does Section 203 cover?", "1687fec4-dc70-4040-b728-4b5e44edc435": "How do I fill out the US Department of Labor Form LM-10?", "10d62236-489c-42fd-81dd-7885112ad9b2": "What are the key points in the OLMS Fact Sheet regarding Form LM-10?", "e9cd49c5-55d2-4b58-b05a-d0534264115e": "How does Apple protect user privacy according to their documentation?", "e659b628-dfa2-49f4-a768-e8761ae1c74e": "What measures does Google take to ensure Android is secure by default and private by design?", "7ea78477-98bc-4ff3-8d95-b316f391a035": "What are the main arguments in Karen Hao's article about algorithms trapping people in poverty?", "74061af4-43ea-4d23-9765-abe2b5fbb9e1": "How are lawyers fighting back against hidden algorithms that create poverty traps, as discussed in the MIT Tech Review article?", 
"dfdb3835-219b-4d3a-9673-d2923df2fb4b": "What concerns does the ACLU raise about family surveillance by algorithm?", "18a0946d-d4d3-4c73-b1bc-ef5d2d6cca86": "What are the implications of algorithmic surveillance on families, according to the ACLU fact sheet?", "3f73048f-6c23-48db-be72-402295190c87": "How can developers design for safety and privacy in mobile applications, as suggested by Apple and Google?", "f9d78f26-8f38-45a5-a05d-126e99e0b295": "What are the best practices for testing automated systems in real-world contexts?", "caec4118-4465-454f-8361-aebd8d21a912": "How should feedback be documented when reconsidering the deployment of an automated system?", "318fb3a4-b202-4e61-870b-b9a18c7c8f54": "What are the key differences between automated systems testing and human-led testing?", "3681a289-6754-4f14-9c68-bba82f3179af": "Why is it important to mirror real-world conditions during the testing of automated systems?", "424ac5ef-46e1-4f01-bee3-d71a0e907e5d": "How can material differences in deployment conditions affect the performance of automated systems?", "032fd89f-0b92-4533-a520-39fe68cc1744": "What role do human operators or reviewers play in the effectiveness of automated systems?", "a788ae08-165b-4797-b530-9054fffc79b9": "How should system performance be compared with existing human-driven procedures after testing?", "c823ae3a-5c23-421a-a316-3fb0a90fdff6": "What domain-specific best practices should be followed for testing new technologies?", "4e06d594-f59f-43ed-9c42-766b3d71377a": "Why might new testing be required for each deployment of an automated system?", "f0a8cfb7-ca5a-41af-874f-24ca1c10564a": "How can the outcomes of testing influence the decision to deploy an automated system?", "ceea82a8-3f36-499c-8999-f705089fd5c4": "What are the expectations for automated systems in sensitive domains?", "4da1b1fe-5a0f-4873-9a51-592e7031b5f0": "How can users opt out of automated systems in favor of human alternatives?", "2d5f0e7a-37f4-41f3-a270-083718406673": "What mechanisms should automated systems provide for human oversight?", "b597f3f4-73ca-41d6-805f-ea8566ee4335": "Why is it important to have human consideration and remedy in automated systems?", "51a36031-cbe6-4896-a26f-0d1231ac9ac5": "What kind of training and assessment is necessary for human-based portions of automated systems?", "28e9d3c8-9aad-4ac2-8b76-8112ba317fc7": "How should automated systems ensure the effectiveness of human alternatives?", "3cd1b051-c5ec-4ba6-85a3-6c8ecc3d382e": "What safeguards are recommended for automated systems in sensitive areas?", "93157218-65e8-4b27-bdc0-d3b5c75016f2": "How can automated systems provide clear and accessible instructions for opting out?", "6a607a7d-6808-444f-a772-aa55eccd8e61": "What role does human oversight play in the effectiveness of automated systems?", "522e086d-9b12-48c9-b070-b9f0c6b871e8": "Why is it important to have fallback systems with human oversight in automated systems?", "ce02496c-71b3-4ab7-98bb-eed1cc3edc11": "What are built-in protections for abusive data practices?", "55c73a61-2c93-43db-b06b-142fea09d1a3": "How can I ensure I have control over how my data is used?", "8dae2406-bb95-4836-9224-224fa830dd81": "What design choices help protect my privacy by default?", "1ddfec1f-63dc-4114-8d2c-6c6b080df23d": "What are reasonable expectations for data collection?", "ca588f53-c672-4ec4-8c2f-4edb75780625": "How can designers and developers seek my permission for data use?", "f01609ce-a9ca-4166-bc21-4837e9f8dbe6": "What are alternative privacy by design safeguards?", 
"bf05189a-5783-4529-9a04-33ecb2b039f6": "How can I avoid privacy-invasive default settings?", "69cd9e44-ebad-4444-bd6a-0ffe7dd0fea1": "When is consent necessary for data collection?", "5592a53b-bc51-4eb2-ad09-d91eab84cbca": "What are the best practices for respecting user decisions on data use?", "da87855a-e8d3-464f-b44a-2f1c0f36efd9": "How can automated systems ensure my data privacy?", "537d6d33-2d70-4cda-bfd2-a7e7d58dca1c": "What are the guidelines for ensuring surveillance is proportionate and necessary?", "f9c45eea-e5e9-4963-b36b-eac4e1041f10": "How can designers and developers minimize the invasiveness of surveillance systems?", "95df7bca-5759-48de-aded-0bdcfd73b47d": "What measures should be taken to inform individuals about surveillance activities?", "08b6077f-18cf-437e-93e3-b2272cafdf09": "How can surveillance systems be designed to protect civil liberties and civil rights?", "32c66693-7c38-4482-b27e-1226a0d82810": "What are the legal requirements for providing notice to individuals subject to surveillance?", "b0213953-d766-4b10-aa1b-c7170ea2c1ab": "How can surveillance be limited to avoid infringing on democratic rights like voting?", "a7153e52-4939-4c06-bfa6-43f880a88d70": "What are the best practices for restricting the number of subjects monitored by surveillance systems?", "c1a6fa3b-e163-482c-8ca2-b05ca142d9f1": "How should data gathered through surveillance be managed and used responsibly?", "1b8e4a92-c4c9-4200-9e7f-a441aa82cd9b": "What are the ethical considerations for deploying automated surveillance systems?", "b3ed9e28-9cca-467c-9c04-1b533b1ff751": "How can surveillance systems be aligned with national security needs without compromising individual rights?", "b814a05e-73e6-4ac4-8098-646df1806acd": "What are the common biases found in automated sentiment analyzers?", "e2b9260e-db2c-4449-9f56-d5cbf8e617c2": "How do automated sentiment analyzers affect social media content moderation?", "9a8125b8-6346-4652-9584-b5e5399fa7bd": "What steps are being taken to address bias in sentiment analysis tools?", "2c59dede-dec0-4ef4-9a1c-f4e8e47a6727": "Why do searches for terms like \"Black girls\" or \"Asian girls\" often return sexualized content?", "3c7f39ce-861e-4abc-8b91-941233a6ff50": "How can researchers help in mitigating bias in sentiment analysis tools?", "0e532265-3e9e-4043-ac99-d073b610bb72": "What are the implications of biased sentiment analysis on minority groups?", "3479b71e-e61b-42b5-9866-827680891f1c": "How can technology platforms ensure fair sentiment analysis for all users?", "6334ec05-5071-42d8-98b8-fc5306d236b2": "What examples illustrate the bias in automated sentiment analyzers?", "c5293c46-c7bd-4332-b088-0f07e7fca312": "How does bias in sentiment analysis impact online communities?", "5386554b-00b3-4e7e-b130-eafd4d8eb057": "What measures can be taken to improve the accuracy of sentiment analyzers?", "22b750f1-231b-489a-bebd-3b193bea2a49": "What is the importance of calibrating automated systems to the level of risk?", "5de799b0-d874-492e-aa45-4fd7647abab2": "How can summary information about automated systems be effectively communicated in plain language?", "81900311-01e7-4092-a602-1d8187f760e4": "Why should assessments of the clarity and quality of notices and explanations be made public?", "65bccbee-7732-4dd2-be47-39627cdfeab0": "What are the key components of a good notice and explanation for automated systems?", "db31ef6b-186d-4690-9967-65f38c663c5b": "How can organizations ensure that their automated systems are transparent to the public?", 
"540f0169-64c9-4548-888a-05f286d81496": "What methods can be used to assess the quality of explanations provided by automated systems?", "415e4fbc-b69e-428a-9f94-7312d737e467": "Why is it important to report on the clarity of notices related to automated systems?", "1bfaa0c5-41a4-4311-a8ca-15d61ff4d3cf": "How can the public benefit from having access to summary information about automated systems?", "768e8af3-91d4-480a-8e3c-f3fb6fa362a0": "What are the challenges in making automated system reports understandable to the general public?", "85eec5c3-d2d6-458e-ada1-b311bf4a72f9": "How often should assessments of automated systems' notices and explanations be updated and made public?", "3d150e71-c79a-4bd8-8084-2ae5bafda260": "What are the key findings of Acemoglu's 2024 paper \"The Simple Macroeconomics of AI\"?", "1a24bdd1-8636-4d56-8125-f82bbece4d8e": "How does the AI Incident Database track and report AI-related incidents?", "84f606b0-d5e2-46fe-b0fe-9f8bd603926f": "What were the main incidents and responses discussed in Atherton's 2024 survey on deepfakes and child safety?", "b0068345-aa37-45f2-a66d-ab765c64e4c3": "What are the implications of intentional biases in LLM responses as discussed by Badyal et al in their 2023 paper?", "322cd2e4-d7f9-48b7-8de7-f1bb820e45a4": "How was the Bing Chat data exfiltration exploit discovered and fixed, according to Embrace The Red's 2023 blog post?", "555f7d72-2732-4953-b60e-5aa9bb3b5329": "What is the concept of algorithmic monoculture and its potential impact on outcome homogenization as explored by Bommasani et al in 2022?", "0a9a8d6d-03b6-4a09-a022-86b5bcbb4f9d": "What strategies do Boyarskaya et al suggest for overcoming failures of imagination in AI system development and deployment?", "28ffa310-5101-4271-a96f-ae81552638be": "What are the main security challenges in the AI pipeline as identified by Browne et al in their 2023 report?", "e59e161d-73b9-44a7-ad00-86e936500c7d": "How does the AI Incident Database contribute to improving AI safety and accountability?", "7712df59-b5e6-4300-81f2-1d0e16da04e2": "What are the potential risks associated with deepfakes, particularly concerning child safety, as highlighted in Atherton's 2024 analysis?", "ecfdcb90-ee70-422b-9850-10c9ebbf34c6": "What is the AI Bill of Rights?", "6dd78dc1-02ed-4f5b-abe6-398caa87184d": "How can AI improve Americans' lives?", "43f126a5-f5cf-47b2-a267-d84e162dc483": "What are the potential harms of AI technologies?", "2d69fe62-48f6-443a-af60-47dd8aa8e223": "How can we prevent the harms of AI technologies?", "b15d2c3c-6578-4ac7-bf29-1bb9ffae84d3": "What role did public servants play in shaping the AI Bill of Rights?", "c4aaa7b2-60f1-480b-9a24-5f8d421c6136": "How did the international community contribute to the AI Bill of Rights?", "98490660-182e-4994-a0e9-79f5063132a2": "What are some examples of public engagements included in the Appendix?", "db8d310e-c511-4d66-9486-f8723f206d2f": "Why is it important to have a formal request for information regarding AI?", "4dce7da8-27f9-40a3-917b-17d3726fd5ee": "How was the input from the public collected for the AI Bill of Rights?", "9f0a6fe1-594c-447e-8837-2a78a94f7b03": "What are the core messages from the discussions about AI technologies?", "60709a95-43fe-4194-a602-00bee13a364a": "What is algorithmic discrimination?", "d64d1e05-da97-4caa-a99f-c615a4ec29f3": "How can we protect against algorithmic discrimination?", "ec9e492e-7c12-49e0-b47d-e552f7d1f972": "What are some examples of algorithmic discrimination?", 
"336ad8fa-faaf-4447-bfc6-ef161da017ce": "What laws exist to prevent algorithmic discrimination?", "3b634a49-58a5-4fcf-a112-9a86f5ac0893": "How do algorithms contribute to discrimination in hiring practices?", "4b58cb88-0109-4889-95e2-055c73c49939": "Can algorithmic discrimination be completely eliminated?", "21b86043-f03c-4c93-a916-be8fd8b2ad61": "What role does transparency play in preventing algorithmic discrimination?", "63133742-a77f-4db6-846d-4d1f5059f01a": "How can companies ensure their algorithms are not discriminatory?", "661fa6f5-10ba-4b3b-a3aa-67fcb6b7c0ec": "What are the ethical implications of algorithmic discrimination?", "d181a784-4cc1-4749-98e2-0f0ce4f2e3be": "How do biases in data lead to algorithmic discrimination?", "ebf17b14-0e3f-4c3d-904d-649dfc42fa95": "What are examples of automated systems that can impact individuals' rights or opportunities?", "a6fabc41-7dfb-4c46-aa88-b4a3530b1d84": "How do automated systems derived from machine learning differ from passive computing infrastructure?", "24c03da2-ba86-485a-86f8-59894c03d3e1": "What is considered passive computing infrastructure in the context of automated systems?", "5e4464c8-91c2-44f1-9ab2-914ebaa083c0": "How can automated systems influence policy implementation?", "6784a5e2-6056-4306-93f8-80a495e09e01": "What types of data processing techniques are included in the definition of automated systems?", "79f1c48b-968c-4851-928d-ae6f42c61343": "Why are web hosting and domain registration considered passive computing infrastructure?", "925e7b0d-8fe7-40c2-a359-c8ee0f5ed344": "How do automated systems interact with individuals and communities?", "3397b416-793c-48c5-be7e-199cd416c910": "What criteria determine if an automated system is in scope for impacting communities' rights?", "fc6683f3-3edb-4168-be40-07bfdd0d288b": "How do automated systems aid in decision-making processes?", "f7d73d0c-df20-4ca2-91d2-a8c5afdc14c4": "What is the significance of excluding passive computing infrastructure from the definition of automated systems?", "36690cae-c8ac-4416-86a2-94636c14e662": "What is algorithmic discrimination and how can it be avoided in automated systems?", "ed1dbeba-1948-4424-9f4e-49940e6ced35": "Why is it important to limit data use in automated systems to avoid group-based inferences?", "6144bb8c-dc20-482c-b508-3a6746a31aad": "How can human oversight ensure that automated systems are tailored to specific use cases?", "10bc0fe7-54f9-4f07-8e70-4dc100547fa8": "What are the risks of using automated systems in sensitive domains without human oversight?", "a2df3dd2-4f82-4dc5-a09b-87f4bb447213": "Why should validation testing of automated systems not be assumed to transfer to different locations or use cases?", "1ceaf948-0544-42aa-998b-834363202b17": "What role should human consideration play in high-risk decisions involving automated systems?", "7ecb6173-5692-4d0c-9c5e-8b0b13bffa23": "How can automated systems provide positive outcomes without directly intervening in high-risk situations?", "1e214f08-1e4a-4588-b61a-55376c21b541": "What are some examples of high-risk decisions where automated systems should not intervene without human oversight?", "68fab0c7-c29b-44ec-af9d-05ad2474f8b0": "How can evaluation testing ensure that an automated system is safe and effective for a specific situation?", "7bd24a17-0f1e-4165-ad6c-a177ddda289f": "What are the potential consequences of allowing automated systems to make high-risk decisions without human consideration?", "c8b856a5-bdd4-468f-967f-2a2d971d1c03": "What are the legal limitations 
on the use of surveillance technologies by the government?", "403aa84b-752d-4588-b4e3-fa5a8c170913": "How do surveillance technologies impact the rights and opportunities of individuals?", "d6cc3ca1-888e-40c0-be4f-a80b26fe264f": "What constitutes \"real-time or subsequent automated analysis\" in the context of surveillance?", "33590f14-1e42-4a8a-b98e-4b50ed420647": "How can surveillance technologies affect underserved communities?", "a2a6d551-b7e7-4648-9043-311d908a8546": "What is the definition of \"underserved communities\" in relation to surveillance technology?", "8da5e9a2-f967-4417-a687-2d49feb921bc": "How does the framework address the protection of data and communications?", "2826ff48-bba2-4f87-9f2e-e425dd1999ff": "What are the ethical considerations for using surveillance technologies on individuals or groups?", "ff195633-1d86-4b29-8add-e361af17a5fd": "How can commercial use of surveillance technologies be regulated to protect individual rights?", "d1ee3ce0-b52c-4ea9-a326-9e3a36f91eab": "What are the potential consequences of surveillance technologies on community access to opportunities?", "655de1e5-ce3a-4bce-95ec-ebe789004f66": "How does the framework ensure the preservation and protection of identifying information?", "ec6b21b6-677f-430b-b3f1-682615b0c999": "What are the best practices for obtaining consent for data collection in sensitive domains?", "16885872-4b72-4e64-b7db-18121bb64984": "How can notice-and-choice practices for data use be made more understandable?", "f9d9b18c-d71e-4abd-b3ca-0223dec3ad95": "What are the enhanced protections for data related to health, work, education, criminal justice, and finance?", "2bb558fb-06ba-4259-b5c2-1069543d1700": "How should data pertaining to youth be handled to ensure their protection?", "9082084b-5bcf-44c9-acbf-4b018579d045": "What are the necessary functions for which data and related inferences can be used in sensitive domains?", "5fc54a90-cce1-45f8-8502-abf134313535": "What ethical reviews are required for the use of data in sensitive domains?", "98454bb2-eaf7-45a3-8e6d-9781a4a9d584": "How can communities protect themselves from unchecked surveillance?", "e9753e53-f009-433f-83b1-86b01c4fdf46": "What kind of oversight is necessary for surveillance technologies to protect privacy and civil liberties?", "7c787e2d-b2de-4444-a4e9-08b349e388a4": "What are the potential harms of surveillance technologies that need to be assessed before deployment?", "6c304545-a0cf-4473-aec5-25670f40a825": "How can continuous surveillance and monitoring be regulated to protect individual privacy?", "34a2354d-7bf6-4914-8c4a-cf196db36040": "What are the key points discussed in the document \"Records, Computers, and the Rights of Citizens\" from July 1973?", "0aad6105-223c-4dab-912f-526f486cdc6c": "How does the Office of Management and Budget's Circular A-130 guide the management of information as a strategic resource?", "f59c944c-112c-40a8-a47f-ac2b0347e2c4": "What are the main recommendations in the OECD's guidelines on the protection of privacy and transborder flows of personal data?", "c8df4ea5-17bd-4cca-a295-e7ee59f9e7d0": "How effective is the proprietary sepsis prediction model validated by Andrew Wong and colleagues in hospitalized patients?", "98b08556-abfd-4833-ae35-a8535571fea9": "What are the implications of the findings from the study on the sepsis prediction model published in JAMA Internal Medicine?", "cba6f835-caec-48d5-9b82-232ba8416963": "How does Facebook's moderation policy affect discussions about racism, according to Jessica Guynn's 
article in USA Today?", "d5c902d5-6803-43ce-9cc7-e41da57f53fe": "What does the term 'Zucked' mean in the context of Facebook users discussing racism?", "b810fe11-8639-4aa1-9680-e9aed0a7f23f": "How has the recommendation of the OECD Council concerning privacy and data flows evolved since its revision in 2013?", "cf8d9d77-d12d-4feb-affb-6668a8f6b2ce": "What are the criticisms of Facebook's handling of hate speech and discussions about racism as reported by USA Today?", "d47a2d26-34fd-45b5-b3ae-e8dfe20c77ac": "How does the 2016 update to Circular A-130 impact federal agencies' information management practices?", "345ce069-495e-49b5-af9c-c3149193f72c": "How can new technologies help reduce health disparities?", "74332ba4-7dba-484a-b7f3-db3c9b6c6955": "What are the potential risks of relying on technology for healthcare delivery?", "8480a6c9-125b-435b-87a1-ffad65340e1f": "How can policymakers ensure equitable access to healthcare technology?", "d4afcb88-ecf5-4717-9b29-d9b4cd68121b": "What role does artificial intelligence play in improving healthcare outcomes?", "468af870-6b02-4fa5-9205-37a33e43b63c": "How can healthcare providers integrate new technologies into their practice effectively?", "12891465-101e-4897-a86f-046d85706186": "What are the ethical considerations of using technology in healthcare?", "14c6eb6a-036f-4f26-bb80-a075fd2979a1": "How can technology improve healthcare access in underserved communities?", "861a1a80-5a7a-4b03-b86b-e6d7b357b14c": "What are the most promising areas for research in health technology?", "beb717de-9182-47e4-82bf-c27fde4a43fa": "How can healthcare systems balance technology use with patient privacy concerns?", "840b62a5-c2ca-45e6-9793-40fed59c838d": "What impact has telemedicine had on healthcare delivery during the COVID-19 pandemic?", "bd65a985-6adf-4ca0-800a-2f5e4eaf9313": "What are the best practices for monitoring AI-generated content for privacy risks?", "d1bdd54c-5d9d-4677-b5ac-065fd4b4e8e8": "How can organizations address instances of PII or sensitive data exposure in AI-generated content?", "e937f160-66b0-4289-83b1-2a5616b701ae": "What processes should be implemented to respond to potential intellectual property infringement claims in AI?", "a9f3fd07-2b06-4abc-9bae-9cb5c884420a": "How can new GAI policies be integrated with existing IT governance and legal compliance activities?", "00c694b6-964a-4ff8-9c00-082579e3705e": "What are the key components of effective data curation policies for AI training data?", "0d2d3927-99e8-44c3-aec5-08fdc4fc2fd1": "How can companies ensure their AI technology does not infringe on third-party intellectual property rights?", "227007e5-7052-4b42-b7b0-b914bcbe185f": "What are the legal risks associated with using third-party data or software in AI development?", "262df558-56e7-445f-9a93-ad716c929f95": "How often should organizations conduct monitoring of AI-generated content for privacy risks?", "b6f4c293-115e-4811-a3aa-9a9b8ddf6f11": "What steps can be taken to document AI training data curation policies according to applicable laws?", "46fd166c-6f0e-4c3f-85c3-b30f0a1d53a0": "How can information security be maintained while integrating new GAI policies with existing systems?", "560ee295-11c4-41df-a82c-d7e7f0f44386": "What is a performance baseline for an algorithm, and why is it important before deployment?", "66e50da4-fea8-4795-8a27-663c0cde6f02": "How can human performance be used as a lifecycle minimum performance standard for automated systems?", "4d449176-2653-4092-8b2f-4f047ac02fb5": "What are the decision 
possibilities resulting from performance testing of an automated system?", "b8a080cb-dbf5-4ca7-b5a4-5c84f821b1ef": "Why is it important to consider the possibility of not deploying an automated system after performance testing?", "cde58352-24e9-4fc4-aae0-9d41f4d197d6": "What are the key steps in identifying and mitigating risks before deploying an automated system?", "3426f11e-e5f4-418e-b97a-01f241a68b12": "How can potential risks of an automated system impact people's rights, opportunities, or access?", "f3e038cf-7997-4724-8e6b-99c701363442": "Why should risks to impacted communities that are not direct users of the automated system be considered?", "d730bbe4-b1d4-48dc-9ae6-737485ab0e96": "What are some examples of risks resulting from the purposeful misuse of an automated system?", "e4aae9a3-d377-43f0-b0b7-61cca8c61714": "How can the consultation process help in identifying concerns related to the deployment of an automated system?", "4237e65b-dc5a-4002-b88b-cd45f37bc5f7": "Why is it important to measure the impact of risks and balance attention towards high impact risks?", "a824bf8c-fd69-4e1b-a7fa-d8a0e946b3e3": "What are the key factors to consider when determining the expected and acceptable GAI system context of use?", "305a21f4-80f2-4d2b-9d75-6745caca88fb": "How can organizations assess the potential positive and negative impacts of GAI systems on public safety and democratic institutions?", "d4751075-70d8-4ec0-ae48-12518f492e05": "What are some common assumptions and limitations that need to be documented for GAI systems?", "9a023a00-7a74-40a4-9616-0444aeadb0bb": "How can socio-cultural and domain experts contribute to the assessment of GAI systems?", "01f67521-6c20-4fd8-8434-616c85133c1e": "What are the potential risks associated with individual and group cognitive biases in the design and use of GAI systems?", "2fd099a7-bb3b-4ae7-97cf-d4a44e7b14fd": "How should organizations document risk measurement plans for GAI systems?", "c09872bc-2fe0-44c2-92ff-4737772a9980": "What are some known past incidents and failure modes of GAI systems that should be considered in risk assessment?", "01e6920d-c467-4660-aeea-b58e1ad39cbf": "How can organizations address the issue of over-reliance on quantitative metrics in the evaluation of GAI systems?", "1ef08275-0345-43e2-87d8-2c0014a954c9": "What are the potential consequences of harmful bias and homogenization in GAI systems?", "5d1ea4e8-03c0-4918-a4b0-ee3fb7de061a": "How can organizations ensure that GAI systems align with social norms and expectations?", "f15a27f7-7498-4ada-be1c-3cac898dc341": "What are the key expectations for automated systems in terms of notice and explanation?", "b3491e6c-e129-4aa9-81b0-cb27a5390728": "How should an automated system provide notice of its use?", "02f41084-6947-4e5d-a014-2701aa7a8524": "What type of documentation should be provided for an automated system?", "37901011-f77e-472d-bc43-758d35691417": "Why is it important for the documentation of an automated system to be in plain language?", "d152bc28-9605-4041-84f0-75278caf5c62": "Who is responsible for ensuring that the documentation of an automated system is accessible?", "29842944-f55f-426a-b9fe-f93af91ad99a": "What should the documentation of an automated system include?", "8eb02857-2af2-4998-a936-ddb76db5f513": "How can an entity ensure that the documentation for an automated system is easy to find?", "7bebfe19-175e-4123-abe1-e08b0c2d683d": "What are the benefits of providing clear and understandable explanations for automated system decisions?", 
"98de5d6b-fc75-4dd6-90ed-3be730aeab0c": "How can automated systems ensure that their actions are transparent to users?", "45c83246-49f0-4517-8449-22f61e1ac6a2": "What role do human components play in the documentation of automated systems?", "d0176225-348b-4c83-ab7d-82277925e6e9": "What is the Blueprint for an AI Bill of Rights?", "f75dba8f-4254-4fad-b3ba-5ce51b3065e9": "How do AI and data-driven systems impact Indigenous communities like Tribes and Clans?", "d6be5ece-a538-4e8a-a374-7789a34d34c8": "Why is the concept of community integral to the Blueprint for an AI Bill of Rights?", "a1d04dee-74ab-4d43-8717-6e2684c5670b": "How does United States law currently protect the rights of individuals against AI harms?", "48ff1eaf-6834-4862-8e80-624bb71169e7": "What challenges exist in protecting communities from the effects of automated systems?", "7b9de209-f9fa-4ef0-8ced-8969ee2ae73c": "How can the harms of automated systems be evaluated at the community level?", "a2d29100-ebeb-4076-9ddf-1f64b365290a": "What are some examples of formal organizational ties mentioned in the context?", "fd57bf95-f994-4cf5-b914-af75dc140b11": "Why might the impacts of AI be more visible at the community level than the individual level?", "4eae01bd-26ed-4b64-9606-abf42d1039e7": "How does the Blueprint for an AI Bill of Rights propose to redress harms caused by AI systems?", "c93c5ccd-cd7b-4a31-b8fe-2730399bf76e": "What are the potential benefits of evaluating AI impacts at both individual and community levels?", "8e93918d-d152-4bda-a527-51eb1e4a563a": "What are interpretability and explainability methods in AI systems?", "8bd0ab3a-519e-4f8b-bd09-52e086f06ab1": "How can we evaluate GAI system decisions for alignment with their intended purpose?", "9ea19168-923a-4776-b043-9cc4c8d33b84": "What is the importance of monitoring and documenting instances where human operators override GAI decisions?", "48f1823c-987c-43b8-ba09-2f7f30c39478": "How can content provenance issues affect GAI system decisions?", "69904a33-e3bc-4725-a589-ae8903083507": "What are structured public feedback exercises in the context of AI system design and deployment?", "1ff81582-703f-4f1c-9591-b190d995ac44": "How should the results of public feedback be incorporated into AI system decisions?", "70b48d78-cf62-458d-9cc8-cdee5b6b30ce": "What are the roles of AI deployment, domain experts, end-users, and operation and monitoring in AI systems?", "3cbba8c5-608e-41ba-acc1-e266269a8d04": "What is the significance of verifying information integrity in AI systems?", "c0dbcb52-583f-455e-ae5b-a4baefe89ffb": "How can harmful bias and homogenization be addressed in AI systems?", "022fb6ca-e847-4f32-a2d8-a01f772f1c21": "What are the key considerations for making \"go\"/\"no-go\" decisions in AI system deployment?", "776a11f6-1df2-463f-8f5e-9ab8c838c2ed": "What is the role of the Connected Health Initiative in advancing healthcare technology?", "b183d8c6-087c-4de9-9b29-7b89974ffa14": "How does the Consumer Technology Association influence tech policy and innovation?", "611c347f-fd3b-498b-87ff-095aa469d501": "What contributions has Courtney Radsch made to digital rights and online freedom?", "ccd936e2-e89b-41c3-9b5d-4c171d9f1eb6": "What are the primary research focuses of the Data & Society Research Institute?", "de9aab5b-822b-43a9-97f2-e9d6959e33ba": "How does Data for Black Lives use data to address racial disparities?", "daa39ee8-a5c1-4bef-bb7a-eadbcc70f33b": "What projects are currently being undertaken by the Data to Actionable Knowledge Lab at Harvard 
University?", "cc6d42a1-429f-4ea8-a0c8-481571c586ea": "What services does Deloitte provide in the realm of technology consulting?", "1fa9c10e-8e74-472e-98c1-62ee3c81305f": "How does the Digital Therapeutics Alliance support the development of digital health solutions?", "3d8c9a00-451d-4097-95da-7db467588dc5": "What is the mission of the Electronic Frontier Foundation in protecting digital privacy?", "d5fd9d65-12bb-4f39-a0f1-3cb54d012fe7": "How does the Electronic Privacy Information Center advocate for consumer privacy rights?", "cb0bf5f3-38b6-4eec-81ff-530a778e0328": "What are the key findings of Smith et al (2023) regarding the use of neuroanatomy as a metaphor in large language models?", "207c0258-6572-48ec-ab8d-8f6558681013": "How do Soice et al (2023) propose that large language models can democratize access to dual-use biotechnology?", "eba2a62c-de0b-4649-8f1e-438799536f1a": "What methods and considerations are discussed by Solaiman et al (2023) in \"The Gradient of Generative AI Release\"?", "015148ae-410b-409f-a89d-73d1a425a37d": "What privacy concerns are raised by Staab et al (2023) in their study on violating privacy via inference with large language models?", "b4463b2e-409a-48a2-9a74-455373d37285": "According to Stanford et al (2023), whose opinions do language models reflect, and what implications does this have?", "6ae4fc70-185d-4aae-bc07-faedd4dbae8e": "What are the energy and policy considerations for deep learning in NLP discussed by Strubell et al (2019)?", "f67fd894-f032-407f-8771-8f564f4f9ca2": "How does the White House's Circular No A-130 (2016) relate to managing information as a strategic resource?", "ae278041-1973-4238-aa7f-a6cac1551a8c": "What are the potential risks of hallucination or confabulation in large language models as discussed by Smith et al (2023)?", "f6cbd86e-2f15-4311-9c83-e407554ff032": "How might the democratization of dual-use biotechnology through large language models impact society, according to Soice et al (2023)?", "38192107-9ce8-4cda-b34a-cc111e33be8a": "What are the ethical considerations in the release of generative AI as outlined by Solaiman et al (2023)?", "af1bb742-8ec7-4495-9141-a44df3bda1a0": "What is the Jigsaw Unintended Bias in Toxicity Classification competition on Kaggle about?", "379fa7b3-02e9-4abb-a67d-f5611f67453f": "How does the paper by Lucas Dixon et al address unintended bias in text classification?", "938ae17a-7800-4fa0-9d13-64d885add576": "What are the key findings of the AAAI/ACM Conference on AI, Ethics, and Society paper by Lucas Dixon and colleagues?", "c1a34901-9205-4c40-81b9-92d9bccfe577": "How has Google reduced racy search results for terms like 'Latina teenager'?", "fcd299fd-aad2-49b8-9fd4-d6d51e7d6d06": "What impact did Google's reduction of racy search results have on search outcomes?", "8fe9af88-6425-4dce-ba89-5f8170af3a50": "What are the main arguments presented in Safiya Umoja Noble's book \"Algorithms of Oppression\"?", "dbf83cba-a1ec-4513-a5ee-dbb91d87f238": "How do search engines reinforce racism according to Safiya Umoja Noble?", "07560e9e-9c12-44bd-8b3e-a2aa039ef9bc": "What measures has Google taken to address bias in its search algorithms?", "773283eb-6b5a-4256-8ddf-8f255df3f593": "How effective are Google's efforts in mitigating unintended bias in search results?", "d57e3ef2-9ba0-4083-91e5-bbbc46ea63c6": "What are the ethical implications of unintended bias in text classification and search algorithms?", "ae1eb349-2033-4722-ae50-edd77d230dee": "What are the expectations for automated systems in 
sensitive domains like criminal justice and health?", "f123f8ad-0f9d-473a-a63f-f10e7686745d": "How can human oversight be implemented in automated systems to avoid discriminatory impacts?", "61295097-606f-4249-968f-47372578c40c": "What safeguards should be in place for automated systems used in employment and education?", "663dd7a9-c1b9-432e-8fbf-ea8ec15a775b": "Why is it important for automated systems to have narrowly scoped data and inferences in sensitive domains?", "a0a14c6f-86f9-4164-99b9-4db3716b659e": "What are some examples of inappropriate impacts that automated systems should avoid in sensitive areas?", "5b5f3867-1803-4238-af24-bd788829ee4c": "How can technical standards and practices be tailored for specific sectors when developing automated systems?", "5e60b645-855e-47df-8ee9-48b1a1a83efd": "What role does human oversight play in ensuring the proper functioning of automated systems in sensitive domains?", "7e2af9b3-68e7-46c0-9213-acdc636f4273": "How can automated systems justify each included data item or attribute in sensitive domains?", "3176e372-80fb-44f7-ba7b-210022be925b": "What are the potential risks of using automated systems in criminal justice and how can they be mitigated?", "dc35ee01-7ac3-4ccf-8b45-14b5aa5a6c9e": "How can automated systems be designed to meet the expectations laid out in the framework for sensitive domains?", "d599c961-25c4-4fee-8ed1-d5c62a73647f": "What is AI red-teaming and how does it work?", "73118592-ad30-419d-b6b2-3dd0d9384da9": "Why is it important to have large groups of AI red-teamers?", "e5807faa-9a9f-4510-807d-c4553fcd9780": "What are the benefits of having domain experts in AI red-teaming exercises?", "d70f0a08-4229-4e3c-96e1-75b97e2bd955": "How can AI red-teaming be applied in the field of cybersecurity?", "e56fdc33-7ce6-4293-9d14-efd06aa134c7": "What challenges are faced when recruiting specialists for AI red-teaming?", "e3954bfb-ba3d-4cae-bbcf-993ccb500b97": "How can a combination of experts and non-experts improve AI red-teaming exercises?", "1bbd547f-2198-4ec1-ae87-5a5ac9ebc2d1": "What specific skills are required for effective AI red-teaming in medicine?", "54655f06-a110-44d5-84da-2b618c017d76": "How do AI red-teaming exercises help in identifying harmful model behaviors?", "775f37c2-5451-467c-8c55-81cbdf42dc44": "What are some examples of harmful behaviors that AI red-teaming aims to prevent?", "6092bd99-29ad-4e06-bb27-ad1ae9006a58": "How can AI red-teaming be beneficial in the biotech industry?", "bc600deb-fbcd-400f-9140-0d915fcdaa91": "What are the limitations of current pre-deployment TEVV processes for GAI applications?", "2aef734a-ac35-429c-a7ed-45f4bc41a30b": "How do organizations measure performance and capabilities in pre-deployment testing for GAI?", "14c90494-2295-4e90-ae42-73e7ae4a413c": "Why might anecdotal testing through video games be inadequate for GAI systems?", "cb928351-26a1-491c-9262-10297fb46ede": "What are some recommended \"pre-deployment testing\" practices for GAI?", "90e2f5ef-ab38-4486-ac93-f172d2500a85": "How do current pre-deployment TEVV processes fail to reflect deployment contexts?", "d860db03-3ac9-4ee9-8e9d-68730a2dbd6e": "What are the risks associated with using standardized tests designed for humans on GAI systems?", "7629f220-d4f3-493d-8160-89a311a495a7": "How can organizations improve the validity and reliability of GAI system testing?", "e54dbc2a-f5cc-47d4-b7f5-2cdad36a161d": "What is the role of risk measurement and estimation in pre-deployment TEVV for GAI?", 
"864610a5-0a37-4ef4-98d4-1b6d7d32f886": "Why might jailbreaking or prompt engineering tests be insufficient for assessing GAI systems?", "8fc6576e-192f-4f3b-af15-d1c21b908da9": "What are the state-of-play methodologies for pre-deployment testing of GAI systems?", "9bec12eb-4fa7-477e-a667-fd165c16f38d": "What are the key principles of the Privacy Act of 1974?", "ce2ec10e-da61-4462-b06b-0aa052282a80": "How does the Privacy Act of 1974 limit data retention in federal records systems?", "782d7b57-c109-49a4-aee8-fce11007be0d": "What rights do individuals have under the Privacy Act of 1974 regarding their personal information?", "04329baa-7e91-421f-b129-dcab9672ade8": "How can federal agencies determine what data is \"relevant and necessary\" under the Privacy Act?", "f84ca6ea-58d1-48f6-a5a1-cd8ef9f4658f": "What are some real-life examples of laws that protect data privacy?", "f2ce84d8-0c07-4e0e-9bcf-3aaecc8dc8a2": "How do technical approaches help in protecting data privacy?", "366462e4-8d9e-40ee-b30d-f4817377ba05": "What are sociotechnical approaches to data privacy, and how do they work?", "e8433dc8-3bc8-4eee-9a40-71e4005f3386": "How can policies be designed to ensure data privacy in practice?", "ead8c5d0-e517-4da2-b4b7-885f6c6031b4": "What are the limitations of the Privacy Act of 1974 in protecting personal information?", "7f75740d-9d77-4338-9f3a-78ab85ae1906": "How can individuals access and correct their data under the Privacy Act of 1974?", "caeaec67-6507-428f-8b4b-41751a7af496": "What are the common data privacy violations in system training data?", "2987bea3-66c4-4ccc-92fb-5c073f60fd97": "How can organizations address intellectual property concerns in AI training data?", "48581f7f-03f6-4ee0-abe0-09bdb7f54800": "What measures can be taken to prevent obscene or degrading content in AI outputs?", "66f09013-2126-4c82-bc9a-c3a11320b3d7": "How can harmful bias and homogenization be mitigated in AI systems?", "a7a8bc83-2e72-4a24-a793-f1b816f0c283": "What are the risks associated with dangerous, violent, or hateful content in AI-generated outputs?", "cfb4d915-c088-4618-925e-02e974b919bd": "How should organizations re-evaluate safety features of fine-tuned models when risks exceed tolerance levels?", "ae31cf33-efff-4297-a26b-ea5db0b92c1c": "What steps should be taken to review GAI system outputs for validity and safety?", "c8138f61-21b5-4802-ac6d-f18233410054": "How can organizations ensure that generated code does not lead to unreliable downstream decision-making?", "0f1e8318-43b1-4523-8576-17ceb1fc8061": "What are the best practices for verifying that GAI system architecture can handle and recover from security anomalies?", "25ff123c-f46a-4669-9903-16149a4ec2d2": "How can confabulation and information integrity issues be addressed in AI systems?", "1e6c80b3-721c-45ae-88a6-cf0f8ced7156": "How can organizations effectively leverage feedback from boards or committees when deploying GAI applications?", "df6054be-5a75-416a-9eaf-0be1a05ee1d6": "What are the best practices for using human moderation systems to review AI-generated content?", "288339c7-c00a-4dd4-aa6b-e2002ba889ce": "How should organizations align human-AI configuration policies with socio-cultural norms?", "deaad193-bda5-46ff-afa9-72206425c873": "What criteria should be used to evaluate the performance of pre-trained AI models?", "2d47d0ae-c76d-4ae5-a638-a6517c6220ba": "When is it appropriate to decommission or retrain pre-trained AI models?", "76fab1b0-0e24-4c35-a7ed-8d49cfe2720b": "How can organizations determine their risk 
tolerance for AI deployment?", "495b8ac6-a421-46e9-a6c3-985e29b71272": "What are the key considerations for integrating third-party pre-trained models into an organization's value chain?", "9326e08e-8852-4d72-a983-7e09ff35af6e": "How can human moderation systems be configured to handle AI models that perform poorly?", "a4b17ec5-fc35-40d7-9721-d18501274964": "What role do organizational boards or committees play in ensuring information integrity in AI applications?", "9bc8756b-e0e6-4b2c-8ff9-a26618ca918d": "How can organizations monitor and manage the risks associated with AI deployment and operation?", "fc3b2287-133b-4a7d-beb3-436fe108fc3f": "What are the best practices for reporting AI system errors and near-misses?", "b35cb75c-71fc-4af1-9c57-c5ae6ccd8cb3": "How should incidents involving AI systems be communicated to regulatory bodies?", "381e4eab-6b5b-4ca7-8571-31aba3dfe943": "What policies should be in place to track negative impacts of AI systems?", "08bba123-7cf2-4473-830e-9c28dc6dbce4": "How can organizations ensure information integrity when dealing with AI confabulation?", "ff25d5f3-7350-498b-8341-45dae4e79977": "What are the legal requirements for reporting AI-related incidents?", "8ae5b2b8-aeff-4c5e-aa72-6924b2fc4dd3": "How can companies establish effective procedures for recording AI system errors?", "c1f383d3-687b-4a57-a98a-c2f2212368bf": "What steps should be taken to maintain information security in AI systems?", "a8540ee7-471f-42c8-8ff5-4d74af3e9e84": "How do you handle confabulation in AI systems to ensure accurate information?", "abee8f0e-95b6-4a66-9584-2fa2c87344cd": "What are the key components of an incident communication plan for AI systems?", "86a7086e-9edd-4247-96a7-f4bd37a9d20e": "How can organizations track and mitigate the negative impacts of AI systems?", "582fa583-c146-4227-a529-e7428ecadbd5": "What are transparency artifacts in AI, such as system cards and model cards?", "f5ab7c6f-0d8c-4379-a524-ea0abad40378": "How do transparency artifacts help in managing third-party AI models?", "62dbacb4-0a6c-437d-b130-68d38e4fb182": "What is the importance of monitoring pre-trained models in AI systems?", "a8ba6c23-7fb6-47bc-8dcd-4242dca46afe": "What are some techniques used in explainable AI (XAI)?", "495f990c-cfe1-4bd2-99e7-728aaca889b1": "How can explainable AI (XAI) techniques mitigate risks in AI systems?", "dbf021da-491b-4d03-b6c9-7641362406aa": "What is model compression/distillation in the context of explainable AI?", "f2e9d547-10c8-470f-be55-7919d3b9fc76": "How does gradient-based attribution work in explainable AI?", "333ba50b-1816-4a01-9ec5-0dc8942bd96e": "What is the role of counterfactual prompts in explainable AI?", "f743f398-d354-4757-a791-669763758105": "Why is it important to document adaptations of pre-trained models?", "493410a8-8da9-4c05-af25-02344ce5fb36": "What are the risks associated with unexplainable generative AI systems?", "d43b5165-b424-4efb-8ac5-c70839068a9b": "What are the key components that should be included in the reporting of accessibility evaluations for sensitive domain systems?", "e745cebf-169d-42f5-a769-451d4e947df1": "How should training and governance procedures for technologies in sensitive domains be documented?", "48cc0ef1-0635-4131-ad9d-a75b6f87ba6c": "What kind of information should be included in the documentation of goals and their assessment?", "77a87b11-faf6-450d-bc8c-8fc3bb450471": "Why is it important to consider the data included in the reporting of accessibility evaluations?", "32dbfbc4-6fc4-4307-879f-1a14277108ac": 
"How can the governance of reasonable access to technology be documented effectively?", "d129f587-db8c-42b2-b15b-f48854e829b5": "What are the benefits of providing reporting in a clear and machine-readable manner?", "595e4aba-2967-46e1-9276-4619eaf23ee3": "What are the best practices for documenting training procedures for sensitive domain technologies?", "70ad54f1-132d-4b05-b0d5-ec776fc6a092": "How can organizations ensure that their accessibility evaluation reports meet the required standards?", "e782da3f-efb4-4a2b-952d-815b7704c1c2": "What challenges might arise in documenting the governance of access to technology?", "d7780233-e970-44f8-941c-d04776c461f0": "How can the assessment of meeting goals be effectively reported in accessibility evaluations?", "411f2cb4-761f-4481-acc9-3379b98be822": "What are gradient-based attributions in the context of GAI risk measurement?", "aff655e4-7805-4005-96dd-97e57a4ed1f6": "How does occlusion/term reduction help in improving the transparency of GAI systems?", "b2cca45d-920c-4cfe-81d7-5dfa18ea4ac3": "What are counterfactual prompts and how are they used in GAI risk assessment?", "c834f9d2-569f-4a49-aecb-ae1382201dda": "Can you explain the role of prompt engineering in mitigating risks associated with GAI systems?", "15467914-2915-4097-a975-a2b7a2bf6e0d": "How is the analysis of embeddings useful in measuring GAI risks?", "7233a36e-44dd-4945-bdca-92ff30a47933": "Why is it important to assess and update risk measurement approaches for GAI systems regularly?", "22334989-4055-455c-a95e-7e517df2a381": "What are the benefits of using standardized measurement protocols in GAI risk assessment?", "d818454e-443f-4926-8398-9edbf7ce1b23": "How can AI red-teaming contribute to the risk measurement of GAI systems?", "895f4711-def6-4b59-833c-f71a070facef": "What is the significance of independent external evaluations in the context of GAI risk measurement?", "c4d00e1c-836a-4ceb-973d-e0df3779b23f": "How do policies, procedures, and processes help in detailing risk measurement for GAI systems?", "1057d1c5-3ac4-4d8c-9193-566dac0f5640": "How do new surveillance technologies in education disproportionately harm disabled people?", "8c3f757b-977d-4050-b35d-aea49c010cf6": "What are the impacts of surveillance technologies on disabled individuals in the workplace?", "1468fc8a-bccd-4e2e-a99c-fd18b4bd12c2": "How does policing with new surveillance technologies affect disabled people?", "ff6dc571-3da6-4cb6-ab29-a41494392ed0": "What are the specific ways in which health care surveillance technologies harm disabled individuals?", "5e523726-a9d1-4807-9295-3881cd46f457": "How can we mitigate the negative effects of surveillance technologies on disabled people in various sectors?", "83c676ae-600e-4c27-98f4-24e784b1d7e9": "What are some examples of ableism in new surveillance technologies?", "c2b9fec3-5182-456a-8d05-894e91037f8d": "How does the Center for Democracy and Technology suggest addressing disability discrimination in surveillance?", "0da8dd83-5ddb-4544-a4ca-0fe64ff97e5d": "What are the ethical concerns regarding the use of surveillance technologies on disabled people?", "3d03cf3e-8e29-4424-b5ee-db3ce2aef10c": "How can education systems ensure that surveillance technologies do not harm disabled students?", "9466ba73-2db9-4dbb-a45c-9cf5fc4b9233": "What policies can be implemented to protect disabled people from the harms of surveillance technologies in the workplace?", "c8753999-1abf-4fe0-b602-96c7badbe1b8": "What is the main argument presented by Tirrell in \"Toxic Speech: 
Toward an Epidemiology of Discursive Harm\"?", "b146064b-3808-4dc7-ae9c-1fd32c51462d": "How does Tufekci address the challenges of computational agency beyond Facebook and Google?", "26296ada-303b-4757-99c1-d0b666b5ebe6": "What are the key findings of Turri et al. in their study on AI incident documentation practices?", "22a9a1b9-5625-453a-8f38-7e6e1faece16": "What concerns are raised by Urbina et al. regarding the dual use of AI-powered drug discovery?", "456281a3-2fc9-42d0-aaee-b9e644b01898": "How do Wang et al. evaluate the energy and carbon considerations of fine-tuning BERT?", "d0fe0b08-146c-467e-8042-9160ccec66de": "What is the purpose of the \"Do-Not-Answer\" dataset introduced by Wang et al. in their 2023 study?", "1c65bc00-7698-4aac-ac75-f4cf0a56d7d7": "How does the concept of discursive harm relate to toxic speech according to Tirrell?", "3b23f18c-6948-482c-ad4b-4094da577801": "What emergent challenges of computational agency are highlighted by Tufekci in her 2015 paper?", "4474dccd-e072-4606-bf97-01a59797d81c": "Why do Turri et al. believe it is important to improve AI incident documentation practices?", "bc98f064-db21-492e-8cbe-6bb73bfc8422": "What are the potential risks associated with AI-powered drug discovery as discussed by Urbina et al.?", "660c7ba7-9614-4425-afbe-8e9230f9e5a3": "What are the main risks and harms associated with automated systems discussed in the panel?", "3d79fef1-7ec1-4629-8dbb-c4c88e55354f": "How do automated systems impact consumer rights and protections?", "19a6f91d-d99b-479f-974f-60046204684a": "What insights were offered regarding the use of automated systems in the criminal justice system?", "926d89a3-3c21-4df7-9dfd-ced249915ecb": "How can automated systems promote equal opportunities and civil justice?", "8aee8607-430b-469e-86d5-4232adc7e194": "What are the policy opportunities for regulating artificial intelligence to align with democratic values?", "f7611088-a63f-4ae9-a027-929e84a6f966": "How do automated systems affect social welfare and development?", "93b98b07-e59b-4886-bc4c-6b487d0c9dc7": "What are the benefits of automated systems in the healthcare system?", "47a623c2-1493-45ff-82a5-c288ad0a01c7": "Who were the key experts and practitioners involved in these panel discussions?", "b42c870c-4507-4740-8c36-662932a8b9fc": "Are there any specific case studies or examples mentioned in the discussions about automated systems?", "38e32513-a3a7-4720-96e6-35e11ae830b5": "How can the public access the recordings of these panel discussions?", "1c8ce2b7-1996-402e-b151-319c407a4805": "What is the purpose of the framework developed by non-profit organizations and companies for machine learning systems?", "0ac24fc9-606c-4eaa-8576-5bafe235903d": "How does the framework for machine learning systems go beyond simple notice to inform the public?", "66d10b11-4e1c-4323-90e6-162723085f38": "What are some of the reporting elements included in the framework for machine learning systems?", "be510be5-869b-457f-8999-878fdb0b09ba": "What federal laws require lenders to notify consumers about certain credit decisions?", "e5d619fa-49b5-435a-adc3-565acb71c415": "What is an \"adverse action\" notice in the context of credit reporting?", "c5c5e007-9f35-46e7-aa48-ce8e0f608d5b": "Under the Fair Credit Reporting Act, what must be included in an \"adverse action\" notice?", "5437738f-c791-44c0-bb4c-acc4b6d2ff09": "How do the Fair Credit Reporting Act and the Equal Credit Opportunity Act protect consumers?", "cfe9c2ed-5bf9-478c-89c7-794e93148715": "What are disparity assessments in
the context of machine learning systems?", "48425d12-7cb9-4383-b947-e6e9566b9e7e": "Why is transparency important for machine learning systems used by companies and non-profits?", "692bc9da-55ae-42fc-b205-f3bafca5a068": "How do safety evaluations contribute to the transparency of machine learning systems?", "d7c50441-1bb2-4a13-a9b7-608f14fd60fe": "What are the key actions taken based on the results of independent evaluations?", "614fef06-0fe5-4830-8050-73ba9a266f5a": "How can I ensure that my reporting is in plain language and machine-readable?", "1a1246aa-f540-4d9b-970a-69324e141925": "What procedures are typically followed during independent evaluations?", "c5c03047-1888-498b-8f37-9d697a2d08d0": "Why is it important for reporting to be in a machine-readable format?", "0b6f15fa-f4eb-4918-81c3-03c0a661c2c6": "What are the benefits of using plain language in reporting?", "69197eb3-9af4-4e2f-a207-c4e672d92d0c": "How do independent evaluations impact decision-making processes?", "018142f3-7555-4ee1-9679-91e57ae8774f": "What tools can be used to create machine-readable reports?", "fb522bed-6381-4de1-8e9f-51b4ea153f04": "How often should independent evaluations be conducted?", "1cb575b3-9f7b-4da7-8af1-f027dd6c2a12": "What are some common challenges in making reports machine-readable?", "433a4b30-f091-441a-af1e-ce2dcc4d8afb": "How can I improve the clarity of my reports using plain language?", "1437e5d7-0d57-47c3-a4a7-051a8d7ff1b3": "What are the best practices for allocating risk management resources in the context of GAI use?", "ad5327c9-e60c-470b-8f4b-f68f13ef62c3": "How can organizations effectively assess the severity and likelihood of negative impacts from GAI?", "e37a9ed4-73f7-42c6-8260-88f6a0063f99": "What are the differences between model-level and use-case level risk mitigations in GAI?", "f2c16031-33cc-4d57-989d-4964ca15cdae": "How can unknown GAI risks be scoped or evaluated given the uncertainty about its scale and capabilities?", "118530e2-7547-4cc3-ad5b-6568ecae31c4": "What challenges do organizations face in estimating known GAI risks?", "c573b1a0-67f0-492f-bcc7-67d803c58308": "How does the lack of visibility into GAI training data affect risk estimation?", "e343ee0e-f5b4-42b5-bc79-b18001db5dc5": "What are the current limitations in the science of AI measurement and safety?", "55a01f18-ee4f-4970-ac12-de3cd610e0b0": "How can empirical evidence be used to address GAI risks?", "0bd8ff53-8c08-4507-a5f3-550b439e5efc": "What are some examples of speculative risks associated with GAI?", "bb56381e-85cc-43fb-8e88-7117f7aacd52": "How can stakeholders manage the wide range of inputs and outputs in GAI to mitigate risks?", "a975c7d1-2ee6-4b57-849b-847b4a68ac46": "What happens to former Apple employees' titles when employers verify their resume information?", "0611d75e-5339-4d6b-8f5d-9768777ec2f5": "Why does Apple replace former employees' titles with a generic title?", "afb164b5-c73e-4af5-984b-7ac0babf5ec6": "What is the National Institute of Standards and Technology's Privacy Framework?", "290e48c4-183f-4050-a61b-d3d835bca368": "Where can I find success stories related to the NIST Privacy Framework?", "6bb17fa3-0268-4f77-a5f6-77274be300b9": "What is the ACLU of New York's stance on facial recognition in schools?", "741f4783-a448-4014-a7f1-a230bb81f3da": "What should I know about New York\u2019s temporary ban on facial recognition in schools?", "46545ce3-f86d-4a59-89f3-76f23173b07e": "When was the amendment to the New York State Education Law enacted?", 
"9cc2b4c1-cc01-463e-bc84-67dd2629f727": "How can I access the text of the New York State Assembly's amendment to the Education Law?", "818ef24e-1ab9-460f-ab5c-12753804fd50": "What is the Labor-Management Reporting and Disclosure Act of 1959?", "5741db49-72b8-4e42-bb0c-8b664a4b6a54": "Where can I find information about the amendments to the Labor-Management Reporting and Disclosure Act of 1959?", "243360b2-c711-4a99-94ee-2d017ae0b586": "How can generative AI contribute to the spread of disinformation and misinformation?", "a6579796-6adc-43f8-b162-a104e18bfd58": "What impact did the synthetic image of a Pentagon blast have on the stock market?", "4e1b2a11-72d4-409a-bff8-53dbfd748eef": "How do generative AI models assist malicious actors in creating propaganda?", "867fc430-fe4c-420b-a096-016280fb19a5": "What are the characteristics of trustworthy AI?", "1c1161cf-f257-43b3-820c-89d59cf30c25": "How can generative AI be used to create fraudulent content?", "b06a86d1-7e66-456c-ac18-f879ce265907": "What are the potential downstream effects of disinformation facilitated by generative AI?", "78140874-f80e-4e3b-929f-f5a10c950800": "How can AI-generated imagery affect public trust in valid information?", "cffecf56-bb7c-43ce-ac11-2bc5285c2d7a": "What are the standardized practices for information security in computer systems?", "782f973d-10c8-4637-b58d-78028023f937": "How can generative AI models be made accountable and transparent?", "c1507631-a4af-4438-86d8-01fe4c764de7": "What are the challenges in ensuring the safety and reliability of generative AI systems?", "77d8a4e8-7880-4a14-9f78-6951755d3877": "What are the five principles identified by the White House Office of Science and Technology Policy for guiding the design and use of automated systems?", "31445acf-bbb3-4ba2-815f-8f4fc6c17277": "How does the Blueprint for an AI Bill of Rights aim to protect civil rights in the age of artificial intelligence?", "45302ae8-2e25-49cc-bf44-08b27c61b02b": "What role does the right to privacy play in President Biden\u2019s vision for civil rights?", "3bbac743-bb7b-4abb-901a-036782abe40d": "How can the Blueprint for an AI Bill of Rights reinforce the highest values of society?", "3c04f443-114b-4094-b7ea-0850a1205413": "Who contributed insights to the framework for the Blueprint for an AI Bill of Rights?", "5f301a21-1a6e-4fe4-bbc4-3ed0207e093c": "What is the purpose of the technical companion to the Blueprint for an AI Bill of Rights?", "117fc83e-30f4-4fe8-b500-30f64f143ed2": "How does the Blueprint for an AI Bill of Rights respond to the experiences of the American public?", "0cb33d22-ac9a-4005-af6e-aa9926c5be8b": "What are some threats that the Blueprint for an AI Bill of Rights aims to protect people from?", "bb631381-802a-4eaf-ad0f-69f0760683bd": "How can policymakers incorporate the protections outlined in the Blueprint for an AI Bill of Rights into practice?", "f58c6ed9-bdbc-40f5-a23e-b7a8a3b74b39": "What is the significance of the right to privacy in the context of automated systems and artificial intelligence?", "c4d1371c-ac60-4fbb-9bbf-22fd9a717c1f": "What is the role of the Innovation Foundation in the tech industry?", "fd3f2838-886a-4dc2-b79c-47708237c0d6": "How does the Information Technology Industry Council influence IT policies?", "1c90fdb5-fe0e-4f62-81ef-68f4f27e5611": "What are the main objectives of the Innocence Project?", "7d7082bc-3629-4c33-bf06-8d1de10409f5": "What research areas does the Institute for Human-Centered Artificial Intelligence at Stanford University focus on?", 
"b9df2f99-799c-46bd-a20d-5f8d2914b226": "How does the Integrated Justice Information Systems Institute contribute to law enforcement?", "a6631d31-0e87-473e-a319-90fcda9c1c3d": "What initiatives does the International Association of Chiefs of Police undertake to improve policing?", "d7d6e108-130a-4ab9-915e-d9ee5a20c635": "What are the key functions of the International Biometrics + Identity Association?", "b9ccc9b0-0028-4ba4-bbb6-5233720451b2": "How is IBM (International Business Machines Corporation) involved in AI development?", "609aa6d8-e41e-4fc7-83a5-2cf32901c497": "What humanitarian efforts are led by the International Committee of the Red Cross?", "2e407155-794d-4127-a539-3071466d8964": "What is the mission of the Lawyers\u2019 Committee for Civil Rights Under Law?", "b3a14e62-cbd5-446d-bce9-c73acd2baea8": "What are the main challenges in mitigating bias in AI systems?", "236f0306-90cd-49d3-8ec0-6a0cba6252a5": "How do datasets contribute to bias in AI?", "e08d701d-e244-404e-a35d-5e7a74b18eb5": "What are the best practices for testing and evaluating AI for bias?", "a0516a4f-2bd5-4c53-9ddc-49b6d709e2ef": "How can human factors influence bias in AI?", "18f7d41a-eacc-4b77-bff7-dd63adf0b0e5": "What is a socio-technical perspective in the context of AI bias?", "b1ff8e57-f9c8-4c9c-b80d-0b8cab8414d0": "What preliminary guidance is available for addressing AI bias?", "99396743-8ac7-4e6a-8174-ca002a8bc22c": "How can algorithmic discrimination protections be implemented in AI systems?", "e1119908-110c-41ef-a480-2c1db95e1206": "What role do datasets play in algorithmic discrimination?", "59a7e5e5-28fd-40e7-9eef-7a8c47988333": "How can we ensure fair testing and evaluation of AI systems?", "f6fccc6d-ae1d-4832-a6a1-6a02d216d0ef": "What are some strategies to manage human factors that contribute to AI bias?", "2ad0d5f2-d9ad-4172-9854-643c6a273f4a": "What are the ethical considerations for human subject experimentation in medical and scientific research?", "d378ac98-249d-4ce1-9350-5282c48770f8": "How can organizations ensure the quality of data in sensitive domains?", "7dee64d8-bf12-4366-bc43-ff71821a2047": "What are the consequences of using flawed or inaccurate data in decision-making?", "79cbd8a2-e147-44bc-8a55-26401ebfb951": "Why is it important to conduct regular, independent audits of data?", "5841722f-bf15-4fbd-a76a-b321b179aec4": "What measures can be taken to maintain accurate, timely, and complete data?", "6b8892ae-acb4-4abd-9d0e-befbaa39382f": "How should entities handle the burden of reviewing and correcting data?", "d7804551-0ff7-4e12-b264-7be2a008bed1": "What are the best practices for limiting access to sensitive data?", "f7d942c1-1e8f-484b-8edf-c8d0a64caf8b": "Why should sensitive data and derived data not be sold?", "42a9d92b-e27b-472c-ad56-07cb1140ffd9": "What governance procedures are necessary for human subject experimentation ethics?", "480733dd-d93f-4c4b-9b40-56e5910e831f": "How can organizations prevent adverse consequences from decision-making based on inaccurate data?", "7798df83-4529-4317-b89d-746485eb64a4": "What is the purpose of the yearlong process led by the OSTP?", "f695f50d-54d0-49c4-be89-203b99be0e61": "How did the OSTP gather input from people across the country?", "0daa74c8-7975-4a0e-ad33-5c760795c2d8": "What types of stakeholders were involved in the OSTP's process?", "28c42deb-d5d5-4709-b15d-3cea81950b3f": "What are some potential harms of algorithmic and data-driven technologies discussed?", "ae24e00b-3f03-4260-b4ed-5e6848c7864b": "How did public listening 
sessions contribute to the Blueprint for an AI Bill of Rights?", "8b794925-1544-465c-82ec-bc7f37298137": "What role did the Center for American Progress play in the panel discussions?", "605617f7-69f2-410c-b50a-c0974457693e": "How were impacted communities involved in the OSTP's process?", "47c40993-468f-4792-b50a-d2624c30ed04": "What is the Blueprint for an AI Bill of Rights?", "8f2a4098-0fb5-4199-b811-3a63eacf1485": "How did the OSTP ensure the process was inclusive of various experts and policymakers?", "ebef13ba-6679-4da6-92e5-0cf132673dba": "What were some of the promises of algorithmic and data-driven technologies mentioned?", "bc196094-f147-4147-8ab1-25cf7cad041b": "What are the key components of effective change-management controls?", "aa3c17a6-1a44-48e0-9bac-245822879aee": "How can businesses ensure data provenance in their commercial operations?", "d4eb8817-0f37-4caa-ab0c-ed5583f18564": "What are the best practices for implementing change-management controls in a commercial setting?", "74a83c47-6ea7-4e21-9325-9cd0a9b29316": "Why is data provenance important for commercial use?", "40cace51-c6f7-4263-909a-934539ab4313": "How do change-management controls impact data integrity?", "104aa783-0d34-44c5-bf81-967d2c43daf8": "What tools are available for tracking data provenance in commercial applications?", "e1f816cd-f3d5-4510-b7af-29babd74b1d9": "How can change-management controls help in regulatory compliance for businesses?", "561ce82a-df07-4979-adaa-2450c3b8fcda": "What challenges do companies face in maintaining data provenance?", "8868798f-7d64-4e60-bc77-56725a2a1c37": "How can change-management controls be integrated into existing business processes?", "8474f531-52b9-49b1-96e3-7e64a855cafe": "What are the benefits of ensuring data provenance for commercial enterprises?", "2f0ad5b7-bd5d-4662-a86c-d0ad96f67935": "What is the DeepNude app and how does it work?", "6a01a7c9-235a-4a09-9bd5-e38c1dbc4b94": "Why is the DeepNude app considered horrifying?", "b8db359d-0588-4011-aae6-c6838c31805d": "What are the ethical concerns surrounding the use of the DeepNude app?", "754f8572-6872-48c8-a9c5-c998b8091f62": "How has the public reacted to the release of the DeepNude app?", "eeb0bfdc-2e3d-4d88-b02e-9c4f33d462a8": "What measures have been taken to address the issues caused by the DeepNude app?", "5412af4a-850c-44fb-9945-95686cb9ad6a": "How do Amazon\u2019s AI cameras monitor drivers?", "6351f3cd-667a-4aad-857f-81df7b601da0": "What kind of mistakes are Amazon\u2019s AI cameras punishing drivers for?", "8dbba1cb-269d-4957-b4fd-308f00b91882": "What are the implications of AI cameras punishing drivers for mistakes they didn\u2019t make?", "fcbbdd00-85b9-4396-9b7a-5de417c1179f": "How have Amazon drivers responded to the AI camera system?", "4d05c32c-5866-4426-a745-2d2c554a5691": "What steps can be taken to improve the accuracy of AI cameras used by Amazon?", "7f590c18-47a2-438e-977a-cdd098e7fa86": "What are participatory engagement methods in product development?", "edde2af3-d243-4ef8-869b-bbc0626bc609": "How can focus groups with experts be used in AI development?", "541450fd-838a-4b58-94cc-20e3944691ea": "What is the role of small user studies in evaluating new products?", "b0e29ede-db9c-4382-943f-c21b4946bdb7": "How do anonymous surveys help in gauging reactions to specific features?", "e6832258-3b72-4cb6-a1d3-f906eb781da4": "Why are participatory engagement methods often used in the early stages of AI development?", "b6d68612-46b8-42cc-ab3e-31be5702199b": "What is the difference between field 
testing and participatory engagement methods?", "f46074c9-19da-4ec7-90c0-400d08cb34d4": "How can field testing simulate the conditions under which an AI system will be deployed?", "a8106443-688c-4550-a5fe-1c5ab81fcab9": "What are the benefits of using structured settings in field testing for AI systems?", "4b8e23f4-abf1-4143-93a4-47f53784794f": "How can field style tests be adapted to focus on AI risks and impacts?", "fb69db56-a88a-4b72-818e-8881601a2bc5": "What are the key differences between field testing and red teaming in AI development?", "b78a1120-785c-4e9b-adaa-d3f2d69f0561": "What initiatives has the Biden-Harris Administration taken to increase the number of Health Care Navigators?", "18726698-c2d1-4390-aa93-ba12c019bdec": "How has the number of Health Care Navigators changed ahead of the HealthCare.gov Open Enrollment Period?", "05e386f4-de4f-4b24-911b-1719161ea196": "What are the key findings of McKinsey & Company's report on the state of customer care in 2022?", "3cd1a4cc-ca2b-4d1e-8ada-446a7c42cb20": "What customer service solutions are recommended for small businesses according to Business News Daily?", "182886ff-5412-4948-8295-701e0d7fbfe1": "How can small businesses improve their customer service based on Sara Angeles' article?", "37631ff1-592b-4472-94a4-2ce2caf8eafc": "What are the benefits of co-intelligence between robots and humans in customer service, as discussed by Mike Hughes?", "75e3d9c3-d911-48df-9ddb-8914d28e9008": "How effective are bots in customer service according to the Forbes article by Mike Hughes?", "421b5545-cea3-4524-bdc8-d3a22c30dd6c": "What strategies can businesses use to get the best out of their bots in customer service?", "83379563-52c0-4b59-b40b-f4418017f011": "What are the latest trends in customer care as reported by McKinsey & Company in 2022?", "d76b947b-8f88-4163-9e86-05a0d2e86981": "How has the Biden-Harris Administration's approach to Health Care Navigators impacted the HealthCare.gov Open Enrollment Period?", "9bd4e2e2-cd2d-4535-9dce-679178b1e43c": "How can we reduce the environmental impacts of AI models during inference time?", "d3ba2ee8-7ef9-41ad-92b7-b27c7adbe4b0": "What are the challenges in estimating the environmental impacts of Generative AI (GAI)?", "787e31c9-98ed-4d18-a73c-6e891f3e92a6": "How do AI systems perpetuate harmful biases?", "0649769f-3bca-4cfa-8515-d227e06e7fe9": "What are some examples of biases in text-to-image models?", "83843f77-efa5-4c8e-b8ea-95ff293e2900": "How can AI systems be made more accountable and transparent?", "94bc029d-f508-4768-a427-507ea45dfe8e": "What steps can be taken to ensure AI systems are safe?", "679f0e14-0fda-4866-baa6-90cbd9892af0": "How do biases in AI systems affect different demographic groups?", "93f909c5-8ab0-445e-893d-16ef7d118607": "What is the impact of harmful biases on society when using AI systems?", "571ae548-9f61-4e1e-95a1-4357dbb1c8b8": "How can we address the underrepresentation of women and racial minorities in AI-generated images?", "30e3bc0e-5f22-4216-a2c0-f27f861b5634": "What are the consequences of biased or stereotyped outputs from image generator models?", "1b6a2ed0-d374-4c90-9b2f-26802ca2cc85": "What are examples of sensitive domains that require enhanced data protections?", "6c6d9102-09bf-4244-b8ba-21686bf078c4": "How do sensitive domains impact human rights such as autonomy and dignity?", "397d1460-bd65-4980-8b60-2ff58fcfc82e": "Why is health considered a sensitive domain?", "bc39145c-d1dd-4e69-a545-04944b0249af": "What makes family planning and care a sensitive
domain?", "c567b788-ba2a-471d-8a61-f8acc8154f67": "How does employment fall under the category of sensitive domains?", "29011969-35fb-4185-9b93-65a5945bf067": "Why is education considered a sensitive domain?", "5f790da9-449c-41e8-865a-7f51afe460fc": "What are the implications of criminal justice being a sensitive domain?", "cd10682a-f510-462e-b688-e6038e4d2166": "How is personal finance classified as a sensitive domain?", "20c5ca9a-d0a2-4fa0-b02f-0f7351890b70": "How do societal norms influence what is considered a sensitive domain?", "7d213d18-b9dc-4003-bf54-21ec8dfb8397": "What is the role of surveillance technology in sensitive domains?", "575386df-5852-4b96-98b2-146127539141": "What are the established security measures to assess vulnerabilities and threats in AI systems?", "f0b324e0-dd60-4f57-b879-e56ece38360e": "How can backdoors and compromised dependencies in AI systems be identified and mitigated?", "5d3ecf42-2ce1-468b-9dbf-e4bbc5707389": "What are the best practices for benchmarking AI system security and resilience?", "e4d68479-8a79-4216-bc1a-4c8b156f3b37": "How do industry standards for AI system security compare to state-of-the-art methods?", "fc9490b3-396e-4ac3-95ef-80d13f6cf544": "What methods are used to ensure the integrity and security of AI-generated content?", "1cd90900-c572-4975-b7b8-fc93f4189d8e": "How can data breaches and eavesdropping be prevented in AI systems?", "2e2511f7-776d-400f-99e8-22c70ecc04d5": "What are the common threats to AI system security, such as model theft or exposure of model weights?", "e3b81075-8ba7-49f1-a7d2-87168a99617e": "How can user satisfaction with AI-generated content be effectively measured?", "e5339170-f4f7-4d0f-851e-a4b684f4de56": "What are the implications of man-in-the-middle attacks on AI systems?", "db8ab762-0912-47d7-9d30-648164bab39b": "How can the security of AI inference and extraction processes be ensured?", "59c7ac21-d712-480d-b1f2-638dff0439e1": "What are the potential risks of using technology in social welfare systems as discussed by the panelists?", "00e72162-956b-4ed9-94b5-b0ce264a828b": "How can digital ID systems impact the efficiency and cost of social welfare programs?", "0dcfc118-6d07-45b7-9542-8c8fa5036354": "What concerns did the panelists raise about the burden on individuals interacting with new technology in social welfare?", "60fe80fd-12ab-4a19-acb4-511d6cade220": "How can feedback loops in technology systems reinforce inequality according to the panelists?", "6152c7f7-4267-40c0-a522-620e1574f605": "What are some methods suggested by the panelists to mitigate the harms caused by technology in social welfare?", "bc2942af-52bd-4fbb-816d-9db9e02d78c1": "Why is community input important in the design process of social welfare technologies?", "ba3309a2-8a2a-4ac9-a89a-5df5b997a37c": "What role does data collection play in the potential harm caused by social welfare technologies?", "8f11cb0a-3059-4f8b-991a-c534983d2ab8": "How can individuals opt out of technology systems in social welfare programs?", "18631367-2871-45d9-8b6f-049eee6e413e": "What are the benefits and drawbacks of using technology for fraud detection in social welfare?", "da3446ed-9014-4635-b576-0dc495e48111": "How can the implementation of technology in social welfare systems affect government agencies and the people they serve?", "bf7e5553-71b4-4912-8f36-d5596c343802": "What is the Blueprint for an AI Bill of Rights?", "99d4fab5-34b6-4be5-ae63-ed96a8f6f688": "How are federal agencies ensuring AI systems comply with the Executive Order?", 
"cee03924-6fe3-43d2-8378-250f3240ea48": "What are the key principles outlined in the Blueprint for an AI Bill of Rights?", "1ccab29f-bc4b-428c-98d7-f783de1b512b": "How does the National Highway Traffic Safety Administration ensure vehicle safety?", "e3ea9516-f177-4b9a-b7a2-f1e46892ec09": "What role do local rules play in the implementation of road safety measures?", "bc2b4942-0fd8-48fa-989a-4c49c46c7b96": "How can strong safety regulations enhance innovation in complex technologies?", "6c84c34b-2170-4b7d-b82c-331dd7539cbd": "What measures are in place to address harms caused by AI systems?", "278b5143-0278-4559-bf04-512ef1f0c323": "How are AI use case inventories being utilized by federal agencies?", "0059e842-db08-4e0a-a061-7d5f37511184": "What is the relationship between safety regulations and innovation in the automotive industry?", "ae4d086c-aee0-4107-8d11-6d98cd132c52": "How does the law and policy landscape for motor vehicles relate to AI systems?", "0f1efd9c-7fbd-4b7b-ab5b-c3e90282a9ce": "What is algorithmic discrimination and how does it affect individuals?", "067133f9-c096-431e-b340-aaafffcd7f18": "How can automated systems be designed to prevent inappropriate or irrelevant data use?", "f50ad4e5-8546-4e08-aa4a-2f9430c6ef48": "What steps can be taken to mitigate potential harms from automated systems?", "74155138-199c-4f03-8c74-36f550d07afc": "Why is independent evaluation and reporting important for automated systems?", "bc6e0508-7335-48f1-89e2-85a0b185e8d7": "How can the public access the results of evaluations of automated systems?", "8c0cd3e7-b416-4eef-9685-68b4a810756b": "What measures can be implemented to ensure equitable use of automated systems?", "90cd38de-1740-48c6-b5cd-21899eb63f8b": "How does algorithmic discrimination impact people based on their race or ethnicity?", "f5303ea3-19f1-4a4f-952a-803e65a5a0fa": "What are some examples of algorithmic discrimination in automated systems?", "4d896f9d-2d20-47a9-8054-3f5b528ef495": "How can organizations ensure that their automated systems do not contribute to unjustified different treatment?", "18fd587a-4be2-4840-9c67-9ea224d4de50": "What protections should be in place to prevent compounded harm from the reuse of data in automated systems?", "dfe37234-9ca3-43f7-a3e8-b86a80b04f3c": "What is the AI Bill of Rights?", "f23d3c7e-3ec7-4ff7-bdab-5cdac88803fa": "Why was the AI Bill of Rights created?", "702c8d56-ad6f-4de1-9926-285169f470d4": "How does the AI Bill of Rights aim to protect American citizens?", "be6e8191-35d3-4f3f-94a2-c509ba2c3e49": "What are the key principles outlined in the AI Bill of Rights?", "4f8e4872-d2dc-4de1-aea1-da76b4471020": "Who is responsible for enforcing the AI Bill of Rights?", "e614182a-3697-4fd6-9166-b61c28d345dc": "How will the AI Bill of Rights impact the development of automated systems?", "d01414e7-9f4e-4a8f-b650-88cbc28f03f1": "What are the potential benefits of the AI Bill of Rights for the American people?", "d6520c9d-85d3-4d22-81a2-22649cf66c30": "Are there any specific guidelines for companies developing AI under the AI Bill of Rights?", "ec9b0344-0489-4878-b603-f0e89be5311b": "How can individuals report violations of the AI Bill of Rights?", "85e83a32-66ea-4124-934e-cc197072ddea": "What role does the government play in the implementation of the AI Bill of Rights?", "00bb38cc-b363-4224-9cad-242db0bd67bb": "What are the applications of General Artificial Intelligence (GAI) in CBRN information management?", "a221d1ab-5fd6-45a4-b6d8-8b93d9ab4bdd": "How can AI be used to detect and prevent the 
spread of obscene, degrading, and abusive content online?", "12a024fb-ee6f-4e56-a00e-3f762d802ec5": "What are the key considerations for data privacy when deploying AI systems?", "857a98f9-c84c-4e60-a015-c7857dfc889e": "How can AI help in monitoring and preventing civil rights violations?", "77ca506b-f725-4298-bb8b-1192ff1727c0": "What tasks are involved in AI development and how do they differ from AI deployment?", "6ad116cd-414d-4fbe-9971-f3c0165700e9": "What governance and oversight mechanisms are necessary for responsible AI deployment?", "06c33b54-b7f9-4684-ae49-4ef790e2a2ee": "How can AI improve capabilities in handling Chemical, Biological, Radiological, and Nuclear (CBRN) threats?", "3af5e01d-6e1d-41c7-a864-f35d9f4c081a": "What are the ethical concerns related to AI in terms of degrading and abusive content?", "1e4703d7-50c1-48e9-9662-fd835aca738f": "How can AI be used to enhance data privacy protections?", "d84d75d8-f21b-48d5-87ba-40e65b40b8e5": "What roles do governance and oversight play in ensuring ethical AI development and deployment?", "dff03de3-cf16-47fe-8c62-7acc7396c8e5": "How is technology helping farmers grow food more efficiently?", "f5dd8a98-1cb1-46cc-9437-9e36b92e9027": "What role do computers play in predicting storm paths?", "4c8db1bc-307b-4041-a352-9f9af6cbb286": "How do algorithms identify diseases in patients?", "8ec278ab-326a-4b26-81cf-07a2748a5f1c": "In what ways is data revolutionizing global industries?", "8e131803-f72b-4edd-a60c-cdf1ae47f020": "How is American innovation redefining different parts of society?", "c4536521-f7e6-42da-876e-138c80d5af1d": "What are the potential benefits of these technological tools for society?", "28061b71-fa18-4a73-973b-595f5e9f23b5": "How is President Biden's administration addressing civil rights in the context of technological progress?", "6a32e6c5-5b4d-425c-9310-ac9e34058595": "What steps has the Federal government taken to root out inequity and embed fairness in decision-making processes?", "8b41d54a-0e4f-4063-9bfb-e5633ef01f3d": "How is the Biden administration advancing civil rights, equal opportunity, and racial justice in America?", "620c90fd-87a8-4508-aef6-49ce040c25b3": "What are the urgent challenges to democracy that President Biden has spoken about?", "b2771425-f693-4b11-8a2c-cd5ece2a9f39": "What are some examples of sensitive data that have changed over time due to societal norms?", "d5648e81-154d-4e7a-9f98-89e26af03143": "How do societal norms influence what is considered sensitive data?", "8c9a7763-5902-48de-9624-1fe4f06180aa": "Why is it important to understand that sensitive data can change over time?", "71023f86-db69-4922-aa49-0c7884cda337": "Can you provide historical examples of data that were once considered sensitive but are no longer viewed that way?", "95e33fee-d6c2-4676-8f91-f97b88f4e897": "How do organizations adapt to changes in what is considered sensitive data?", "c5122f36-0c06-4f80-9d69-d7b5b7ba4c0c": "What role does context play in determining the sensitivity of data?", "6eda9f1e-d41d-4951-aeeb-6e3163a690d8": "How can individuals stay informed about changes in what is considered sensitive data?", "1ee096ea-f4e1-48d5-8859-1d486e08313e": "Are there any legal implications when sensitive data definitions change over time?", "3908a945-dd75-4d68-b75a-c42acfd1c173": "How do cultural differences impact the perception of sensitive data?", "e20ddb14-9cac-4fc8-8013-cc39c4a8924f": "What are the challenges in managing sensitive data that evolves with societal norms?", "52b9675c-9516-452a-94a4-69f03f5c10d5": 
"What is a fallback and escalation system in the context of automated systems?", "f678bc09-26c6-49a9-838d-b7ed1c773949": "How can human consideration be proportionate to the impact of an automated system?", "56255088-9e23-47d6-b73f-3890da92fc7a": "Why is it important to have safeguards against human bias in fallback mechanisms?", "046fd2c8-285c-4ea1-b2dc-0acdba7f5f1b": "What are some examples of high-stakes decisions that might require greater human oversight in automated systems?", "7f889e3b-de8e-4bc4-895f-af886478d21f": "How can organizations ensure that their fallback mechanisms are accessible to all users?", "c66a77c5-9ed9-48c7-b414-54d06b37b5c4": "What types of training are necessary for staff involved in human consideration and fallback systems?", "14df2313-e73b-4475-9175-b15e327a4691": "How can the availability of human consideration be increased for automated systems with significant impacts?", "7442ff35-391c-4c27-821d-8246d671bdd3": "What are the potential consequences of not having an effective fallback and escalation system in place?", "6da33505-e013-4410-87b2-c06a8f9a986c": "How can the effectiveness of fallback mechanisms be tested?", "0c6a74d4-9976-4dac-82a0-85562c58e9fa": "What are some common challenges in implementing accessible fallback mechanisms for automated systems?", "90cdaeaa-2d23-47cc-9c62-a678caaff1fe": "What is the role of NIST in advancing AI technology?", "c94bb207-795e-4949-afb5-51060a4c898a": "How does NIST ensure AI is safe and trustworthy?", "aed91732-c782-4c41-b30d-74b4bb0bccdb": "What is the US AI Safety Institute established by NIST?", "9df72cbc-5cee-4e8b-a0f6-a71f2453c828": "How does NIST contribute to the 2023 Executive Order on AI?", "5fb78f30-b8ee-4a7c-9d4b-49a3d7203935": "What are the main goals of the AI Safety Institute Consortium?", "76bff3c9-e07b-42a2-a053-f2b1aaa03c51": "How long has NIST been working on AI research?", "aeb7622a-78ad-4d23-add9-376a60612c11": "What kind of standards does NIST develop for AI?", "11b4686b-199b-42c0-87ea-4cfdf61fc1a0": "How does NIST address privacy concerns in AI development?", "b80438a1-1067-4a5a-820c-4133f009d448": "What contributions did the NIST Generative AI Public Working Group make to the report?", "50466b56-2814-489e-a27c-db75ad0859de": "How does NIST ensure AI is fair and transparent?", "720b9aed-8f99-4d96-9c31-c592612cad56": "What constitutes sensitive data under the AI Bill of Rights?", "52379352-3058-4c02-8a14-bf94e6ff2793": "How is sensitive data defined in the context of AI technologies?", "a6730cb7-c308-49ef-976d-2cd8dae8fea7": "What types of data are considered sensitive for individuals who are not yet legal adults?", "eb718d42-c1f0-4bc4-b821-2f51d3d1c876": "How can sensitive data lead to meaningful harm such as identity theft?", "9d567b63-4cdc-4526-bc7c-9227afac30df": "What are some examples of sensitive domains mentioned in the AI Bill of Rights?", "c673db12-f99b-450f-b738-61159c6ee5c3": "How does the AI Bill of Rights address data related to criminal justice interactions?", "7d8746aa-a1a7-4dbc-bf5e-40f65e995b9f": "Why is biometric data considered sensitive under the AI Bill of Rights?", "449278d1-b673-4878-8c11-d1d5ab0d6db5": "What measures can be taken to protect sensitive data in AI applications?", "5969e8c6-02b2-4c2a-b739-bdc23cad38f9": "How does the AI Bill of Rights define the potential for data to cause financial harm?", "205446ad-e139-481b-81fb-2548db84a0e0": "What are the implications of handling sensitive data in AI systems for privacy protection?", 
"928e6923-bcf0-4580-ad02-7769964afe49": "What are the implications of digital surveillance in a post-Roe world?", "a1b627aa-a1ff-4a68-a3ac-2a726f36747d": "Why did the FTC sue Kochava for selling data that tracks people at sensitive locations?", "70fa19b9-7985-4eb6-9cc2-7bf86776c53a": "How does digital surveillance impact reproductive health clinics and places of worship?", "16562b7c-6b93-483f-a689-d28eab8e4aea": "What companies are being acquired by private equity firms that collect data on children?", "b86ef95b-cdcf-455f-bdc0-474aac5b35eb": "What are the concerns surrounding private equity firms collecting data on America's children?", "9aa09b13-c9c6-4729-8029-8b893a1772a7": "How does the collection of data on children by private equity firms affect privacy?", "c971ec0d-ff6d-4adc-8ce4-f31428e4b284": "What are the potential risks of data tracking at reproductive health clinics?", "85942b84-5c76-435a-b971-83296f24e8fc": "How does the FTC's lawsuit against Kochava highlight issues of data privacy?", "7a683ec1-c238-48c5-bf76-00845333806c": "What are the ethical considerations of collecting data at places of worship?", "aa558c0c-2ab3-40c8-bdd9-3daf1f4f43a0": "How does the job database classification of former Apple employees as 'associates' affect their career prospects?", "19703e97-9d93-49e6-864a-23847656346f": "What are the best practices for ensuring data privacy in AI systems?", "52e88558-d8cc-413e-80e0-9f094bf4270b": "How do you manage intellectual property when using AI models?", "6ad81596-3e02-481b-b7ac-73940d631df3": "What are the key components of AI governance and oversight?", "dd2e0e56-2211-4a81-9712-aecf2fda1a6b": "How can you ensure information integrity in AI applications?", "bcd77494-a31b-4fe9-b2c7-3876f1f5306e": "What are the different access modes for underlying AI models?", "83631b6a-fd3c-4582-973c-c108cbc54c76": "How do you handle sensitive data in AI projects?", "fa5423e6-cb1e-4043-af5e-e939741381e2": "What is the importance of value chain and component integration in AI?", "45fc2c2b-6bc0-4a8d-8731-dfa83c3ba355": "How do you configure AI systems to align with human-AI interaction guidelines?", "94cf27b3-d287-4e48-b4f0-1acdf891f42f": "What are the challenges in managing proprietary data in AI?", "47124446-7ae2-4444-8f9e-e86adddd76bc": "How do you oversee the tasks performed by AI actors?", "af389dfd-cca2-4851-9450-166e9fbd38d2": "What are the key technical and governance interventions needed to protect against the harms of new technologies?", "9bc603e1-c26c-4926-b91a-c8d4e1a57fbf": "How important is community input in the design and use of new technologies?", "be75402c-f043-4191-9c8b-7d5af25bef31": "Why is public reporting on crucial elements of technological systems necessary?", "0dc744a1-6026-46d6-aeb0-8af84f722ae7": "What are better notice and consent procedures, and why are they important for privacy?", "66f03050-aedc-47b1-aa15-51450e4329a2": "How can users opt-out of using certain technological systems and revert to a human process?", "9c5cf2e4-1467-4a23-8857-28214677eca8": "Why is it important to provide explanations of decisions made by technological systems?", "710fdf52-6834-4b9d-9ce6-a653d5cece1b": "What kind of training is necessary for the proper governance of new technologies?", "c505559b-8a65-4f47-80ef-b510d054137a": "How can we ensure that technological use cases are genuinely related to their goal tasks?", "b0c53b5c-f74e-4529-a8d6-a123ed691081": "Why is local validation important for the effectiveness of technological systems?", 
"86fad521-a1f8-4c03-9313-ef0a9cee5091": "What is the role of third-party audits in maintaining the accountability and validity of technological systems?", "55bec949-76bc-49c1-a3ae-5f874eedfddc": "What are the risks associated with the synthesis of CBRN materials?", "9cd11b1f-87d1-484b-aba4-b76a5cb30945": "How can confabulation in AI systems mislead users?", "9ee178ca-364a-4792-8ee5-68de0f17f7cc": "What measures can be taken to control the spread of dangerous, violent, or hateful content online?", "616332c3-0795-4741-9e22-e94ffa2fcb13": "How does the unauthorized use of personal data impact data privacy?", "d00c3c3b-c827-4205-bc58-f5520ba6e148": "What are the potential consequences of de-anonymization of biometric data?", "bf6e3576-bc85-4045-a9ec-7701f2ba852c": "How can access to CBRN information be restricted to prevent misuse?", "3a2a0084-eefa-406c-b6a6-ae09a47d2c23": "What are the challenges in detecting and preventing confabulation in AI-generated content?", "3139865a-00dd-4da3-9038-84cb5a1b8b12": "How can online platforms mitigate the risks of inciting or radicalizing content?", "ef18f0e4-616a-4bde-9199-8407daad074d": "What are the best practices for protecting sensitive health information from unauthorized disclosure?", "65c26694-76cc-42b2-9982-8baf476bbaed": "How can individuals protect their location data from being leaked or misused?", "ca38f0f2-707f-4776-8565-5a88ac901073": "What is confabulation in the context of GAI systems?", "dc4c667a-b953-456e-a31d-8b4221f41335": "How do confabulations differ from hallucinations or fabrications in GAI systems?", "c90a6481-11b9-4db8-b24d-cbb9f232a4a5": "Why do generative models like LLMs produce confabulations?", "f989e408-3f17-4129-8bd2-87ae2ceecc72": "Can confabulations occur in all types of GAI outputs and contexts?", "dac82b9b-8e84-414d-aafc-7f3764ca2b23": "How do statistical predictions in generative models lead to factually inaccurate outputs?", "df4dfdd7-6c4c-4dde-b6dd-5123571b1254": "What are some examples of confabulations in GAI systems?", "5a88e70b-4815-451a-bc8f-e1606c9f23e7": "How can confabulations affect the reliability of GAI-generated content?", "9882e344-3c67-494a-b083-c0b2ae59a9df": "What measures can be taken to reduce confabulations in GAI systems?", "26c3c033-6002-4d01-8f46-96bad11d3572": "How do confabulations impact the consistency of GAI-generated statements?", "6536996f-b790-440e-afc7-66774db4a2e3": "Why is it important to understand the phenomenon of confabulation in GAI systems?", "f9e8bebc-884c-4775-95e1-78d70dc642b2": "What are the best practices for verifying information sharing and feedback mechanisms in AI systems?", "6a36fd5d-98db-42d5-b0d4-493641747d7b": "How can organizations ensure data privacy while sharing information about AI system impacts?", "d4f86701-19bc-43eb-9713-57eb57e073f6": "What steps should be taken to conduct an AI impact assessment effectively?", "ff6fe932-7482-44fc-bd67-812520e17bcf": "How can feedback from affected individuals and communities be integrated into AI governance?", "830c3554-63ea-4a61-a4f9-95e5c8297675": "What are the key components of an effective AI governance and oversight framework?", "621c0613-588a-4422-951a-45a7ed78fca5": "How can organizations prioritize feedback from external stakeholders regarding AI risks?", "df860556-2e58-4780-972c-1293c0231372": "What resources are necessary for effective outreach and feedback processes in AI system development?", "9c78e9c5-3af3-4520-a602-f9a45cebb1a3": "How should interactions with GAI systems be documented to ensure 
transparency?", "359d8ba9-c8b2-4d18-b5c2-9c0369390b86": "What are the potential risks of harmful bias and homogenization in AI systems?", "e1de377f-ddbf-4599-99e2-116dc45fac8e": "How can organizations address the issue of confabulation in human-AI configurations?", "e5df3265-2f97-4196-b75a-ab39e66e7b97": "What are some examples of time-critical systems where immediate human intervention is necessary?", "ae852f4b-1551-4734-b962-337bb0060682": "How do automated systems impact the criminal justice system?", "5ce6cbf8-e3c8-414e-8e92-59e44f212a1f": "What safeguards are necessary to prevent unfair outcomes in automated systems used in sensitive domains?", "9a8e63e5-0caa-4533-b223-17d6c3c56133": "Why is human oversight important in systems that use automation for pre-trial risk assessments and parole decisions?", "fee74500-d0b9-468f-a6c0-cbb66d324f73": "How can automated systems in healthcare lead to dangerous outcomes without proper safeguards?", "7e4ebd6f-c29f-4c5b-a9e6-c00489ace11f": "What are the potential risks of using automated systems in employment and education?", "cf2fab83-75d6-4fea-a72d-449833ab78e8": "How do existing human processes complement automated systems in providing public access to government benefits?", "bcb8ba55-d026-4851-a522-40e9b1e175af": "What are the benefits of having a building manager available when an automated card access system fails?", "f89691af-5a2e-4c6b-8894-fd6d9cd424b7": "In what ways can automated systems in sensitive domains be made more accurate and fair?", "6c7e2aa2-558c-4280-93b7-bf73de34e57b": "Why is it important to have fallback options when using automated systems in critical areas?", "9f5142e7-fda2-481b-a08d-fddf4197454e": "What are the main findings of Northcutt et al (2021) regarding label errors in test sets and their impact on machine learning benchmarks?", "0cdcb840-24f9-49b7-b4b6-bd409bb4d50a": "How does the OECD (2023) suggest advancing accountability in AI to manage risks throughout the AI lifecycle?", "4dfa9162-0bd0-4926-a27a-2fe9231160ed": "What are the key points discussed in the OECD (2024) paper on defining AI incidents and related terms?", "49ca8dea-a08f-410a-a643-8e37fc896455": "What information is provided in the GPT-4 System Card released by OpenAI in 2023?", "4636292e-8f3e-4711-a175-b28428d0eaa9": "What technical details are covered in the GPT-4 Technical Report by OpenAI (2024)?", "69456d83-d8c3-4d1c-bd31-704d2c934902": "According to Padmakumar et al (2024), how does writing with language models affect content diversity?", "d62ca080-832e-4f4d-960d-bf8bb0d1b97e": "What examples, risks, and potential solutions are surveyed in Park et al (2024) regarding AI deception?", "fb2ddd6a-239b-4b2f-a336-f921a876390c": "How do pervasive label errors destabilize machine learning benchmarks, as discussed by Northcutt et al (2021)?", "9208081d-3da9-46db-b27d-f25ed1c64ad2": "What governance strategies does the OECD (2023) recommend for ensuring trustworthy AI?", "edb639b7-e492-4442-a339-62b5e921ceb2": "What are the implications of AI deception as explored by Park et al (2024) in their survey?", "572c1b8f-43fc-46ab-ac02-ef9756b67234": "What is the importance of proof history in AI content management?", "5e70d1eb-366b-403f-b833-9bb92e6a6cc5": "How does version control contribute to transparency in AI systems?", "f9e5f377-fe4c-43e8-8cc5-f004e49964e9": "What methods can be used to ensure traceability in AI lifecycle management?", "be867fe3-bca3-4b2b-9a5e-471f230b333d": "Why is user testing crucial for verifying the adequacy of GAI system user 
instructions?", "4b9a0a17-252c-4972-9be3-1b4156f5b5ce": "What are the key components of Human-AI configuration in AI systems?", "0f22a8c0-7a87-4416-a640-3c2187b8aa3b": "What tasks are involved in AI deployment?", "338a84b7-78f1-442a-8e90-093c313eceb1": "How is AI impact assessment conducted?", "bd811679-d8f6-4568-8799-aa826cd0ca8d": "What roles do domain experts play in AI operation and monitoring?", "6015d9b2-454a-4af5-9a8e-ac87189a84e1": "What does TEVV stand for in the context of AI systems?", "dc4d23d4-5c52-47f4-b5b8-01c23fc3bff9": "How can robust version control systems improve information integrity in AI?", "a6399ec7-f330-4390-8da1-35f4821069f7": "What are the safety and efficacy purposes of notice and explanations in automated systems?", "ec418640-346c-4981-a095-cbed724bdd60": "Why is it important for the American public to know if an automated system is being used?", "7feb0289-a3ca-4a4b-9809-7ff2b8a6518f": "How can clear, brief, and understandable notice help achieve protections in the framework of automated systems?", "c7d1871b-b9ea-4a5f-9ab2-e5b06fe26a8c": "Why are the decision-making processes of automated systems often opaque and complex?", "1f235bf9-1d62-49c3-9f64-b74ff53e1c0c": "What challenges do explanations of automated system decisions present?", "f370e6a8-32c6-4260-8fa8-013612614ab1": "Why should explanations not be avoided in the context of automated systems?", "ee9515a6-b640-4034-98fb-4a5837748cf7": "What is the importance of clear and valid explanations in automated systems?", "745e45f4-bba9-45ec-a7de-e6ad0e0e47e1": "How can experts verify the reasonableness of a recommendation made by an automated system?", "a02fa5da-7bd9-48d0-ade1-35bcb7529e09": "What are the potential harms of not informing the public about the use of automated systems?", "4a54ea01-870a-4fe8-aa51-bbdd95af1a67": "Why should clear and valid explanations be considered a baseline requirement for automated systems?", "d9589a42-1bfd-4e7f-b849-f21901e4405c": "What are the key expectations for automated systems regarding data privacy?", "a2cf57d9-906a-4fa1-83ca-9bcc8e64edbd": "How can individuals access and correct their data collected by automated systems?", "6bb0aeee-f203-4ae1-bdb1-ef25d1fcd53a": "What steps should entities take before sharing data with other entities?", "168e18c4-5764-40f7-83a7-4648c98b69c9": "How can users know who has access to their data in automated systems?", "251640f5-543a-41f8-9cf2-21a40ea67aa2": "What is the process for withdrawing consent for data access in automated systems?", "c1e447ca-42dc-4355-a5b2-c55e9ace2bbf": "How should entities handle the deletion of user data upon consent withdrawal?", "2fd28c08-3fd3-41b6-927b-527d88dc0694": "What records should entities keep regarding shared data and its recipients?", "756d6e43-205c-48de-84fb-df3fe1ff1fd8": "How can users ensure their data is removed from machine learning models after consent withdrawal?", "fb40f206-d170-40e2-8c0d-6fed13736a04": "What legal limitations might affect the withdrawal of data access consent?", "ffc49df6-1959-4e1b-983e-d5d6e8ee184b": "What are the best practices for entities to follow when developing technical standards for data privacy in automated systems?", "4570505c-0e36-451c-a22f-787893a90d09": "How can patients ensure their medical data from CPAP machines is not shared with insurance companies without their consent?", "6218a6ac-9d82-4ee1-92b9-bcd6637a0981": "What are the ethical implications of using predictive analytics to target advertisements based on sensitive personal data?", 
"5a330f85-885f-4b40-85e8-c3a6dca75ffc": "How can schools balance the need for safety with students' privacy when using audio surveillance systems?", "129f0f97-98ba-42e8-9d21-f3d2c6917470": "What measures can be taken to protect students' privacy during online proctoring exams?", "0b4f9884-918b-4031-a687-d99338d8456c": "Are there any regulations in place to prevent insurance companies from using medical device data to deny coverage?", "6e60a744-5a8f-4a68-97f1-ddbd13855330": "How can consumers protect their personal data from being used for targeted advertising without their knowledge?", "d4c7738c-fc98-4fc6-b879-f6dc0a273084": "What are the potential risks of using predictive analytics in retail for both consumers and businesses?", "3ed724ad-1946-407a-990c-be9a281312fc": "How effective are current data privacy laws in protecting sensitive information in educational settings?", "76c3b003-88f0-4704-84f6-839b2a2119c4": "What are the best practices for companies to follow when collecting and using sensitive consumer data?", "587362e8-2dd6-4f47-98cb-003f14dac0ab": "How can individuals be more aware of how their data is being used by various devices and services they interact with?", "b3192f95-ad30-4882-b2d8-fa1e9be2394a": "How does the curing process help voters with disabilities ensure their votes are counted?", "22645ff1-03b4-424e-a146-2bbf328e966b": "What steps are involved in the curing process for voters who have changed their names?", "e87363bb-640a-4feb-9d9f-9d2e0f866665": "Why is the curing process important for voters with shorter or hyphenated names?", "149c1365-2c5a-4e3c-9c87-1105b2746f03": "How can voters confirm their signatures during the curing process?", "13982a84-3a75-4f82-a35a-e52df15dc29c": "What are common voting mistakes that can be corrected through the curing process?", "e7d71aac-05de-4e78-b73e-39d585314e96": "Is the curing process a standard practice in all states across the country?", "77de8575-cb53-451b-8b8f-7da0b09fd6ec": "How do election officials assist voters in the curing process?", "b624cd05-c166-4243-91a7-4a3f693f185f": "What happens if a voter does not correct their voting mistakes during the curing process?", "68a8f208-c093-4f42-afcc-c1e71dc344bf": "Are there any specific challenges faced by voters with disabilities during the curing process?", "a001c1a3-207f-458e-8638-0dbbd1a23b20": "How can voters ensure their votes are counted if they have made a mistake on their ballot?", "be2fad5e-2033-4a8a-9741-2b83774c1ac9": "What are the risks associated with identity theft for minors?", "93c0ee03-f986-4d51-b9b3-8367e10d9256": "Why is data about minors considered sensitive even if it's not related to a sensitive domain?", "9e6f708d-f474-42bc-8052-4762c8d80510": "What types of data are considered sensitive for minors?", "ba8b12ee-5d56-421f-ace2-208edb8cb51b": "How can identity theft impact a minor's future?", "3f587118-82b2-499c-ae3a-e1ec3142c71a": "What are some examples of sensitive domains that require enhanced data protections?", "b29a4d65-c495-480a-89f0-fc4e91aa0c70": "How do sensitive domains affect human rights such as autonomy and dignity?", "4e5b85e2-a644-40e2-8abe-4c376884d3eb": "Why is public expectation important in determining sensitive domains?", "27cee14e-d18d-48ea-a59d-8e81fb9daa11": "What are the potential harms of not protecting data in sensitive domains?", "b3dd37e2-835e-4821-94fb-6bf7178aa938": "How does the framework define sensitive domains in relation to existing laws?", "1d89baef-c455-48c0-8ef3-307b96c366e3": "What measures can be taken to protect 
data in sensitive domains?", "aa7473ee-3668-4669-ae62-9f1a1516a53d": "What are the potential impacts of surveillance technologies on individual rights and opportunities?", "fac36b7d-de59-49ac-bb84-23feb28373e8": "How can one ensure that their data decisions are respected in the context of surveillance technologies?", "4e0354ea-fffe-4a01-a4f1-d51ae008059a": "Why should surveillance technologies be avoided in education and housing?", "0fc325e8-7e09-4ea1-9db6-258a0ae665f4": "What kind of reporting should be available to confirm data decisions in surveillance contexts?", "7d295303-fcad-4c6b-8c7b-f3d3081439ac": "How can surveillance technologies limit access to opportunities?", "96eff3ab-89f0-4b9d-a269-f0357278d6bd": "What measures can be taken to assess the impact of surveillance technologies on personal rights?", "2d48e1ac-0686-4e2e-bd95-e2db7f8c5339": "Are there any guidelines for the ethical use of surveillance technologies in the workplace?", "0b0edbb6-e73f-4cef-a8d2-76c2d7e8176c": "How does data privacy relate to the use of surveillance technologies in various contexts?", "d3db021a-8f8a-4113-b5cb-0ef5de176095": "What are the risks associated with the use of surveillance technologies in education?", "6c3c2809-c142-4336-94b6-0cf80dba8956": "How can individuals protect their data privacy against surveillance technologies?", "ed5fe850-efcd-40d3-a5d3-0cdc40a8a5d6": "How can equity assessments help improve the safety and efficacy of systems for LGBTQI+ persons?", "1aa205a1-c8ec-44b5-99e2-66b9acede8d5": "What are the best practices for collecting representative and robust data for persons with disabilities?", "107537bf-1e76-4d1e-ba37-84a3eb8872c7": "How can qualitative evaluations be integrated into the assessment of systems for older adults?", "ed6516af-d770-452e-88cb-cb49fe224a96": "What methods can be used to ensure data is free from bias when assessing systems for persons living in rural areas?", "aaec1c72-545e-420c-83dd-44ee03eb5ac9": "How can persistent poverty and inequality be factored into the development of automated systems?", "4b3d37df-b2be-4662-b131-e87a2cfbd191": "What are the potential harms of using biased data in system development for marginalized communities?", "404568f3-40d8-42a1-a031-1c920e94bdb4": "How can we guard against the use of demographic proxies in automated systems?", "b41f2aca-4395-4b37-bfec-fdbe04578292": "What role does historical and societal context play in reviewing data for bias?", "14ac205f-14d8-45f2-b537-5aafa71096fe": "How can we ensure that data used in system assessments is sufficiently robust to identify biases?", "86ffb580-7422-420e-8dcc-d891924d573c": "What are the challenges in conducting equity assessments for diverse populations?", "7d0b283d-f144-4a82-b19e-c2112a913ed5": "How does data harvesting impact mental health?", "0fe16b30-1cf2-4712-a01d-4f6ebe321037": "What are data brokers and how do they profile communities?", "30bc10d0-919a-4251-8d8d-b540b171b181": "How can data harvesting affect democratic processes?", "86589cc8-2f3e-4292-a99e-3a491d7a3790": "What measures are companies taking to protect consumer privacy?", "7bf74331-98cb-4cc6-a850-92ab97ea0e1c": "How does data collection chill speech and protest?", "6a18c1c1-6ac2-497c-a1fe-11422ba30b35": "What legal protections govern federal government surveillance?", "0af459fc-739a-4dd4-8b24-8e56ff57fd4f": "Why is it important to minimize data collection?", "8d37a2d1-6bd5-4294-8e51-2cadcc1361ac": "How can improving security practices protect consumer privacy?", 
"d141a6e7-cd0a-4576-a702-58134cba4442": "What are the risks associated with data aggregation by data brokers?", "940504a7-7a40-4be4-be3c-175d09bb3ccb": "How does data harvesting breed distrust and anxiety?", "2c185373-e28c-400c-a25c-f72dfe3eaa57": "What are general fairness metrics in machine learning pipelines?", "35d3df9b-ca04-45d6-ba61-910781efea5a": "How can demographic parity be applied to business outcomes?", "9dc2b214-8912-4afb-9003-08f83e541c16": "What is the significance of equalized odds in ML fairness?", "a5eacbd0-3001-4167-8925-1156e278be54": "How do you measure the prevalence of denigration in generated content?", "fbb7532d-4e3e-41f8-8fa9-f7b5a02d5cf0": "What are some methods to identify harmful bias in GAI systems?", "a9a83f45-168b-4db5-ace9-45e7fc8c91b9": "How can custom, context-specific metrics be developed for ML fairness?", "3d684fd5-d832-42dc-b175-14f788a2e83f": "What is the role of domain experts in creating fairness metrics?", "fd5250fe-6121-425e-b4d1-c8e0bb203d19": "How can potentially impacted communities be engaged in assessing GAI systems?", "08d9cafd-06aa-44de-ba02-3e37b37a6b17": "What are the implications of harmful bias and homogenization in AI?", "98218ea7-de37-4779-8228-a2bbdb9ff990": "How can environmental ecosystems be affected by GAI systems?", "3a930566-71ec-465c-b726-c04b4278a904": "What are the key characteristics of trustworthy AI?", "f5324885-70fb-44aa-abcc-bacc277ed26a": "How does information integrity impact the trustworthiness of AI systems?", "48616ea8-c86b-4f86-aa5b-e94d1bf34068": "What does it mean for AI to be accountable and transparent?", "db5d6aab-79c3-433c-9530-afa6fd56678c": "Why is it important for AI to be explainable and interpretable?", "adb92956-0666-4e9c-83d6-139c568fedf1": "How can harmful bias in AI be managed to ensure fairness?", "00d0dc92-3f12-4a6f-97a5-9bfb57bdd801": "What measures can be taken to enhance privacy in AI systems?", "1391f61d-1d2c-486f-88ab-108403d31a1c": "What defines high-integrity information according to the 2022 White House Roadmap for Researchers?", "1b6fc2b6-eb9c-4f80-9435-aed7e4c8c00b": "How can the accuracy and reliability of information be verified and authenticated?", "b79e8099-38c5-4a4f-b636-f1054ae0ca4f": "What is the significance of having a clear chain of custody for information?", "3fdc8d07-1b55-425f-93f6-e66f1429df36": "How does acknowledging uncertainties contribute to information integrity?", "a9dc1f08-5014-4341-8489-8745d5cc3daa": "What are the best practices for logging and recording AI incidents?", "1b659988-353f-4357-8611-7249beebc92b": "How can change management records help in managing AI incidents?", "ddbf64e7-c5b6-434d-8d6e-31d1e5d33d28": "Why is version history important in AI incident management?", "4a4cbdcc-49b2-4fb2-8c24-812bca763c37": "What role does metadata play in the analysis of AI incidents?", "1c1c0ea6-3ed1-421c-a55f-e58d82db9618": "How can inconsistent access control affect the distribution of content through plugins?", "62ddbc5e-ccb4-4713-b294-20cbbd2eb010": "What are the benefits of regular information sharing among AI Actors?", "ffd63cbd-ea55-4aad-aeaa-0dbe6fa096c0": "How can insufficient access control be mitigated in plugin distribution?", "483b6c63-303b-4f23-8f34-5b22a4ec2765": "What documentation practices are essential for smoother information sharing in AI?", "ac189b5e-6349-44af-bf0f-702798289347": "How can AI Actors use change management records to respond to incidents?", "8a3d3981-24a8-498a-b509-25543fd6afeb": "What are the challenges of maintaining consistent 
access control in distributed plugin systems?", "638b3c99-e6cb-49bb-95c2-f267b2d1ec00": "What are the risks associated with the validity and reliability of GAI systems?", "f0977b5a-0de8-435b-bfee-e6a457db0442": "How do measurement gaps between laboratory and real-world settings affect GAI systems?", "d2628c21-64b0-4b61-a595-7097a4f233c5": "Why do current testing approaches for GAI systems often fail to assess real-world impacts?", "17c050f3-312d-46b8-b066-2bf66ab0142f": "What are the challenges in estimating the ecosystem-level risks of GAI?", "1116721c-2698-4938-a6bb-f96c2698380d": "How can prompt sensitivity and context heterogeneity exacerbate measurement gaps in GAI systems?", "68367977-e97f-4f35-a78a-63620dbd4fb8": "What is the role of structured public feedback in evaluating GAI systems?", "4770227b-6fd3-4a08-93e2-eac625aa3e2f": "How can structured public feedback help calibrate traditional measurement methods for GAI?", "a50d0a5a-b180-4209-b057-8a3ce7ca5c8a": "What are some examples of structured public feedback for GAI systems?", "e538cb20-92e5-427f-b9ac-cbe7f61c3654": "Why is it difficult to estimate the political, social, and economic impacts of GAI?", "108fa021-85ba-4c80-b094-bc316a5210da": "How can real-world conditions be better integrated into the testing of GAI systems?", "25bfeba9-9dba-40e4-ad97-8c839ca8c51a": "What methodologies can be used to evaluate biases in AI-generated content?", "67f472ad-fa27-4999-b17b-c75cccb0ef3b": "How can computational testing methods help identify stereotypes in AI outputs?", "f40897d1-e74b-4ae9-bed5-132e9360c38f": "What are some common biases that might emerge from AI-generated content?", "07c1cc8e-f86c-4899-82ed-bf9b470ddf20": "How can structured feedback input be used to assess AI content for harmful biases?", "e43daa73-3e5d-4c9e-a3ca-075dec799ed3": "What is the impact of homogenization in AI-generated content?", "ac6364d5-5041-446d-b5d7-e6d6fc9fa97b": "How can we mitigate harmful biases in AI systems?", "eae909c9-9200-428d-9d2d-0cf8ae39ea48": "What role does user feedback play in identifying stereotypes in AI content?", "e04d6574-6d9f-42a2-bf82-086109b4bfec": "Can computational testing methods alone ensure the integrity of AI-generated content?", "43545e6a-0aa6-4c7c-89f0-464c8c0cb1f9": "What are the challenges in evaluating potential biases in AI-generated content?", "9ad47d0f-1d88-4ffb-b427-4f8ecaeaee01": "How can we ensure that AI-generated content does not perpetuate harmful stereotypes?", "5612aea9-1a55-450a-830f-c05c88a34e6e": "What is AI red-teaming and how is it conducted?", "69417cf7-74ea-4fe6-bb4e-c4424621c4e4": "How can organizations collect feedback on AI model outcomes and user experience?", "85b1f1c5-fb0a-43ae-ab3f-703b8aa0663b": "What are the best practices for implementing feedback activities in AI systems?", "1d2c36f2-6923-4498-8504-c5c93c6eec30": "Why is informed consent important in AI research involving human subjects?", "4d3e7bb9-45c2-4918-ad20-9225ef78885c": "How does AI red-teaming help in identifying potential adverse behaviors in AI models?", "886bdf88-e7ed-48b2-8616-3b50743a28b8": "What are the human subject standards that organizations should follow in AI research?", "f6f13394-12e4-41df-8d73-8f8a59b625c8": "How can stress testing safeguards benefit AI models?", "7b479921-3a77-4f29-8406-2f7b03ab8878": "What is the role of subject compensation in AI research?", "c60ab7a5-60a1-49f6-b20a-5707d699cb8e": "How can AI red-teaming be performed in a controlled environment?", "32fe7223-3f84-4ef2-a178-0fba0a0e178f": "What are 
the potential risks and impacts of AI models in real-world interactions?", "19b3ef87-4631-4218-ba95-80b2c105535a": "What are the privacy concerns associated with social media monitoring?", "005c7c98-75a6-4242-878e-530c529e641f": "How do ankle monitoring devices impact individual privacy?", "1ba49c65-ac05-43c5-806b-21171820c4da": "What are the potential risks of using signature matching tools in voting systems?", "deebad53-75a3-40d9-83f0-b09ff89b0b35": "How do smart home systems collect and use personal data?", "04ad04d2-396e-4701-a6e9-2422c69accc7": "What are the privacy implications of systems that collect health-related data?", "d70da12d-584e-4e52-9eae-1f9e2f62e745": "How do education-related data systems impact student privacy?", "56387c3a-2259-48da-88f8-d5890f89f39d": "What are the concerns with using criminal justice system data for big data analytics?", "857b6f51-d048-46ae-85a7-5495bb30553f": "How do ad-targeting systems use personal information to build profiles?", "8ae7d54f-19dc-4a4f-9065-c5993f18db14": "What is algorithmic discrimination and how can it be prevented?", "fe8e6119-c018-4ce9-9b76-272740890ecc": "How do algorithms that detect student cheating or plagiarism work?", "b1a35db2-e2c3-411a-9942-84f2ce3db112": "What is the AI Risk Management Framework by NIST?", "985b4d2d-1090-4510-b623-7cd70b25fb56": "How does the NIST AI Risk Management Framework address AI risks and trustworthiness?", "a0eb8337-f777-4726-a13b-d7ec625951a6": "What are the key characteristics of AI risks according to NIST's AI Risk Management Framework?", "6c03ed54-6ab0-4045-a251-0dce83ca395f": "What are AI RMF Profiles as described by NIST?", "402b5f7a-b09f-487b-b5a9-72eced264252": "How can organizations use the AI RMF Profiles to manage AI risks?", "18dc0b62-513e-4205-8429-d0f22c15a613": "What tasks are described for AI actors in Appendix A of the NIST AI Risk Management Framework?", "8ce49ea6-adba-418b-a59c-9007b6ae81da": "How does NIST categorize different AI actors in their Risk Management Framework?", "f8ab2a45-69cd-4dc5-a52f-d340a7e463c6": "What are some common AI attacks and their mitigations according to NIST?", "5e9092a5-7669-45cc-af06-639af0388ab2": "How does the NIST AI Risk Management Framework help in improving AI system trustworthiness?", "d829fb79-fb10-4f92-b38b-e155bb3fcee3": "Where can I find detailed descriptions of AI actor tasks in the NIST AI Risk Management Framework?", "a023b48b-2c54-40ef-8767-5d3f03fe2914": "What are the key policies needed to oversee AI systems effectively?", "33fa9547-975a-4352-8f53-81d66f44c49f": "How can independent evaluations improve the oversight of GAI systems?", "a6fa8d04-3c9b-457d-88fd-dfa23ce40ff3": "What types of risks are associated with GAI systems that require robust evaluations?", "8f6d5636-40eb-47a8-ad69-b87dfebea48d": "How should organizational roles be adjusted across the lifecycle stages of large GAI systems?", "a71268a9-4141-4a0e-b1fa-0076d05cd42e": "What is the importance of test and evaluation in the development of GAI systems?", "691fbe00-89ad-498d-8d41-323cf1e97661": "How can harmful bias and homogenization be mitigated in GAI systems?", "e6e14601-db3d-4f7d-8676-d4eab8ecb25a": "What are the best practices for GAI content moderation?", "7068ff06-d301-4011-b487-7cfba94a4861": "How can increased accessibility of GAI tools and interfaces impact their oversight?", "e8dab0e4-d104-4378-a736-39d0227294b1": "What are the critical components of incident response and containment for GAI systems?", "aca5d4a8-7b84-4056-b0e6-e04d36ece68b": "How can 
information security be maintained in human-AI configurations?", "6897739c-d427-406f-ad1d-d7c4c77294d3": "What are the ethical concerns associated with text-to-image models?", "e0dacf35-3db5-49c4-9766-2c3dad84baab": "How can text-to-image models be misused to promote dangerous or violent messages?", "226859d7-3332-4379-8494-f26272702d7d": "What are the risks of GAI systems producing content that recommends self-harm or illegal activities?", "b791d661-0b16-4057-b391-7d89660c8a6d": "How do current systems attempt to restrict harmful outputs from GAI models?", "18041e17-7026-4c7d-9458-03b95bed2260": "What is \"jailbreaking\" in the context of GAI systems?", "efafd5fa-5e44-473a-89e5-972ae57c28f2": "How can \"jailbreaking\" be used to circumvent output controls in GAI models?", "f6f5b288-0e56-4759-a22e-70b60f145993": "What are the limitations of GAI systems in terms of data privacy and information security?", "06185446-c806-4fde-a2bc-435e67f172e3": "How can GAI systems be harmful in the context of CBRN information or capabilities?", "e23e338c-f249-4c47-9177-bde54d129a27": "What measures can be taken to prevent GAI systems from producing obscene or abusive content?", "20366931-36fc-4932-be26-4a2236da5849": "Why is it important to study the potential harmful effects of GAI systems on mental health disclosures?", "c02e5bf5-5f98-4e9a-9951-b476de779a36": "What are the best practices for applying organizational risk tolerances to third-party GAI resources?", "67fb8dc5-8f12-4f4c-b3dc-891c8219e312": "How can organizations effectively assess personnel credentials and qualifications for GAI projects?", "6df4dda4-63e2-45ab-82e3-2ad196f97ad3": "What are the key steps in performing background checks for individuals involved in GAI development?", "1626e1c6-eaaf-458a-9d62-6539ba4af917": "How can organizations filter GAI input and outputs to mitigate risks?", "ade6ed4e-ae4a-4354-9696-9be41db683dc": "What is retrieval-augmented generation, and how does it apply to GAI risk management?", "c37cf334-7c3f-4151-88b8-baf4569164a2": "How should organizations reassess risk measurements after fine-tuning third-party GAI models?", "8ccf5159-8cbc-4786-9c65-58c3df77213f": "What are the common risks associated with the GAI system value chain?", "91601426-b8bf-4362-b7fe-360c419ef972": "How can data poisoning affect GAI systems, and what measures can be taken to prevent it?", "822d55ee-6b63-4714-a957-a43dc8d843c9": "What are the implications of data privacy and localization compliance in GAI systems?", "284bee81-fb81-49a5-bbe0-49ef74483c20": "How can organizations ensure geopolitical alignment when integrating third-party GAI resources?", "bfa377f7-0ab2-4134-b646-98d6866d752d": "What are the main priorities in information integrity research and development?", "bd0492b6-4b03-4c9c-af31-313a235c4c93": "How does information integrity impact cybersecurity measures?", "8eebfbca-8240-4861-bd05-6a53ad7ccd8f": "What are the latest advancements in information integrity research?", "3ceb3870-78e8-4ec0-9491-241fccb93ec5": "Why is information integrity crucial for data security?", "7d63dee3-6d48-4393-8be6-8480ce6ced6d": "What role does artificial intelligence play in information integrity?", "e027c23e-9be3-40d9-a43f-99b9cc85df42": "How can organizations improve their information integrity practices?", "eaf96d9f-b112-43f1-ac2a-15c56c9e2ed8": "What are the challenges faced in information integrity research?", "4752b74b-3b45-4293-84a6-9129151862be": "How does blockchain technology contribute to information integrity?", 
"c98a9050-6d52-4601-8554-821c0659bcb8": "What are the best practices for maintaining information integrity in databases?", "015b7ed6-dfc9-419c-b332-8d22138cadfb": "How do regulatory standards influence information integrity research and development?", "b4881d3d-9b06-4fa0-94ca-6df2782ac2fd": "What are the minimum thresholds for performance or assurance criteria in deployment approval processes?", "c187cc72-ec40-4262-a0a8-dd0a7d8236a5": "How often should the reviewed processes and approval thresholds be updated to reflect GAI capabilities and risks?", "ad51c1b1-347d-4f10-b264-363ddcdfa938": "What is the significance of establishing a test plan and response policy before developing highly capable models?", "a7fc40f7-d119-4980-b911-ad106e77b51e": "How can one evaluate if a model may misuse CBRN information or capabilities?", "102600d4-12bd-4196-b867-7b5b9f746130": "What are the key components of a \"go/no-go\" policy in deployment approval?", "307c1ef5-e97b-48be-a89d-57c5db8c3e6b": "What procedures should be followed to ensure information security when dealing with CBRN information?", "761bbb97-b978-43a2-8f4c-50adc790fd0b": "How can confabulation in models be detected and mitigated during the deployment approval process?", "9e565cf7-9f25-43f9-9c04-55f723fcfde1": "What are the risks associated with dangerous, violent, or hateful content in highly capable models?", "b14dae2d-32f4-43d6-80c9-6f7752b5ddba": "What steps should be taken to periodically evaluate offensive cyber capabilities in models?", "b976da61-57a7-4e5f-a8f0-1328f4cd25f4": "How can one ensure that the test plan and response policy are effective in preventing misuse of CBRN information?", "a21ab356-0089-4dbf-b788-2b90e85ee801": "How do remote proctoring AI systems impact students with disabilities?", "b444448c-39c6-4825-a428-c35b6541fd7a": "What are the concerns of the National Disabled Law Students Association regarding AI in remote proctoring?", "af11b4d9-49c2-4c1e-a752-6ee9983d2405": "How do healthcare algorithms contribute to racial disparities in patient care?", "667c8aaf-0338-4557-83a6-11d3be7ef5b8": "Why do AI systems in healthcare assign lower scores to Black patients compared to white patients with similar conditions?", "51f1057f-cd68-4ea1-bf25-3d2e6c150aad": "What are the implications of using sociodemographic variables in clinical algorithms?", "efd5daa3-ad2f-403d-9faa-cfc0f273c6bc": "How can AI in healthcare be improved to avoid race-based health inequities?", "cb42a96f-6b88-4a1d-933c-4b66bf56803e": "What specific disability-specific access needs might cause AI proctoring systems to flag students as suspicious?", "54e478ee-901e-4bfd-b77b-ae6dbf060187": "How do clinical decision-making algorithms affect healthcare outcomes for different racial groups?", "0c29d394-6d79-42ea-ac56-98ef3e0c1ddd": "What steps can be taken to ensure AI systems do not discriminate against individuals with disabilities?", "334d816c-6309-49be-8b84-552c577f1ecc": "How do sociodemographic adjustments in healthcare algorithms impact patient treatment and outcomes?", "ef292182-a80e-4c2e-8112-92ec138b322a": "What mechanisms are typically used to supersede or deactivate AI systems that are not performing as intended?", "9a613a68-8a96-44f3-924a-fd86cb2bd642": "How can responsibilities be effectively assigned and understood for managing AI system deactivation?", "21861de8-59d7-4dbe-a656-594afc0c20c0": "What are the key components of a communication plan for informing stakeholders about AI system deactivation?", "1744f37c-51db-4c5a-b1a0-80edb5bd1d47": "Why 
is it important to inform AI stakeholders about the reasons for deactivating a specific GAI system?", "4bc70be6-98e2-4044-81b3-a337497b444d": "What are some common workarounds provided to users when an AI system is deactivated?", "73b8b746-f1dc-41ac-8aec-5465b68d6593": "How should user access be managed during the deactivation or disengagement of an AI system?", "55b0124a-dcc6-42f6-b033-337a237adf54": "What alternative processes can be implemented when an AI system is deactivated?", "96eddcb3-606d-4fcf-8ecb-1aabb6410965": "What contact information should be included in communication plans for AI system deactivation?", "7c0b2c60-6f69-4f22-bb40-b69c5ea6020d": "How can organizations ensure that open-source AI models are properly deactivated or disengaged?", "fc8a6a87-f230-4bb1-bcbc-b84d5e624924": "What are the risks associated with not having a proper deactivation plan for AI systems?", "586aeb54-40fd-4a75-a7c0-246e11eb3ae8": "What are the common causes of sensitive data leaks in organizations?", "aa626c2d-93f9-4ce6-a685-92c1a419dc4e": "How can companies ensure that their data sharing practices do not present a sensitive data risk?", "3e4a7bfc-a423-403b-aecb-603a13cbe16b": "What are the best practices for conducting ethical pre-reviews of data?", "dccc7390-abc6-409c-835a-9ebe4f7d0b1d": "How should organizations report the outcomes of their ethical pre-reviews?", "731906bd-9376-465c-885b-47262c0ac060": "What types of data are most commonly sold, shared, or made public by companies?", "a869d0fb-d027-4edc-be16-ca89dad5107d": "How can organizations assess whether their data sharing practices are ethical and safe?", "05fba6f9-2740-4b27-beb4-8b707e3fa913": "What ongoing risk identification and management procedures should companies implement?", "14a3553d-b4f0-4a82-82fd-f4ded8de7679": "How can companies mitigate risks associated with data sharing and public disclosure?", "53598710-70e3-4180-bfa6-20045b17b328": "What are the key components of a clear and machine-readable data reporting format?", "d156ee61-a192-4285-85eb-3e0f4cd4eb13": "How often should organizations update their risk management procedures to address new threats?", "6aceb768-3368-49cb-90f3-790b8d4131a3": "What are the main techniques used for provenance data tracking in digital content?", "13caf8a9-5882-4525-8431-f068b49ab01c": "How does digital watermarking help in tracking the authenticity of digital content?", "0a4c99ed-0f3b-4ec0-b9c8-91d7a35b593c": "What role does metadata recording play in provenance data tracking?", "7a7a8144-8fa6-4ca8-a505-9a5781f7850e": "Can digital fingerprinting be used to verify the integrity of synthetic content?", "cb3616bd-f4d8-4795-866d-04c8fe1a8b49": "How does human authentication contribute to provenance data tracking?", "77e9d110-209d-445e-8b11-16e0f2209022": "What is the importance of tracking the origin and history of data inputs in GAI systems?", "ad18f30e-051e-4a17-aa6a-1192c3cac7a8": "How can provenance data tracking assist AI actors who lack full visibility across the lifecycle?", "4d0345e3-6423-4930-96b7-564bf85f7af2": "What are the differences between overt and covert digital watermarks?", "af6f7cb4-a265-408b-a84b-4b3fee1fde06": "How does provenance data tracking help in protecting intellectual property rights?", "0718d3de-69b5-45f0-942f-79426b4d5ad4": "What are some potential manipulations that provenance data tracking can detect in digital content?", "3e3a23a7-0b1f-4b74-85bd-a73370b108e6": "What are the main findings of Zhang et al (2023) regarding human favoritism and AI aversion in 
persuasive content generation?", "c4cf1592-0435-4ae4-8a4c-c5ecb9257eae": "How do people perceive generative AI compared to human experts according to the study by Zhang et al (2023)?", "d6c51c91-05a7-402e-893e-5fc29584968b": "What is the significance of human-GAI collaboration in persuasive content generation as discussed by Zhang et al (2023)?", "e0587b4e-ddbf-4754-8557-bd12c8e644da": "Can you explain the concept of \"hallucination\" in large language models as surveyed by Zhang et al (2023)?", "2544321b-4f79-413c-9a51-2b0b5346a1b3": "What are the key points from the survey on hallucination in large language models by Zhang et al (2023)?", "9011cc3e-7c14-4371-99d2-06a06ec529e7": "How does the study by Zhao et al (2023) propose to implement robust watermarking for AI-generated text?", "0048257b-c29b-4594-829c-28fc43d6a999": "What are the benefits of provable robust watermarking in AI-generated text according to Zhao et al (2023)?", "99ba8f01-a9f5-4945-bc6b-2c290ee16f8f": "How does the research by Zhao et al (2023) contribute to the field of AI-generated content security?", "78f31d73-92bf-4caf-ade9-8bed953bdfa5": "What methods are suggested by Zhao et al (2023) for ensuring the robustness of watermarks in AI-generated text?", "33b17f33-82da-409e-8c46-2ba705142a99": "How do the findings of Zhang et al (2023) and Zhao et al (2023) complement each other in the context of AI-generated content?", "8d45f0a9-2470-406e-845a-42ae6faefaaf": "What is the NIST AI 600-1 publication about?", "e1b36541-9713-4537-8ae0-68c508ee31cb": "How does the NIST Trustworthy and Responsible AI framework address AI risks?", "9761ef9d-05ad-4eb6-84fe-a3e40e15721f": "Where can I access the NIST AI 600-1 document for free?", "6f912bc7-a00d-4201-a5ed-67ad675fb196": "What are the key components of the NIST Artificial Intelligence Risk Management Framework?", "d653daf1-bc71-4825-b8c2-d7a2bbbad861": "How does the Generative Artificial Intelligence Profile fit into the NIST AI 600-1 framework?", "c65c29ab-b699-4074-98c3-1d194961096b": "What are the main objectives of the NIST Trustworthy and Responsible AI guidelines?", "2a5dd66d-4d91-48e5-b4d1-0e97ead849bc": "How can organizations implement the NIST AI 600-1 framework in their AI projects?", "7ce28472-4d89-4da8-9e89-4fc46c2a13c5": "What are the benefits of using the NIST AI 600-1 framework for AI risk management?", "afa13ecd-3449-42b8-90ba-ea5b76fb4649": "Are there any case studies or examples included in the NIST AI 600-1 publication?", "83bfcfa0-0f58-42c0-b0fe-8d24efe51b48": "How does the NIST AI 600-1 framework ensure the ethical use of AI technologies?", "efd3986d-bd53-4b00-8759-234ee59b5935": "What is the new initiative announced by the Justice Department to combat redlining in 2021?", "b9552078-c814-4194-ad10-5170b3d76f76": "What are the key objectives of the PAVE Interagency Task Force on Property Appraisal and Valuation Equity?", "24d775f9-adea-4036-a91b-2b8dbdf09f8d": "How does the PAVE Action Plan aim to close the racial wealth gap?", "dbef7a7c-d59a-44a9-85d0-0922c437187c": "What are the main concerns addressed by the EEOC regarding the use of software, algorithms, and AI in assessing job applicants and employees?", "06d81d53-f1b8-45b1-9d99-aad6159cf2f1": "How does the Americans with Disabilities Act relate to the use of AI in hiring processes?", "3e1a2639-ad92-41c2-b64f-74852b3978d5": "What guidance has the US Department of Justice provided on algorithms, artificial intelligence, and disability discrimination in hiring?", "137541ed-d9cd-4d7e-b4eb-09e87ee778a6": 
"What are the potential risks of using AI and algorithms in employment decisions according to the EEOC?", "f54b5a57-e54a-4eb4-b5ea-3861f01c7a4a": "How does the PAVE Action Plan propose to address mis-valuations for families and communities of color?", "dc89246f-d70d-485b-a723-08770bd02f19": "What steps are being taken by the Justice Department to address redlining practices?", "9a719c6a-76c4-4af6-ae8c-1028bbe8568e": "How can employers ensure compliance with the ADA when using AI and algorithms in their hiring processes?", "2f7c682f-8e96-43da-8397-67ce42d16885": "What are the main concerns regarding the use of biometric technologies in the public sector?", "fdb90e56-c634-47ef-8f82-cf244064da4f": "How does the private sector utilize biometric technologies differently from the public sector?", "03bc26cf-d584-4f6c-952f-6da051e2ec27": "What were the key findings of the OSTP\u2019s Request for Information on biometric technologies?", "b36444b8-7e2e-4cc0-923b-d70acd084d36": "How does the National Artificial Intelligence Initiative Office contribute to the governance of biometric technologies?", "a8d11191-6908-46c0-9ccd-5526ac9ac777": "What are the potential privacy issues associated with biometric technologies?", "4e4d21bb-0a0d-49fd-948f-d1510ecdda24": "How can biometric technologies improve security in public and private sectors?", "4ba397c0-1b13-4613-9e30-01238dcdb761": "What are the ethical considerations in the deployment of biometric technologies?", "c38be416-9682-4d97-8adc-600dfa5d5218": "How do public opinions vary on the use of biometric technologies in different sectors?", "d8c53a02-255b-4261-813a-b50fa08cd969": "What are the recommendations from the Science and Technology Policy Institute regarding biometric technologies?", "7568d3e5-b407-4b82-a31a-2d7048871862": "How is the governance of biometric technologies evolving in response to public input?", "fe6be310-abb6-4d17-b332-42b573870741": "What are the key expectations for automated systems in terms of accessibility?", "4da20bc3-35b4-40d8-8699-3a01d413ac86": "How can organizations ensure that their automated systems are accessible to people with disabilities?", "e3b5fbce-4994-4bee-bfde-419e661a8a25": "What types of disabilities should be considered when designing automated systems?", "e6f6d4ff-5bc4-4809-aa55-4639adf8fbe5": "What are some relevant accessibility standards that should be adhered to during the development of automated systems?", "f4c81b62-7898-4380-bbe0-2e9ef2568424": "Why is user experience research important before and after deploying automated systems?", "1270cceb-5232-47d9-b785-280f19ce1fb4": "How can organizations identify and address accessibility barriers in automated systems?", "77d31311-acc9-4a0a-9ff7-8cff6cda43c8": "What is the importance of disparity assessment in automated systems?", "6409d026-b4de-4585-b70c-a6aa6203ecdb": "How should automated systems be tested to ensure they do not produce disparities?", "e6682ed3-e17b-47ae-9d07-f50e8f6c2df3": "What measures can be taken during pre-deployment testing to assess disparities in automated systems?", "13d62abf-9a46-4079-9a23-4ed18c3d22a9": "How can in-context deployment testing help in identifying disparities in automated systems?", "e4777246-3922-49ed-a2cb-5205495a2534": "What is the definition of equity in the context of fair treatment?", "0f7bdfbc-7ab6-4861-8efd-a3729e4128ad": "How does equity address the needs of underserved communities?", "3fbd4df7-eb9c-4760-afb0-2f341544e453": "Which groups are considered underserved communities in the context of equity?", 
"9861991b-cae5-4ecc-952d-0c2a6be10285": "How does equity ensure fair treatment for LGBTQI+ persons?", "4219fb8a-0ac8-4ecb-9d80-7ff321f4cd59": "What role does equity play in addressing systemic inequality?", "74b3d860-da7f-439b-8e61-5c69ac15c28b": "How are rural residents included in the concept of equity?", "bf920e1a-ff07-4d59-b892-8609aa91fdce": "What is meant by \"rights, opportunities, or access\" in this framework?", "19f1343a-292b-4bb5-8567-c39e272ec0ae": "How does equity relate to civil rights and civil liberties?", "8e6fc90c-607d-4e5f-8946-0f2b984a4975": "In what ways does equity impact older adults?", "e2f0625f-8931-45c4-b593-bb284f09ddd2": "How does the concept of equity address persistent poverty?", "7a5e1a08-397a-440e-8f6c-1af66ee702d0": "What are the key characteristics of trustworthy AI that should be integrated into organizational policies?", "75e53249-b463-44d5-83be-bdd902fc92ce": "How can organizations establish transparency policies for documenting the origin and history of training data in AI applications?", "aa76d60e-073a-408e-bf7f-44ee6a8d0a9b": "What are the risks associated with data privacy and information integrity in AI governance?", "66ee0108-3649-4abc-95cd-c102ba9c3451": "How can organizations balance the proprietary nature of training approaches with the need for digital content transparency?", "67c464c5-77f9-4875-8e31-8402a617f457": "What policies should be in place to evaluate the risk-relevant capabilities of AI before deployment?", "2b18ac77-f66e-449f-bb8c-641d6d6e522c": "How can organizations ensure the robustness of safety measures in AI applications through internal and external evaluations?", "a8055010-820e-4e2a-acbd-4363d93ffeef": "What are the specific risks related to CBRN information or capabilities in AI governance?", "b1c0e6a1-f07d-4e7e-a97c-2264522b8bcc": "How can organizations determine the needed level of risk management activities based on their risk tolerance?", "421c1f70-89d3-45a7-92c0-6b6fd7393f78": "What are the best practices for integrating trustworthy AI characteristics into organizational processes and procedures?", "41710f51-0c31-4835-a239-bba06e0b5003": "How can organizations manage intellectual property risks in the context of AI governance?", "e9ef61d9-acfb-4295-8166-81d2d8e29cd1": "What is the Department of Defense Responsible Artificial Intelligence Strategy and Implementation Pathway?", "63251280-da3b-4d49-9e3b-8b79726b8e55": "Where can I find the Department of Defense's AI strategy document released in June 2022?", "c739a52b-1410-4168-82eb-52163e1c9cc2": "What are the key principles of Artificial Intelligence ethics for the Intelligence Community according to the Director of National Intelligence?", "7da3dff9-04d3-4cea-814c-cd1c71e8f2fa": "How does the Department of Defense plan to implement responsible AI practices?", "70a71142-ccc1-46c3-85cb-f7e5573d1ea0": "What ethical guidelines has the Director of National Intelligence set for AI in the Intelligence Community?", "b1801b81-b269-4946-9f2f-f3bd71107763": "Can you provide a summary of the Department of Defense's AI strategy and implementation pathway?", "22b9c165-4460-449c-a296-6369c392e632": "What are the main objectives of the Department of Defense's AI strategy released in June 2022?", "f40a3247-588b-4f4a-8c27-0234b1af2a95": "How does the Intelligence Community ensure ethical use of AI according to the DNI's principles?", "55315bb2-f065-44ae-bc39-ffa0fe34eb65": "What are the challenges mentioned in the Department of Defense's AI strategy document?", 
"f2e63c22-7347-47c9-b0a7-ff7f363ad3c1": "How can I access the full text of the Department of Defense Responsible Artificial Intelligence Strategy and Implementation Pathway?", "9128749a-0d1d-42e5-94c3-9b820c40125b": "How does automated test proctoring software discriminate against disabled students?", "aab3924b-582c-4dc6-9118-49c1c8b51b5a": "What are the main findings of Ziad Obermeyer's study on racial bias in health management algorithms?", "aa7f061b-c839-40ea-ba44-f24f4ab55467": "What are some examples of discrimination faced by disabled students using automated test proctoring software?", "bd8aaa1f-1382-47e3-9ce1-09f9e7164ebc": "How can automated test proctoring software be improved to be more inclusive for disabled students?", "048678e4-9c45-4540-a0c6-72c404e2106b": "What impact does racial bias in health management algorithms have on patient care?", "51ba72f1-b32c-479c-820e-2d02252ecc21": "Are there any legal protections for disabled students against discrimination by automated test proctoring software?", "4ec83564-814d-4175-8f0f-704d3e58d2f7": "What methods were used in the study by Ziad Obermeyer to identify racial bias in health algorithms?", "3569ca2c-24e6-4785-ba6e-317a3d302202": "How prevalent is the issue of discrimination in automated test proctoring software?", "2869107a-ce0c-41b3-8508-c60d12c31fe7": "What are the ethical implications of using biased algorithms in healthcare management?", "cc253ba2-15d7-47dc-a233-8bc4a26cdedc": "What steps can educational institutions take to ensure fair testing conditions for disabled students using automated proctoring software?", "153cebaa-730a-43c5-99b7-10c18bec1538": "What are the best practices for managing risks associated with Generative AI (GAI)?", "dba1f4e2-98a6-4d42-b384-1a54e1d6ad1c": "How can organizations govern risks across various stages of the AI lifecycle?", "becc940f-e73e-4aa7-8581-f1ba6def41f0": "What are the common risks associated with the use of large language models (LLMs)?", "7a90fb5d-f437-4b59-9931-c0af0ec35339": "How can cross-sectoral profiles help in managing AI risks?", "43e9e772-adf1-40b7-9b1a-9fc44772e900": "What are some suggested actions for organizations to manage risks in AI?", "c2a5af10-b86f-4924-8010-b80c2782b006": "How does the AI RMF profile help in mapping and measuring AI risks?", "6e36882c-0b7c-4f22-a9d0-560ddc1859f5": "What are the novel risks introduced by Generative AI?", "7e0b3a69-208b-4140-8f56-b1041396d409": "How can cloud-based services impact AI risk management?", "7968a9ac-bfaf-4a59-bb35-1125136d6a96": "What are the risk management priorities for Generative AI?", "c942dcb0-ebf6-4dc8-aef1-d3b7642ef3c5": "How can organizations use AI RMF profiles to manage risks in business processes?", "aab11cf2-1200-420c-a291-040945c00aad": "What are the best techniques for managing statistical biases in GAI content provenance?", "96ade2c4-dc96-4d8d-ab36-a0860916d265": "How can re-sampling and re-weighting help in reducing biases in AI evaluations?", "1b1d4db6-e59f-4967-9a3b-17ad799ac4b4": "What is adversarial training and how does it mitigate biases in AI systems?", "ca0cdce9-6039-460a-8ebc-0d2da442b1a8": "How should content provenance data be documented to ensure privacy and security?", "0df51587-f84b-43eb-9c03-341d92892edd": "What are effective methods for anonymizing data to protect the privacy of human subjects in AI evaluations?", "add464c4-0e7d-4348-91dc-15a25c5a309d": "How can privacy output filters be leveraged to enhance data privacy in AI systems?", "36e32c02-9ee0-485c-8d4c-110273810e28": "What 
steps should be taken to remove personally identifiable information (PII) from AI datasets?", "877d24bb-418d-4075-b1e9-a626881ec675": "What are the risks associated with not managing statistical biases in AI content provenance?", "3da8d02e-d37f-4c4d-a06c-1f61edd78d4b": "How can human subjects withdraw their participation or revoke their consent in AI evaluations?", "841e4207-02f3-4f15-b3cc-0574eccf296b": "What are the potential harms of not protecting human subjects' privacy in AI evaluations?", "b4cfa32e-3647-4808-9718-9b77c8956ff5": "What is the importance of assessing notice clarity and timeliness in user notifications?", "34a1099f-5eb1-4cdb-bd14-0904a0d6883c": "How can the validity and accessibility of explanations be evaluated effectively?", "baf6bb09-9e41-48d9-b4c6-1d06d90b9bb2": "Why is it crucial to assess the level of risk in system impacts and inferences?", "fd7da839-a0e8-4f92-a7fd-2e894b6e5c04": "What are the best practices for tailoring explanations to different recipients and purposes?", "a81b08ef-f745-43dc-8405-02f77e2ebee0": "How can individualized profile information be made readily available to users?", "a3e89095-24b0-4e5b-957d-ac24b193a418": "What methods can be used to ensure that reporting is clear, plain, and machine-readable?", "bf278d53-8d4e-44d1-a215-6464076e0e89": "How does the assessment of notice clarity impact user trust and understanding?", "52b91d4f-d324-4333-8d00-e513e1af51b5": "What factors should be considered when evaluating the timeliness of notifications?", "e8955df1-dc4d-47af-b782-993b5b795595": "Why is it important to provide explanations for system impacts or inferences to users?", "5cbad06d-9274-443a-8465-7a7347fd1bb8": "How can organizations ensure that their explanations are tailored to the appropriate level of risk?", "098c6a18-502f-48ec-999e-13d995b28661": "What is the Blueprint for an AI Bill of Rights?", "699f9227-0a04-4b34-9fbb-bcdabe65806c": "How does the Blueprint for an AI Bill of Rights inform policy decisions?", "da24d51e-024a-422a-976c-cbe53754d0ff": "What is the role of the White House Office of Science and Technology Policy in the context of AI?", "d984a686-20d3-4998-bddb-d15ef2a55d67": "How does the national values statement and toolkit guide the responsible use of automated systems?", "7769d886-01e5-43d6-8808-a34ed6787c48": "What are the key principles and frameworks published by consortia for automated systems?", "3f2e20af-2d2f-40e2-9c9d-5f447f5fd470": "How does the framework address sector-specific privacy laws and oversight requirements?", "8cc369e6-5dbf-4195-bdc7-59d477935e01": "What kind of input has the White House Office of Science and Technology Policy sought from the public?", "2a846355-ce0a-4a42-bf39-c58e28255baf": "How are impacted communities and industry stakeholders involved in the process of addressing algorithmic harms?", "97341ca2-a8ed-46fd-8b73-27cf49a776db": "What are the potential remedies for algorithmic and data-driven harms discussed by the White House Office of Science and Technology Policy?", "ed70d285-bd85-4086-a63a-22c25a560082": "How can policymakers use the national values statement and toolkit in the technological design process?", "8c7fb32c-c935-4651-8283-e8c472a0b365": "What are the key civil rights and liberties protected under the Blueprint for an AI Bill of Rights?", "78c6ff01-8630-4cd4-a808-4a934eacb25f": "How does the Blueprint for an AI Bill of Rights address issues of unlawful surveillance?", "cd574726-80a1-47a9-b260-413ed3f52311": "What measures are suggested to ensure equitable access to 
education through automated systems?", "85722140-1b7b-46da-9ec6-432923b12175": "How does the Blueprint propose to protect individuals from discrimination in employment using AI?", "eded288b-f225-4e3b-be1d-6732eeceebb3": "What guidelines are provided for ensuring privacy in both public and private sector contexts?", "d91e1537-de8d-491c-b125-765de2f2afe8": "How does the Blueprint for an AI Bill of Rights ensure access to critical healthcare services?", "c91b6fc5-1d16-48e6-b88c-8dd1e3af6559": "What are the recommended practices for providing non-deceptive information about goods and services?", "3cd4fd6f-9208-4ceb-a69d-dc920993a98d": "How does the Blueprint address the issue of excessive punishment in the context of automated systems?", "e138b6b8-2968-4bab-8d4a-edca29460bcd": "What protections are in place for voting rights within the Blueprint for an AI Bill of Rights?", "1c59e241-7137-41cc-946d-c1cb0d30a4b1": "How does the Blueprint ensure fair access to financial services through automated systems?", "a4b6ef42-b7bd-444a-970f-c4708415a0ea": "What is NIST Dioptra and how is it used to evaluate GAI trustworthy characteristics?", "6b536f29-aa04-4434-b133-bb4e20ecd6f1": "How can data privacy be ensured when using AI systems in CBRN information or capabilities?", "bde91ad8-b00a-4c6b-ab35-cbc33641a8a4": "What are the risks associated with confabulation in AI systems?", "6fdb5ca1-7096-4613-ad70-ef413dcf51b2": "How can information integrity be maintained in AI deployments?", "5d29075f-46e0-4828-b712-657535007fca": "What measures can be taken to prevent harmful bias and homogenization in AI systems?", "3b68fd1d-60d7-4479-bc67-e72dc34b2baf": "What are the key tasks involved in AI deployment and TEVV (Test, Evaluation, Validation, and Verification)?", "b8bcfc18-dfb7-45f2-94fb-686ffb671400": "How can the limitations of generalizability of AI systems be documented effectively?", "448bc715-abc3-4d6f-a386-a2e6c48b434f": "Why is it important to avoid extrapolating GAI system performance from narrow assessments?", "35077bce-fc70-4f73-9e85-bd50ea0b4421": "How can human domain knowledge improve GAI system performance?", "b299ae9b-dd09-4132-a8d7-685ac966b912": "What are some methods to document the use of human domain knowledge in enhancing AI systems?", "26969f38-1ae0-4596-8d89-e2796ab42cc5": "What are the trade-offs involved in early-stage model decisions for AI systems?", "4fcf7347-858d-4761-95f8-19fb1af11339": "How does selecting a watermarking model for robustness impact computational complexity?", "589055f4-8d10-4b06-b68f-9cfd4048cefe": "What is the importance of tracking the provenance of training data in GAI systems?", "40defdf1-6880-44b7-a1ac-b88f466b074a": "How can documenting provenance data limitations benefit GAI systems?", "8f3e8e77-90af-4d0d-8dd3-5d5f2e614d74": "What are the cascading impacts of early-stage model decisions on downstream performance?", "ca4886da-af0a-420f-b881-1207c2a9181b": "Why is it important to manage organizational risk in enhancing content provenance?", "c13ec0d2-6f92-4b7d-8743-62c761213c6e": "How does prioritizing robustness in watermarking models affect other aspects of AI performance?", "b1ec710f-242e-4b53-a427-76970b24a311": "What methods can be used to track the provenance of metadata in GAI systems?", "edc582fd-2c53-4a88-ac44-3c229de7085a": "What are the potential risks of not documenting provenance data limitations in GAI systems?", "f9e0f746-0407-4024-882b-ca132aac8938": "How can organizations balance the trade-offs between robustness and computational complexity in 
AI models?", "547e953c-3c15-4ee0-b243-3c2761a19028": "What are the main civil rights concerns discussed in the Shared Statement Of Civil Rights Concerns document from July 30, 2018?", "8787fe19-eb36-4f63-8355-085d09e9c966": "What is the focus of Idaho Legislature's House Bill 118 passed on July 1, 2019?", "4558a005-edd2-4a8a-a3a1-96e8a91bdc99": "What are the key findings of the Executive Office of the President's report on Big Data and algorithmic systems from May 2016?", "905133de-f72a-42a8-9a2f-e194bee80d01": "What are the main arguments presented in Cathy O\u2019Neil's book \"Weapons of Math Destruction\"?", "0093d7ea-7e1a-4cad-a33f-26434d5e9417": "What is the central theme of Ruha Benjamin's book \"Race After Technology: Abolitionist Tools for the New Jim Code\"?", "d802150d-0458-44ad-94e6-d48a2bafddc7": "How has facial recognition technology led to wrongful arrests, as discussed by Kashmir Hill?", "3dec342d-89f1-48ea-b474-124dabac2778": "What are the potential civil rights implications of pretrial risk assessments?", "26e9fdf4-b265-44ac-aab5-a4811434f00b": "How does the use of big data in criminal justice impact civil rights, according to the 2016 report by the Executive Office of the President?", "e9a896b0-946f-4635-90a0-2abae8017e85": "What are some examples of algorithmic discrimination mentioned in the provided context?", "2a1a746c-d31f-4062-9339-f652ee1cd0fc": "How do the books \"Weapons of Math Destruction\" and \"Race After Technology\" address the issue of racial bias in technology?", "4adc70dc-e01b-4a8d-bd8c-adef4690717c": "What are the benefits of having a human fallback system in automated processes?", "81c46268-ad7f-45a4-ab44-b39eec645df2": "How can users with disabilities request reasonable accommodations in automated systems?", "6c21060a-4d83-46b3-a846-697b9a09c91b": "Why is it important to test automated systems for accessibility for users with disabilities?", "7753fc6c-a2dd-4e2f-9694-1ad7f0d6c64f": "What mechanisms can be implemented to ensure human fallback is not burdensome?", "ed7e713d-1382-464c-aaa9-eb64bdc88fbc": "How can companies ensure that their automated systems are accessible to all users?", "26951caa-8dbb-4149-81de-40d042c190ba": "What are some common issues users face with automated systems that require human assistance?", "19fd9fc0-5705-4c8f-9471-301741097255": "How can human consideration improve the user experience in automated systems?", "1d8d1efe-bee3-41c4-a9a1-549978df9821": "What steps can be taken to make human fallback mechanisms more convenient?", "c886eb12-d921-44b8-8452-f1f5bc1225f4": "How do automated systems typically handle requests for reasonable accommodations?", "0a84141f-d8b5-468a-a16b-93a9a0a47587": "What are the challenges in balancing automated systems with human fallback options?", "42804d52-5b54-4e4a-aa0b-0f295b8b73bb": "How can systems be tested to ensure they handle inappropriate or malicious queries effectively?", "beed6021-c77f-4dd5-b636-dddc90934522": "What are the best practices for evaluating AI system vulnerabilities regularly?", "7f734e3f-9eb5-4d60-99e6-c12b8c79fe06": "How can AI systems be protected from being used for extortion or targeted impersonation?", "b5c02363-613b-4083-a3a5-ae724917ecbb": "What methods can be used to prevent AI systems from facilitating cyber-attacks?", "84e252fa-dec3-4ec1-9b3d-a0e88f6a8b55": "How often should safety measures in AI systems be reviewed and updated?", "fd9556ae-448f-4d3d-af10-8089d6b043b8": "What role do domain experts play in the operation and monitoring of AI systems?", 
"e580a445-024f-4b81-a591-c79fa6f7fb0a": "How can AI deployment be managed to minimize the risk of illegal usage?", "ee7f5729-19f1-41d3-ba40-12390626207d": "What are the key components of an AI impact assessment?", "0b0819ad-858f-4cc2-9a1c-41f9c77d93d6": "How can TEVV (Test, Evaluation, Validation, and Verification) be applied to ensure AI system security?", "3850de5f-db60-43a0-94d4-63536424c707": "What strategies can be implemented to prevent the circumvention of safety measures in AI systems?", "dcec4fe7-764a-4c8a-b07f-90fcbb7708ba": "What are examples of time-critical systems in various industries?", "e9fb9f53-24c3-4408-8e9d-508409b7f92e": "How can organizations ensure that new decisions are effectively enacted in automated systems?", "1c6c655d-fa19-4fa2-8143-ea52a0a44611": "What safeguards can be put in place to prevent future errors in automated decision-making systems?", "97179ce5-9493-4f29-90be-01818f234c9c": "How do automated systems in healthcare impact patient safety and care?", "10be3d49-59a6-4675-8216-5d2a7bb76336": "What are the consequences of errors in automated systems that control financial penalties?", "1b29da2d-0fb6-41d6-8e7a-38493f5b169f": "How can voting-related systems be protected from errors and ensure accurate results?", "43d4dced-d40b-4a55-9481-6f383f7be02f": "What processes should be in place for human decision-makers to reassess and overrule automated decisions?", "5f442e2b-07cc-469f-ac6d-9e468c48364d": "How can automated building access systems be made more secure and reliable?", "ed225928-3594-4648-8270-ea357b99ac8f": "What are the best practices for maintaining human consideration and fallback processes in automated systems?", "2ac2ed73-c6e1-4af9-8a79-5c62a839798c": "How can organizations overturn previous repercussions from incorrect automated decisions?", "fb32bc70-21c5-4841-beea-62de5c884172": "What are the key components to include in a service level agreement (SLA) for GAI systems?", "a012d748-c92a-4b5c-ba2d-b54a3914756e": "How do you ensure content ownership and usage rights are clearly defined in contracts for GAI systems?", "37d2418c-90ce-43c2-809f-d6c0c96ef7c3": "What quality standards should be specified in SLAs for GAI systems?", "8af40417-015a-48e0-965b-fc636209e614": "How can security requirements be effectively integrated into contracts for GAI systems?", "805a7e12-3609-48b5-8320-157d4b583f8e": "What are the best practices for maintaining information integrity in GAI systems?", "baa6b25d-2b92-4536-9b88-465f65d6d2fe": "How do you address content provenance expectations in SLAs for GAI systems?", "4e4d7968-ecd4-4428-9b66-54567b3811a3": "What are the common challenges in drafting contracts for GAI systems?", "7e5eef6e-1818-4a3c-941b-7cbfd29ab0a4": "How can intellectual property rights be protected in GAI system agreements?", "4ed36e1b-43e7-48b4-a4cc-ef3087d2246b": "What role does information security play in the value chain of GAI systems?", "5d208543-8d35-4012-8c41-f13b2978eae6": "How do you ensure compliance with information integrity standards in GAI systems?", "a8dacfd0-ba17-40c9-b350-208d2e297cd2": "What are the best practices for conducting diligence on training data to assess intellectual property risks?", "32da3eaf-6181-4aab-a4f0-41e9f8e2c415": "How can AI actors ensure that the use of proprietary training data complies with applicable laws?", "f713140e-cb77-498a-b734-2e3226c38cfa": "What are the key considerations for assessing privacy risks in AI training data?", "5aa920d1-a55e-40d6-99a9-e31c968672cf": "How can organizations document the 
likelihood and magnitude of AI system impacts?", "3ebe14d9-c0f7-44b2-afb8-cd0da676e8be": "What methods can be used to gather feedback from external sources about AI system impacts?", "32b1ece3-14d1-4487-853e-c72b7d522734": "What are TEVV practices, and how do they apply to content provenance in AI systems?", "5dd34d24-f3ea-4aa7-b42b-0f26e197f095": "How can AI systems be probed for potential misuse or vulnerabilities in synthetic data generation?", "3f3496fe-0f67-405f-9bec-0181454b415d": "What are the governance and oversight tasks for AI actors in relation to training data use?", "98b6ca14-c6f1-4733-90a9-e8fcaac3e450": "How can past uses of AI systems inform the assessment of current AI system impacts?", "dd5071c9-0180-4916-b959-c6a07c56db03": "What are the risks associated with information integrity and security in AI systems?", "aa2070d5-b6bc-4073-a4cb-b2a83531da3c": "What is the role of the Office of Science and Technology Policy (OSTP) in coordinating science and technology policies?", "ca96d7d3-1320-4416-8cdd-7963833fa563": "How does the OSTP assist the Office of Management and Budget (OMB) with Federal research and development budgets?", "ab6fdd15-c2c9-4c64-9836-5410506b43a7": "What are the main topics covered by the OSTP in their policy coordination efforts?", "cc9ce0e0-cd3b-452e-97cd-67a557ba37e0": "What is the purpose of the Blueprint for an AI Bill of Rights published by the OSTP?", "31087be7-62d7-4c98-a630-dd1d81d1181b": "How does the Blueprint for an AI Bill of Rights aim to protect civil rights in the context of automated systems?", "8afd8443-7a29-410c-96b7-f41aea383947": "Is the Blueprint for an AI Bill of Rights considered US government policy?", "4a65f47c-1889-45a4-9ff3-a8bf32191dba": "What are the democratic values promoted by the Blueprint for an AI Bill of Rights in the governance of automated systems?", "8cda0b73-a4d4-4d29-9816-3f0cf0bbc8b4": "How does the OSTP provide scientific and technological analysis for the President?", "b25eb9c4-f4bf-4134-b0d3-e134b79ace2d": "What is the significance of the Blueprint for an AI Bill of Rights being non-binding?", "27b484ed-f229-4b08-be96-75c3abc5f8a3": "In what ways does the OSTP influence major policies, plans, and programs of the Federal Government?", "b3f70511-7f40-4e59-81c3-ea17760970fe": "What are the key characteristics of trustworthy AI?", "6541a0ad-b207-4b2d-a359-3650fafa8943": "How do energy and carbon emissions vary in GAI systems?", "669d0023-9a92-4a21-b449-8fa1a6af2b88": "What is the environmental impact of training a single transformer LLM?", "a31427f5-6815-40aa-9f6e-73e09d22a931": "How do generative tasks compare to discriminative tasks in terms of energy consumption and carbon emissions?", "7084c993-fcea-49d4-b9d7-2a857b9f45df": "What methods can be used to create smaller versions of trained AI models?", "e6cc306a-b275-43af-a0a7-799fe2f76eb9": "Why are generative tasks more energy- and carbon-intensive than non-generative tasks?", "f593dd60-a1e1-4ced-b812-c96907870b2a": "How does the hardware used affect the energy and carbon footprint of GAI systems?", "c8be6a16-64b0-4b54-98c7-079cb7ec7717": "What are the potential environmental impacts of maintaining and operating GAI systems?", "b5e8f4bf-d285-47ed-945d-af92d42d30ff": "How does the modality of content influence the energy consumption of GAI models?", "8e4c0306-cc08-4518-83d4-6f690dd17d67": "What is model distillation and how does it help in reducing the size of trained AI models?", "a380299e-40fa-430f-992f-328134b228ce": "What role do advisory boards play in 
ensuring the accountability of new technologies?", "7cfa0b20-7c5f-4732-bbfb-bb227472a8c9": "Why is transparency alone not sufficient to achieve accountability in technology development?", "fd5786c9-4268-4527-8e19-6afd150f1710": "How can compensated community input influence the design process of new technologies?", "2ce45131-027f-4fee-8260-1dc2b57ba04e": "What are some additional system needs for validity in technology development, according to panelists?", "8c04225d-b7a3-4ebc-825e-8b08770983d5": "Why is it important to include community input early in the design process of technology?", "e0ebe468-8026-4719-8e3c-8f244e4b01e9": "What types of regulations are necessary to limit the type and cost of new technologies?", "75212bde-5e1c-41e8-98c8-52a538a82b6a": "How can early community involvement impact the success of new technologies?", "e1c0318c-d858-4aed-b2c0-ef0e1203fda2": "What are the benefits of having advisory boards in the technology development process?", "a96ba846-68e3-499f-be1d-9a65c95f7103": "How can regulation help in achieving accountability in the use of new technologies?", "0ad0778a-9494-4e3e-af88-c4694c4eddcc": "What are the potential consequences of not including community input in the design process of technology?", "f974b90c-f825-4ac8-b640-a6316a1d595e": "What are the best practices for post-deployment AI system monitoring?", "1fbd890e-3037-4f08-8ca6-d2ccc5b997e6": "How can organizations effectively capture and evaluate input from AI system users?", "77cb5cc4-479c-46ed-b80f-f461753002f9": "What mechanisms are recommended for handling appeals and overrides in AI systems?", "6dba3c3f-8828-4ed8-a5aa-eacdf7a2c278": "What are the key considerations for decommissioning AI systems?", "d9669bfc-ca24-4606-a9da-8cdef4498029": "How should organizations prepare for incident response and recovery in AI systems?", "decf6dd1-9ab9-49ac-b195-232ac4b5b5fb": "What is the role of external researchers and industry experts in managing AI risks?", "e7207f21-6721-4ef3-9f8b-fcd88334e364": "How can harmful bias and homogenization be mitigated in AI systems?", "8db3af9c-f69b-43a8-8614-e16adacd3ea1": "What processes should be established for monitoring potential confabulation in AI systems?", "30fbacfb-b9b9-4404-99ca-aa7dd91fb39f": "How can sentiment analysis be used to gauge user sentiment regarding AI systems?", "491c9db9-eec0-45a7-8d35-3b865c6cf096": "What are the challenges in maintaining information security in post-deployment AI systems?", "d5096416-f388-46a8-ba7c-efe00591a79f": "What are some practical ways to reduce bias against Black patients in medical care?", "047f16f9-4bbb-46e2-bc04-02caae32fad3": "How does focusing on active chronic health conditions help reduce healthcare bias?", "cc1bd459-a505-4eb3-bb21-c7ab7b777125": "What are avoidable future costs related to emergency visits and hospitalization?", "6a4f1ce1-50e2-4a89-94f5-90f724c34e60": "What best practices have large employers developed to scrutinize hiring data and models?", "c758fef6-ab25-497e-8fcf-971551bff0b3": "What is the purpose of the Algorithmic Bias Safeguards for the Workforce initiative?", "a39fde86-c926-4df1-9cf1-2c15908c1429": "What specific technical questions are covered by the Algorithmic Bias Safeguards questionnaire?", "9c9c9015-c691-44e8-9284-f62ef5306243": "How can businesses proactively use the Algorithmic Bias Safeguards when procuring software?", "417326fd-a953-4bf5-a161-0e5892bb974e": "What are some biases that can be identified in the model training process?", "d89c4058-3ac5-421d-b581-879d16d5de44": "What 
mitigation steps can be employed to address biases in hiring algorithms?", "8e1c792b-4ced-4778-8949-6daa22e033fc": "How do standards organizations incorporate accessibility criteria into their guidelines?", "2ea244fd-8aca-4018-b8b0-223714fba699": "What are the key methods discussed in the Partnership on AI's glossary for synthetic media transparency?", "2fb245a9-6b49-454b-9ab8-d94912761aeb": "How does the paper \"Unsafe Diffusion\" address the generation of unsafe images and hateful memes from text-to-image models?", "dd48491b-a737-4616-be6a-745b00f5915b": "What strategies are proposed by Rafat et al (2023) to mitigate the carbon footprint in deep learning model compression?", "f027880e-a213-48a4-9139-9393e699642e": "What legal attitudes are explored in Said et al (2022) regarding the nonconsensual distribution of intimate images?", "c704d4cf-6b24-44ed-9113-aa42008d5f83": "How does the study by Sandbrink (2023) differentiate the risks of language models in the context of biological misuse?", "366bacdd-bb77-47f2-a39c-c7bf6169f8b7": "What are the main findings of Qu et al (2023) on the generation of unsafe images from text-to-image models?", "25533810-cb28-4c5b-9792-7183df9cc659": "How does the Partnership on AI suggest implementing indirect disclosure for synthetic media transparency?", "26e99d87-0a41-4952-ae1b-35a9302a5920": "What are the environmental implications of knowledge distillation in deep learning, according to Rafat et al (2023)?", "907ab49a-3d4c-4679-8038-f781b13ee780": "What role do legal attitudes play in the victimization and perpetration of nonconsensual distribution of intimate images, as discussed by Said et al (2022)?", "aaa30f81-e6c7-4103-8a63-152c74a2b091": "What are the potential risks associated with artificial intelligence in biological misuse, as highlighted by Sandbrink (2023)?", "b0182517-507f-402c-9d72-7b621f64e5b4": "What are the additional expectations for automated systems that handle sensitive data?", "1c9c675b-2d74-4673-9aa9-cb84ac14fa32": "How is sensitive data defined in the context of automated systems?", "6d42e83b-e788-4ab9-a002-cd545c1513d7": "What types of data are considered sensitive according to the provided context?", "b6530c27-37b3-4728-bac0-f9d4473dbdbf": "Why is it important for automated systems to meet additional expectations when dealing with sensitive data?", "9179505b-25f1-43ea-8329-1f120ce64c61": "How can sensitive data expose individuals to meaningful harm?", "d7d542fb-ce5a-4048-8f5f-b650b4c35bb2": "What are some examples of sensitive data mentioned in the context?", "e1052cc7-4c2d-41cb-92c3-998f3211584f": "How can automated systems infer sensitive data about an individual?", "f0f3d3f7-c4a4-4fb7-acfc-b992f475242a": "What are the potential risks associated with the misuse of sensitive data?", "235fe3e8-a203-480d-b7b7-9f411ea2d613": "How does the context define sensitive domains?", "46d01f66-a474-4e8e-ac51-c8d522ef006c": "What measures can be taken to protect sensitive data in automated systems?", "874107ea-6ed8-441f-968f-3704b981bf64": "What are the key capabilities and limitations of GAI systems in terms of digital content transparency?", "2cdd6080-bdc8-4e6c-b474-90b02b47b693": "How can training materials be developed to educate AI Actors and the public about the societal impacts of AI?", "a0fb147c-38c0-4fef-9b75-18e76cf8a420": "What role does diverse and inclusive content generation play in the context of AI systems?", "df1759f8-c9a0-4abf-9108-e0f81494dbaf": "How can feedback from operators, users, and impacted communities be 
effectively recorded and integrated?", "1cfd9941-237a-46a0-be9f-b60e2511126e": "What methods can be used to gather structured feedback about content provenance?", "4d85eb98-cd48-4fdc-80d4-2923f78551af": "How can user research studies, focus groups, or community forums be utilized to assess content quality and biases?", "3e96a9b6-2c8d-490f-906a-bf61de6087dd": "What are the best practices for seeking feedback on generated content quality and potential biases?", "55df2562-e98c-4baf-a911-341e5304f4a6": "How can the general awareness among end users and impacted communities about feedback channels be assessed?", "364d50e7-0d67-4aee-b2a4-4e278b9be1a9": "What are the implications of harmful bias and homogenization in AI-generated content?", "759c5df7-8c61-4805-b255-184f63b68e04": "How can AI deployment and monitoring be improved to address issues related to information integrity and harmful bias?", "cf41653e-3c18-45ed-a00a-19afbd790804": "What are the potential risks of insurers collecting data from social media for determining life insurance rates?", "8ae367c1-1ed9-4fe1-b0c5-25212e409675": "How can data breaches by data brokers lead to identity theft?", "624e6325-9549-4672-912d-2863775ecf64": "What are the privacy concerns associated with facial recognition systems in public housing authorities?", "c0e48c62-1775-4503-bc52-2b14d98033f4": "How does surveillance software used by companies to track employee discussions about union activity impact data privacy?", "8c95bd57-9819-4866-946c-34d2b7751cb5": "What measures can be taken to protect personal data from being harvested and exposed by data brokers?", "9134a063-6c7d-43f6-b9f4-d7e0712c9e59": "How does the use of facial recognition software by law enforcement affect community trust and privacy?", "afa24cd5-0d80-4e11-bb7f-28b1ef159c0d": "What are the ethical implications of insurers using social media data to determine insurance rates?", "07d3959c-fd15-441f-a064-d94b454936be": "How can individuals protect their personal data from being collected and misused by companies?", "accc5413-f9c8-492e-954c-db2935a83a8d": "What are the legal protections in place to prevent misuse of surveillance software in the workplace?", "154a4887-57d4-48b2-a0e2-f01ef4a05f25": "How can public housing authorities balance security needs with residents' privacy rights when using facial recognition technology?", "51d26025-f397-4282-ab4c-e614b1909136": "What are the key considerations when integrating third-party GAI models into an organization?", "e8ca0409-014b-43ca-aa66-18baf30c5856": "How can organizations manage the risks associated with using third-party GAI tools?", "618930f7-ea49-4631-8ec5-748399ab123e": "What are the implications of using third-party GAI systems for data privacy?", "0e830dc5-978e-40ef-b148-0701cdbac3f2": "How should organizations address intellectual property concerns with third-party GAI integrations?", "82ab0ec3-0073-41f1-a863-66c4ff3d8b4b": "What guidelines should be established for transparency in third-party GAI data usage?", "e774a66c-375b-45ad-8d72-28706054193e": "What risk controls are necessary for foundation models versus fine-tuned models in third-party GAI?", "babb4c10-950c-47c0-8a65-383a64edc068": "How can enhanced processes improve the management of third-party GAI tools?", "30032779-8c22-4bdf-9a98-a280bd457baa": "What are the legal implications of incorporating third-party GAI systems in an enterprise?", "490002ec-c8f2-49a0-aca4-451a75374463": "How can organizations ensure compliance when using third-party GAI-generated data?", 
"b9f8fe8e-7ff3-4431-89f5-73948788cb99": "What role does IT services play in managing third-party GAI integrations?", "787f8222-d352-49ea-b919-7db70989aa72": "What are the consequences of not providing explanations for errors?", "4172ceb7-f347-40ad-8c68-0fe2cc0a23e7": "How does the lack of explanation affect error correction?", "f4bfb069-4ab5-4b30-81a1-2ec30f5ec8ac": "Why is it important to explain errors when they occur?", "b419532f-ec57-4460-bf35-fcdb757ee380": "What are some methods to ensure timely error correction?", "a3239f78-3895-4866-83da-fa3a4f26ff43": "How can the absence of explanations impact overall productivity?", "a349e851-d625-49e2-a919-8040f988ec05": "What strategies can be implemented to improve error correction processes?", "055cba72-28db-40d2-99ce-d761f2ee85b1": "How does the lack of explanation for errors affect team communication?", "2bc860b5-65ed-47ff-800d-bace4aa01b70": "What are the best practices for documenting errors and their corrections?", "4b57ffb5-80b3-461f-a283-70e5cf3971bd": "How can organizations ensure that errors are corrected promptly?", "cbad517a-de7d-4da6-9786-47e6dd684d94": "What role does transparency play in error correction and prevention?", "2d9f919a-f792-4b1b-a10e-a0f578dfc7cf": "What are the main findings of Epstein et al (2023) in \"Art and the science of generative AI\"?", "476325e4-24b4-429b-9788-e84684fab8a1": "How does Feffer et al (2024) evaluate the effectiveness of red-teaming in generative AI?", "6d1dbac6-21e1-487b-b666-a788c3d485db": "What offensive security capabilities of large language models are discussed in Glazunov et al (2024) \"Project Naptime\"?", "58543296-0157-4a5a-9ddc-522f60ee6867": "What are the potential risks of indirect prompt injection in LLM-integrated applications according to Greshake et al (2023)?", "e9b7254b-b94e-4e18-a453-c58d8436079a": "How does Hagan (2024) propose to establish quality standards for AI legal help?", "6a69d169-7541-4d51-93a8-35fb4587ee8e": "What strategies does Haran (2023) suggest for securing LLM systems against prompt injection?", "ccca527c-b69c-4c41-b2ea-9aff9b44255f": "How does the study by Feffer et al (2024) contribute to the debate on whether red-teaming is a silver bullet or security theater?", "c66d08da-f096-41ae-8483-61507f48f6c0": "What are the implications of the findings in \"Not what you've signed up for\" by Greshake et al (2023) for real-world applications?", "a2e8cb0d-b35c-4690-bb95-38babd0775ce": "What methodologies were used in Glazunov et al (2024) to evaluate the offensive security capabilities of large language models?", "087fbc2e-973a-4823-936b-0d0243f5c688": "How does the research by Hagan (2024) differentiate between good and bad AI legal help?", "3bcb7050-a5e3-4025-86e9-b9c7a3eb2196": "What are the key responsibilities of AI Actors in monitoring reported issues in GAI systems?", "0bc4535d-2b67-4804-be77-c8734789e122": "How can AI Actors effectively evaluate GAI system performance using content provenance data tracking techniques?", "c73bd3d7-f752-4f5f-9676-e8acebd65d4c": "What steps should be taken to ensure prompt escalation of issues by AI Actors?", "fdd55fcd-d79b-4a87-ae51-09c717172f37": "What are the measurable activities for continual improvements in AI system updates?", "3102119a-0e38-44ee-bcfc-a58a411db0ef": "How often should regular engagement with interested parties, including relevant AI Actors, be conducted?", "7b74d871-b727-476b-99de-f23821860093": "What are the suggested actions for managing GAI risks related to harmful bias and homogenization?", 
"5ef4015c-16b3-4fc4-983b-5266130165bd": "How should regular monitoring of GAI systems be conducted and what should be included in the published reports?", "e587f1fa-cbd2-454a-85bb-44b757a0046f": "What are the best practices for following incident response plans for inappropriate or harmful content generation?", "48910454-45b0-4445-9366-f056ee60b60b": "How can processes be adapted based on findings from incident response plans in GAI systems?", "7c2d9a6a-5497-4fdd-a880-598d0303db6e": "What roles do human factors play in the operation and monitoring of AI systems?", "b252c530-ffef-4684-adb9-1fadc10df9a1": "What is a use-case based supplier risk assessment framework?", "0c235183-3b08-4ff7-9116-62f623af1175": "How can organizations monitor third-party entities' adherence to content provenance standards?", "3d779012-1e50-47b9-a393-b2536d277ca6": "What are the key components of value chain risk management?", "bb8eff08-42f2-4110-a840-121f230e4c81": "Why is it important to include clauses in contracts for evaluating third-party GAI processes?", "7dd59eab-597e-4402-bbde-5219259f1663": "How can organizations ensure information integrity when dealing with third-party entities?", "8a1a0e9b-c1d6-43a4-b808-6de57e66e46e": "What steps should be taken to inventory all third-party entities with access to organizational content?", "044efb34-2fe0-49fe-8893-7ccb84e34d86": "What are the benefits of establishing approved GAI technology and service provider lists?", "a3484e6b-9afe-4e3d-9b40-45c9b71a39f0": "How can maintaining records of changes to content by third parties promote content provenance?", "8fdd2306-5671-47b8-a6a4-613dcd55f9fd": "What types of metadata should be recorded to ensure content provenance?", "319e4f98-b5a8-4b4e-968d-98c42d92e7b2": "How does intellectual property relate to third-party content changes and provenance?", "76e21c77-5abd-4e59-8638-acbbb4e196f2": "What are the best practices for obtaining consent for the use of data in GAI applications?", "e92011ba-08e3-4f27-8a90-097f25c195ee": "How can anonymization help in protecting data privacy in AI applications?", "a127a266-08a2-4440-ad55-820b622a4116": "What is differential privacy and how does it enhance data security in AI systems?", "036f4f23-f219-4b3c-a996-f38ce4a3735b": "What are some privacy-enhancing technologies that can minimize risks in AI-generated content?", "b59f0589-8212-4bea-bd66-233cd0e150df": "How can AI developers ensure information integrity when using human data?", "4e38d0ae-2d12-48c0-b6ef-a1288551e7f3": "What are the key factors to consider in human-AI configuration for data privacy?", "b035b709-5421-4c16-9588-36f6df6dd376": "How should AI system performance be measured to ensure it meets deployment conditions?", "3e8e3d35-e9b4-4d82-8549-e0076fb93944": "What are the risks associated with linking AI-generated content back to individual human subjects?", "ac6ecaeb-d774-42e9-a706-23571e9ca244": "How can baseline model performance be used to improve AI systems with retrieval-augmented generation?", "642dbeb3-b3f9-425b-adf3-f3fd7101c7f2": "What methods can be used to empirically validate claims of AI model capabilities?", "41b113fc-3f56-4b6a-b606-bacc03b2a8b9": "What are the key differences between data privacy and intellectual property?", "95092336-d112-46e6-822a-88f81d722e72": "How can companies ensure data privacy while protecting their intellectual property?", "4b019b57-09a1-4397-af47-7cf29ee23a69": "What are the legal implications of data privacy breaches on intellectual property?", 
"1411f491-e75f-4111-a9e7-b927c26b8c3b": "How does GDPR impact intellectual property rights?", "7caa53ec-adf7-4bd5-b0d6-16e1ec9de7af": "What are some best practices for maintaining data privacy in intellectual property management?", "724b5a82-32f3-49fd-8271-505c3b2a5866": "Can intellectual property be compromised through data privacy violations?", "1bba92c8-2cf6-481c-a916-e2b90863ada9": "How do data privacy laws affect the sharing of intellectual property across borders?", "b57dc1d4-b29e-4d1f-81d7-93ab353d2990": "What role does encryption play in protecting both data privacy and intellectual property?", "241527a6-6383-42f6-80ad-d47448a4e5de": "How can businesses balance the need for data privacy with the need to share intellectual property for collaboration?", "03c92229-db55-4afa-8ac5-34de6b0a3b79": "What are the consequences of failing to protect data privacy in the context of intellectual property?", "abaa83aa-555a-4d21-acf1-7c73b0d057ff": "What is the risk-based pricing rule in lending?", "073ea355-2455-462d-8d7b-c26913cb9ef8": "How does the risk-based pricing rule affect borrowers?", "17d41936-ead2-475b-b785-6dd826930ae7": "What rights do consumers have if their credit application is denied?", "f99de0df-3a30-4c03-be26-e66bce9f385a": "What information must lenders provide to borrowers under the risk-based pricing rule?", "7b39da4e-33ab-41c7-826b-5d251d01cc39": "How does the CFPB protect consumers' rights regarding credit applications?", "4c661a4d-46b0-4361-881d-a2e3bfffc226": "What are the requirements for warehousing employers in California regarding quotas?", "f95da55e-afa4-49de-aabe-bcc697b0c48a": "How do algorithmic monitoring systems impact warehouse employees in California?", "7654615f-4130-42ba-ab22-d08da8a0d3b5": "What must California warehousing employers include in the written description of quotas?", "154ad3bc-8ed0-45e4-9156-62d7e04da73f": "Why is it important for certain decisions to be explained to consumers?", "c8c67888-5617-4df8-aa80-597113c355ee": "How does California law ensure transparency in quota systems for warehouse employees?", "e593b5e5-80a5-4368-8044-4401253e2242": "What are the unique risks associated with General Artificial Intelligence (GAI)?", "58567b4a-dad8-4564-bb4b-78ed4e066585": "How can organizations manage the risks posed by GAI?", "980286b9-57b1-41cb-be5b-1c92d2cade95": "What actions are suggested to mitigate GAI risks?", "c4dd04c4-3824-49be-bbb2-28e519e680e2": "What are the primary considerations for GAI mentioned in the appendix?", "710a2983-1bea-4763-b367-819eea4ef3bc": "Where can I find references related to GAI risks and management?", "91a4617d-ae27-46d6-967d-0120e9f36b4d": "What is covered in the introduction of the document on GAI risks?", "33034f48-184c-4594-80d2-8996aa2b9d20": "How does the document suggest handling risks that are exacerbated by GAI?", "fa471afa-5001-4d53-b1fc-23f57e65bb99": "What are some examples of risks unique to GAI?", "71a166e6-1296-4706-9677-80a0a0a51341": "What page contains the suggested actions to manage GAI risks?", "70bebbf1-bde6-4150-b4ef-a9a63f9f8a4c": "How comprehensive is the overview of GAI risks in the document?", "d01efaa2-b7e6-48c4-9117-00d808d80d03": "How can organizations ensure that notices and explanations are accessible to users with disabilities?", "c50f5c77-e898-4823-a5d9-6ead49445ebe": "What are the best practices for making notices available in multiple forms, such as on paper, physical signs, or online?", "7216a1fd-492e-4455-9b1c-87127423efe4": "Why is it important for notices and explanations to 
be available in multiple languages and reading levels?", "c4b916ed-6f69-42b4-a36d-b4b84b300ab8": "How should explanations be tailored to the specific purpose for which they are intended?", "e7b46646-126f-461b-8c4a-db9cd8cff6c2": "What are the key differences between an informational explanation and one provided for recourse or appeal?", "d67307ed-cd5b-4202-b68c-269b3cb7e195": "How can automated systems provide clear explanations for their decisions or actions?", "8185582a-dc07-4b2e-b832-9e3abf2e271b": "What steps can be taken to ensure that explanations are understandable and actionable by the intended audience?", "655c7548-ad98-400b-a65a-0d9b4aebe77d": "Why is it necessary to provide explanations in the context of a dispute or contestation process?", "8024511f-e7d3-4a95-83f6-e77cedb7f0f5": "How can organizations balance the need for detailed explanations with the need for simplicity and clarity?", "a6cf7580-b17b-496c-a7cf-7bdb4ad5cec8": "What are the challenges in making notices and explanations accessible to the American public, and how can they be addressed?", "1b8b06c4-65a8-4291-8125-da58b1a2fcf2": "What are the main flaws identified in the tool meant to help low-risk federal prisoners win early release?", "86c6a713-18f3-4788-94d7-798e37572f76": "How is the Justice Department working to curb racial bias in deciding who gets released from prison?", "3b3ac587-1908-4993-ad69-1d204707f0ba": "What were the key findings of the 2021 Review and Revalidation of the First Step Act Risk Assessment Tool by the National Institute of Justice?", "161b4640-9926-4d54-b332-3ae24fe928a1": "How does Google's Sentiment Analyzer exhibit bias against the LGBTQ+ community?", "5ef48cd2-e223-4cc4-935f-7b59b03953aa": "What are the implications of Google's Sentiment Analyzer thinking that being gay is bad?", "36ac78ac-c399-4cc2-bba8-b4c88049ba0b": "What is the Jigsaw Unintended Bias in Toxicity Classification project on Kaggle about?", "5e6dc05e-c94a-440c-badd-059d28a98ecf": "How does the First Step Act Risk Assessment Tool impact federal prisoners?", "b801b3ed-2645-4ab7-8eab-5f44c53f7cbf": "What steps are being taken to address the flaws in the tool for early release of low-risk federal prisoners?", "953c7a65-0054-4509-b437-2ee2f041e51e": "How significant is the issue of racial bias in the context of prison releases according to the NPR articles?", "48e496f1-8380-4006-9052-8e63ea4fdc38": "What are the potential consequences of bias in artificial intelligence tools like Google's Sentiment Analyzer?", "2bd65018-80f1-4a1e-8ff0-c03d11d2f9ac": "How can I opt-out of an automated system and what are the steps involved?", "4eafde78-6428-426b-85a5-fd83ddfc8a8e": "What are the criteria for assessing the clarity and accessibility of opt-out instructions?", "123f10a6-afad-4ad0-990c-17f252217093": "Are there any user experience research studies on the effectiveness of opt-out notices?", "f3cfe9a7-ea97-4243-a8c0-5cb436fb9198": "What are some examples of human alternatives provided when opting out of automated systems?", "3b9d6877-d127-4076-bbd6-390afed34038": "How quickly should a human alternative be provided after opting out of an automated system?", "f239aa99-3443-48c5-8df4-e7d753f4d1f7": "What makes an opt-out process unreasonably burdensome?", "b7ddcdb9-2b6d-4546-a92a-711d76916a7f": "How can organizations ensure that their opt-out instructions are easily findable?", "7baa6242-7aa9-410d-998c-d8d113ee3ed9": "What are the common challenges faced by users when opting out of automated systems?", 
"5bcd97f0-208e-48bf-b9c3-85ecfdd844d9": "How can the brevity of opt-out notices impact user decisions?", "ffaea519-7960-4f2b-ade8-290a2a8dfeee": "What are the best practices for providing timely human-driven alternatives in automated systems?", "7f2aa8e1-d759-4d16-835a-7d25726466be": "What is the main argument presented by Angela Chen in her article on The Verge about life insurance and online presence?", "5f1f09e0-afde-4833-8819-12492a28f526": "How might social media activity impact life insurance policies according to the 2019 article by Angela Chen?", "7e7ae934-b104-42e1-a4ec-88b2b1c55cd0": "What are the potential risks of using big data and algorithms in life insurance as discussed by Angela Chen?", "4c2801a4-a960-4bb9-bb30-91106cebe0b1": "How does Angela Chen suggest life insurance companies might use online records in the future?", "2bc6c92b-c94f-406c-97c3-94ff5221a4a7": "What are some concerns about discrimination in life insurance based on online presence mentioned in the article?", "f15e8ee8-6df9-4c38-a7a7-724b35d7266e": "How could life insurance companies benefit from analyzing social media data, according to Angela Chen?", "1a097e10-917a-47d2-bbd0-f888b755d76e": "What examples does Angela Chen provide to illustrate the impact of online presence on life insurance?", "83ce9f99-21b4-47f0-bf69-a1bdc6c6943f": "How does the use of big data in life insurance raise ethical questions, as per Angela Chen's article?", "c0058a46-7dfe-4578-a952-21be6f9e70dc": "What are the implications of Angela Chen's findings for consumers of life insurance?", "6d0329dc-668d-490c-8c24-42c4b1e1311b": "How might regulatory bodies respond to the use of online data in life insurance, based on Angela Chen's insights?", "2f8589f2-71e9-45f2-ae32-f28ec7bf2c4b": "What is the purpose of the technical companion in relation to automated systems?", "8cbfa2ab-0832-4346-ac20-86d72deba93e": "Who can benefit from using the technical companion as a reference?", "0424821d-48cd-42b3-a821-45f2d9a02ee4": "What are the three supplemental sections that accompany each principle in the technical companion?", "fc1f5d65-4b48-44ba-bbc9-d567cad9bf4e": "Why is the \"WHY THIS PRINCIPLE IS IMPORTANT\" section included in the technical companion?", "2bac89ba-56e9-4237-bbb9-2b5879e79287": "What kind of problems does the \"WHY THIS PRINCIPLE IS IMPORTANT\" section aim to address?", "dbd6b39e-eb46-4dcc-ae55-b3268513bb63": "What is the goal of the \"WHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\" section?", "2a6d8358-e27e-4c93-986f-95adb301ee63": "How can the expectations for automated systems serve as a blueprint for development?", "8cdabbb1-82d4-4201-914a-3a873fbcf68b": "What practical steps are outlined in the \"WHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\" section?", "95cd7988-3d70-4b9a-9079-383a78c28a45": "How do the expectations for automated systems mirror existing technology development practices?", "679bc28a-263c-454f-b2f4-3c5ccf1ae00f": "What is the vision of the Blueprint for an AI Bill of Rights?", "e4229b12-3425-4bac-9532-22a0fd43652e": "What are the key capabilities and competencies required for effective risk measurement and management in an enterprise?", "03a02bfa-6e55-4dd0-a51f-edce76b48834": "How can diverse demographic groups contribute to better risk management functions?", "825dd8bf-7c6c-4446-8011-c25d6f557389": "Why is it important to consider educational backgrounds in risk measurement and management?", "e2909c32-1244-4a0a-a3fd-cf8797be04ac": "How do lived experiences influence risk management practices?", 
"af541196-0d8c-497e-a398-c3c9253cfec4": "What role do different professions play in the risk management process?", "2855bf3c-9555-42a9-9e98-d7eda34003a7": "How can domain expertise enhance the effectiveness of risk measurement and management?", "5ac3f009-0665-4ce4-bcca-3c6faa028116": "What are the potential risks of harmful bias and homogenization in AI configurations?", "ba3a4a1d-711b-402c-bc07-9ec87c5389cb": "How can enterprises ensure that their data and benchmarks are representative of diverse user populations?", "a75d2170-8371-41de-9317-9b80e2823d28": "What are the best practices for involving diverse participants in structured GAI public feedback exercises?", "eefc16a2-a6f8-499e-a2f7-12e4df65614d": "What tasks are involved in AI deployment to mitigate harmful bias and ensure diverse representation?", "f5398e05-e48c-4721-b625-1d00761faddf": "What are the best practices for ensuring information integrity in data systems?", "7683e79a-d55d-446d-b33d-e51f78b017da": "How does human oversight contribute to the evaluation of information integrity?", "ed4ea430-7625-4ff5-96fb-d43600713d35": "What are some proven cryptographic techniques used to maintain information integrity?", "77d696c3-d291-4500-99d5-f940800f82f6": "How can automated evaluation methods help in maintaining information integrity?", "a3938f64-6ba3-4182-ad9a-f0a734f44349": "What role does the review of content inputs play in ensuring information integrity?", "65e0271e-8df4-4d79-b667-eb66dbe7884a": "Can you provide examples of automated tools used for evaluating information integrity?", "64ee0887-641a-423f-b769-1ceb31077cfc": "How effective is human oversight compared to automated evaluation in maintaining information integrity?", "8ca7525d-cf25-49cf-b7f2-3c8e0c68c8fd": "What are the challenges associated with using cryptographic techniques for information integrity?", "fbce740c-e2f8-4a6a-b5cb-b8e609d9628b": "How do organizations balance between human oversight and automated evaluation for information integrity?", "a19e1eab-0f2e-4afa-8fe7-df69b9bee656": "What are the key components of a robust information integrity evaluation framework?", "95c335f7-1eae-4090-bbd3-d200657a199b": "What are the implications of the data leak involving 235 million social media profiles exposed by a major data broker?", "d4722ec4-cde6-4b3f-a742-a10c5e340440": "How did the exposure of 12 billion records online occur, and what were the consequences?", "f313296f-4197-4b95-a34f-da79b135a5ab": "What are the main concerns surrounding the use of facial recognition technology in public housing?", "8a1416c4-b1bc-4eec-84af-787a472e11f5": "How have Amazon and Walmart used surveillance technology in their efforts to prevent unionization?", "c1dddd25-fdcc-449b-8e21-263bfadff302": "What enforcement actions has the FTC taken against the photo storage app Everalbaum?", "0ddce7bb-a5c7-4fa6-bea6-da3737420a35": "What measures can social media users take to protect their profiles from being scraped without permission?", "7acd2768-ac2c-4dbe-9c92-a27dcf7be867": "How has the public responded to the use of facial recognition technology in public housing?", "0c799549-781b-482b-9074-635d45590cea": "What are the potential privacy risks associated with large-scale data leaks like the one involving 12 billion records?", "24353569-a8b5-4fbb-b2d8-0e04fd061aa6": "In what ways have companies like Amazon and Walmart been accused of spying on their employees?", "cbda8aeb-bccf-46df-ba9f-12a9fc01b0b8": "What are the legal and ethical considerations of using surveillance technology to 
monitor union activities?", "1e6a21b9-e745-404b-be65-8c7b9e4b0bd7": "What are the key performance expectations for the system?", "a0c122d2-8188-4702-8b56-6d423c5f74d3": "How are human-run procedures integrated into the system?", "a2d4766b-5e37-4f46-adac-a62b1c427c78": "What types of data were used to train the machine learning models?", "1e2f5c68-7e0e-479e-a952-e3d1c6caf4f7": "How were the data sources processed and interpreted for the system?", "63d84cc6-9adb-40a1-896f-b2e6034c339b": "What steps are taken to identify and manage risks in the system?", "b66070c1-0ec6-46ae-9fab-b4445a76fc1c": "How does the organization address missing, incomplete, or erroneous data?", "fbe83be2-10ab-40ca-806f-d957f742ed90": "What concerns were raised during public consultation, and how were they addressed?", "24efaac2-edc5-4a7e-a8a9-2786e4c966e4": "What are the results of the performance testing in terms of accuracy and error rates?", "e9460513-d9f4-4ee8-a89e-9acdf8fdf716": "How does the system's performance compare to previously deployed systems?", "6fff7468-40e8-4590-9f72-615e77d43bc3": "What are the ongoing monitoring procedures and how frequently is performance tested?", "deabd138-adc2-44f5-98ee-322a39c17a98": "What are the best practices for re-evaluating risks when adapting GAI models to new domains?", "ee75ad83-a0d7-4713-99c1-7720e8e912b6": "How can warning systems be established to detect the use of GAI systems in new domains?", "39d5c426-28b0-44e6-9fff-8fb668122c5c": "What are the key risks to consider when adapting GAI models to new domains?", "70c28c2d-9d3b-4641-974e-a1804938ca0c": "How can one ensure that previous assumptions about security and safety hold in new domains for GAI systems?", "adf119f1-b069-4a65-87cd-ad53e5c43a5a": "What methods can be used to detect the presence of PII in generated output text?", "dd282118-49ce-47dd-87c2-d4156e944523": "How can sensitive data be identified in generated images, videos, or audio?", "d771683d-c686-4a52-81ee-292ea31f22d4": "What are the potential risks of harmful bias and homogenization in GAI models?", "f49bb512-05ef-4e2c-b6a6-091cbb905329": "How can GAI systems be protected from generating dangerous, violent, or hateful content?", "f863d5ef-78f1-4960-a14d-66a66caa16f6": "What strategies can be employed to ensure data privacy in GAI-generated outputs?", "828a7539-87cf-49c4-a17e-7be3ed25bfea": "How can intellectual property be safeguarded when using GAI models?", "56a2f1d9-fc30-46e6-9705-569c76ebb6c7": "What are the main ethical and social risks of harm from language models as discussed by Weidinger et al (2021)?", "ae85135f-051c-4806-95a0-2cfaeb1b3924": "How do trolls use AI to create fake images of celebrities like Taylor Swift, according to Weatherbed (2024)?", "6ee2f972-193a-4bfb-bd19-604bde142dc1": "What is the focus of the paper \"Long Form Factuality in Large Language Models\" by Wei et al (2024)?", "941d29d9-1e5b-4e48-add3-1024903e747a": "Can you explain the concept of \"Information Disorder\" as outlined by Wardle et al (2017)?", "cd19f434-9802-45cd-9417-bf530750bf85": "What are the key points in the \"Sociotechnical Safety Evaluation of Generative AI Systems\" by Weidinger et al (2023)?", "17234b23-05e3-4796-9357-c3255713355b": "How does the \"Taxonomy of Risks posed by Language Models\" by Weidinger et al (2022) categorize different risks?", "2d3d1d60-0258-4016-b2e7-21895ee3da0d": "What interdisciplinary framework for research and policy making is proposed by Wardle et al (2017) in their study on information disorder?", 
"b95decfd-0e22-427d-9a93-294412eab257": "What are the implications of the findings in \"Ethical and social risks of harm from Language Models\" by Weidinger et al (2021)?", "858d0487-9ef0-41bd-beb3-28e4bf4dbd52": "How does the paper by Wei et al (2024) address the issue of factuality in large language models?", "58dd02ef-48f3-4312-83e5-1fb54e25c2cc": "What are the recommendations for mitigating the risks posed by language models as discussed in Weidinger et al (2022)?", "2d3012f3-7feb-48d4-b54c-5107353c03d0": "What are the best practices for documenting AI model details, including data provenance and quality?", "93acef7c-f3cf-4bda-848d-c79f08fefdcb": "How can counterfactual prompts be used to explain machine learning models?", "efa2b789-a244-4680-af19-b891e4c72987": "What are gradient-based attributions, and how do they help in interpreting AI models?", "7173f456-fedc-440d-b22f-efa9cf4e85f1": "Why is it important to document the assumptions and limitations of an AI model?", "5d764367-0f8d-4efd-976f-e89b2c8f684c": "How does model compression or surrogate models contribute to AI model explanation?", "95643527-800b-404a-a62c-a3f7390fbdf9": "What are the ethical considerations to keep in mind when documenting AI models?", "694a90b5-31cd-4f46-b221-2bbccbf2ea0d": "How can occlusion or term reduction techniques be applied to explain AI models?", "0697ed1f-3d26-439c-9818-2971e3df5158": "What role do RLHF approaches play in the training of AI models?", "171d3b23-4f45-49d6-a317-a5fe6cf4016a": "How can organizations ensure the responsible use and governance of AI systems?", "dc38fdd2-c4f6-490f-a7e5-5c3751e542f2": "What are the potential risks of confabulation in AI models, and how can they be mitigated?", "13d44da0-2fef-4c2f-b4ff-19599f9b2afb": "What are the main issues that the principle of safe and effective systems aims to address?", "fe155026-d872-40e9-8736-9e9a69d08e1f": "How can reliance on unproven technology lead to problems?", "d4c4faa8-502a-4bf2-b182-7aa689ad6ae4": "What are some examples of technologies causing substantial and unjustified harm?", "46c0dbae-a959-405b-b3f9-8c2e83761656": "How can historical data negatively impact automated decision-making systems?", "116fa31d-f588-49de-8ed4-d15b3ba118e5": "What are the risks associated with using technology that has not been proven to work within an acceptable range of error?", "fc759a44-cbe0-404c-83b8-dcb5b364028c": "How can technologies be designed to violate the safety of others?", "4ee837f4-d5a5-4d90-a40a-019addf10b09": "What are some unintended harms that can result from the use of certain technologies?", "c824ae9c-741c-4730-a945-d591f13e0967": "Why is it important to ensure that technologies work as intended and as promised?", "472d7e59-379f-4fa7-9356-fe2608a2ee59": "How can irrelevant information from past decisions affect current decision-making in automated systems?", "079cadfe-b827-4d8b-8847-ce41fba6094c": "What measures can be taken to prevent technologies from causing unjustified harm?", "01e765c9-5aef-4c7c-bca4-d9b4b94e54fd": "What is the purpose of the Technical Companion in the context of developing technical standards and practices?", "33df19f9-df9a-4d75-b8cf-7b262bf4f8b3": "How does the Blueprint for an AI Bill of Rights relate to existing laws?", "c702da49-cb6a-498c-b6c2-a377e4973503": "What sectors and contexts should the technical standards and practices be tailored for?", "2f16d6ca-6f2d-4da2-8d77-9da96198f0aa": "How does the framework aim to impact rights, opportunities, or access through automated systems?", 
"c5ff5f27-e556-4207-8151-5519b2fe84a6": "Does the framework provide detailed analysis of existing laws?", "9ec597d8-9559-44a5-b175-db525d6f9e63": "What is the broad vision shared by the framework for automated system development and use?", "f6dcae70-8e0a-4f47-9c30-589ffa82d968": "How does the framework inform private and public involvement with automated systems?", "d6cc4e55-78e6-4d3b-9425-9f0e1ffa648c": "Does the framework take a position on legislative and regulatory proposals?", "7ac7defe-9d7c-4996-bda6-56a4fe59a5c9": "Are there examples of existing protective measures mentioned in the framework?", "bb7c4021-0d4e-4b15-b28e-1d9d6664c577": "What is the significance of not analyzing legislative and regulatory proposals in the framework?", "e1aec18d-e71a-4d28-be59-e5be3881d70e": "What are the key components of effective monitoring and governance structures for automated systems?", "b2ab71bb-0a72-4068-b1c1-505a308496ea": "How can unmet needs for change in automated systems be identified and addressed?", "eb859b33-b604-4d38-9b9c-36c1c453a82b": "What are the expectations for reporting by entities developing or using automated systems?", "057efbeb-09ad-4580-a430-acb50a9681a6": "Who should the resulting reports from automated systems be provided to?", "94f7d7a1-6947-4bd2-b4fa-6d8db3896c41": "Why is it important for reports on automated systems to be made public?", "c4bcd7fc-8a32-4295-a4f2-b6da221f7546": "What considerations might prevent the public release of reports on automated systems?", "4d3140c8-cffd-4c71-ab31-dfbc34b26f50": "How should information be handled when public reports on automated systems are not possible?", "7b4e0c55-fcda-4bcb-ab4a-e2333b46c568": "What role do oversight bodies and ethics officers play in safeguarding individuals' rights in the context of automated systems?", "fbd293d5-2f7d-46ba-8a9d-ba56d3e2690b": "Why is transparency important in the reporting of automated systems?", "82a4bddf-bde1-4da4-8597-af2b5f75319f": "How can the American people benefit from transparency in automated systems reporting?", "c8815a76-b3d3-4150-9fc2-ffeca9946282": "How do GAI systems contribute to the spread of misinformation?", "e6706517-19c4-47c4-9d33-e88aaa79be85": "What is the difference between misinformation and disinformation in the context of GAI systems?", "c1f9af22-5965-4be1-86a3-e4a301b6af2f": "How can subtle changes in text or images by GAI systems manipulate perception?", "551ffc98-e117-4c8c-aabd-84102ffaa8f7": "What are deepfakes and how are they related to GAI systems?", "5b9a4026-4170-4739-aa59-a82371bcde95": "How can GAI systems be used to target specific demographics with disinformation?", "3a57b342-f32d-4571-bf11-0fd968d2f12b": "What are the potential future threats posed by GAI models trained on new data modalities?", "bdc49d2f-f75f-409c-a019-6bd6e720673b": "How do multimodal models enhance the capabilities of GAI systems in generating disinformation?", "e354dc4e-3651-464a-85e7-a02d6e8c6fe5": "What measures can be taken to mitigate the risks of GAI systems spreading false information?", "e0faed04-d3ac-4ffd-847b-0eb7570099b0": "How do malicious actors use GAI systems to create sophisticated disinformation?", "25e2dfeb-bd2e-4183-bd34-d7a9fb889f4b": "What role do confabulations play in the production of misinformation by GAI systems?", "9d781fd4-80b1-415f-bdc0-cb99a0ad57aa": "What are the key policies and procedures to address AI risks associated with third-party entities?", "5ab4424c-a307-47e8-9524-720e346f1103": "How can organizations categorize different types of GAI 
content with associated third-party rights?", "3749d629-316f-4418-9f81-884d7050c768": "What are the best practices for managing GAI risks in collaboration with third parties?", "76664a25-080a-4041-947e-a87349ea3aa4": "How can joint educational activities with third parties help in managing AI risks?", "a3d40842-deb4-459e-b156-f9ef4c243572": "What approaches can be developed to measure the success of content provenance management efforts with third parties?", "9ec62f10-d33b-4bcd-a708-9a6b8f4f52b7": "How can organizations ensure data privacy and intellectual property rights when dealing with third-party AI content?", "0e343257-054b-4912-83af-419e09f10542": "What are the common risks of infringement of third-party intellectual property in AI projects?", "8d5d823e-8af6-4d8b-b8f1-c68b7d439a22": "How can value chain and component integration be managed to mitigate AI risks?", "2521694f-ca6a-4805-97d6-166beb459f2c": "What metrics can be used to validate the effectiveness of content provenance management with third parties?", "5985031f-49b1-44cb-9355-1cbdf90a9ec8": "How can incidents related to AI content provenance be detected and responded to efficiently?", "c5db5bcc-a2bf-498d-92ab-5bb0db131203": "What should be included in a report about data collected from individuals?", "46b110ea-13a4-41c5-be85-368d573b2111": "How quickly should an entity respond to a public request for data usage information?", "634c0a75-d94b-4dc9-9e9c-ecb9cfb11231": "What format should the data report be in to ensure it is understandable by most users?", "08fdf655-f40d-4a7d-8eec-5c6ca9a4754b": "What information about data collection should be included in the report?", "c63e76ef-0b53-439a-871d-3b04064a5530": "Who should have access to the data and metadata collected from individuals?", "9bdf0a7c-cc20-4518-b935-616a80516ebd": "What steps should be taken if a user login is not available for identity verification?", "3fd33ba3-95e9-4be2-87dd-1dbcf4afee88": "What is the purpose of summary reporting in the context of data usage?", "04331b6c-1532-4bcc-9a9a-f12352764d7f": "How should summary reporting be made available to the public?", "18215d4a-212f-4664-a6f7-f25e3fa46f80": "What are the legal limitations on the data and metadata that can be included in the report?", "c9685d0b-7fe3-47f6-a4b5-99d6cf0b3123": "Why is it important to include time limitations in the data report?", "313a9213-5dd4-4fd0-b6cc-5649c4da50f7": "What is algorithmic discrimination in the context of AI systems?", "36918793-5d0e-48bd-b045-f8ff83ea055d": "How does algorithmic discrimination impact different protected classes?", "664af71b-506c-4536-aa63-5a4c95942aa3": "What legal protections exist against algorithmic discrimination?", "0f5c4986-db15-4307-9880-e51f3bd60751": "Can algorithmic discrimination occur based on gender identity or sexual orientation?", "5dd96edd-ff11-42e1-8ea9-f87e9ae4b2de": "How do automated systems contribute to algorithmic discrimination?", "a14927ac-5614-423d-a8f9-5c0853bab534": "What are some examples of algorithmic discrimination in automated systems?", "67b571d8-1ebc-4a74-bc63-de217aebedd1": "How can organizations prevent algorithmic discrimination in their AI systems?", "176842e4-195d-47ea-bb47-4747e05ce154": "What role does computation play in automated systems?", "d7798ca6-fc5a-4819-813f-1d68a7715e00": "How can algorithmic discrimination affect policy implementation?", "12f7ff31-5e11-4d2c-bedc-26c5cc7322d9": "What measures can be taken to ensure automated systems do not violate legal protections?", 
"c21ceaec-63ff-45ce-a1c8-4c954a4d0cba": "What are the risks associated with GAI models synthesizing NCII and CSAM?", "c6cf6391-dc4d-4c46-b3b5-fb5a40f28726": "How have websites and mobile apps that generate synthetic NCII evolved over time?", "01e4a6f5-a4f4-4957-9cdc-2aaba3491a36": "What are the characteristics of trustworthy AI in managing harmful bias and ensuring safety?", "241aa1b9-216b-43a4-9f90-6610fe5c28b2": "How do third-party components in GAI value chains affect transparency and accountability?", "2a49cfa9-6bbd-43ad-bea4-28427449269d": "Why is the risk of improperly vetted components higher for GAI compared to traditional AI systems?", "72445f3e-2877-4f4b-9192-ba572bcdb530": "What challenges are associated with vetting large-scale training data for GAI models?", "422aafdb-ec57-4340-bd89-2fc20e3b0da0": "How do pre-trained models and software libraries contribute to the risks in GAI value chains?", "30ac0cae-4e18-4ad7-bb21-0e34d9b27082": "What measures can be taken to enhance privacy in GAI systems?", "d14ef51f-6a7d-4b9b-8c2c-7ba562b3d774": "How does the difficulty of training foundation models impact the development of GAI?", "d716e7c9-5860-48e4-914a-8c72625fdbbf": "What are the implications of automated and scaled online businesses generating synthetic NCII?", "407a7ba6-d65a-4a11-afa4-c6ea6e4c2736": "What are TEVV metrics and processes in the context of AI deployment?", "436567b2-4a53-418b-8002-08cb30c28f31": "How can measurement error models improve the validity of pre-deployment metrics?", "d6bd80dd-1e24-488d-a6e5-eb5aa8895943": "What is the importance of documenting biases or statistical variance in applied metrics?", "84eeb2b5-483b-4794-8d69-a1fb7ca5b57a": "How can domain expertise be leveraged when modeling complex societal constructs like hateful content?", "1149e1f0-19fe-4af1-b052-9c5b0bf7b453": "What are the risks associated with confabulation, information integrity, harmful bias, and homogenization in AI?", "3e72a6bd-6a5a-4b12-8156-069a0f9382be": "What tasks are involved in AI deployment, operation, and monitoring according to the provided context?", "1e55727a-0e04-4def-9388-239c06d052dc": "Why is it important to evaluate and document the effectiveness of TEVV metrics and processes?", "a76bd5b4-7983-42f7-98f1-6cf2180a7448": "What challenges exist in assessing AI risks using currently available measurement techniques?", "5bcfbe76-4c4e-4bf9-8a09-cd1dc9eb77c3": "How can risk tracking approaches be improved for settings where AI risks are difficult to assess?", "f8f78806-13b8-4acb-9826-c4408a3c523c": "What are some suggested actions for improving the measurement and evaluation of AI metrics?", "0e48d737-5d25-4c14-94d4-f0be919145c3": "What are the best practices for conducting post-mortem analyses of AI incidents?", "ee67a49d-fa40-4f72-bc61-a39c2144d958": "How can visualizations help non-technical stakeholders understand GAI system functionality?", "963ca3e1-2fd0-4a20-85c7-d1bfe31c0bd9": "What methods can be used to represent GAI model behavior effectively?", "133585cd-97fd-4343-9627-bfa3bdcaee9b": "Who are considered relevant AI Actors in the context of AI deployment and development?", "37f44acd-e83a-4327-9d26-ac7ce250879c": "How should incidents and errors be communicated to affected communities in AI systems?", "44946c07-c978-408b-9b84-a5fd16194ca2": "What processes should be followed for tracking and responding to AI system incidents?", "20bc157a-c8bb-4ff2-9003-6f475b0d145a": "How can after-action assessments verify the effectiveness of incident response and recovery 
processes in AI systems?", "bc817926-284f-402f-a8b4-8016544b7436": "What are the key components of a Human-AI configuration for managing dangerous or hateful content?", "757d49ae-2c4c-4dda-b834-35d68feba859": "How can AI deployment and development tasks be effectively managed to prevent future incidents?", "ec4a9dd4-cf97-4b49-8736-19a432697eef": "What are the suggested actions for managing GAI risks in AI systems?", "5dc1404b-b9d5-4165-bfba-5ae18b4ef0a8": "What is the Blueprint for an AI Bill of Rights?", "1b4835f2-59c4-4255-a964-61969ea8863e": "How does the Blueprint for an AI Bill of Rights address law enforcement activities?", "8f94848b-91d1-4bd4-9095-8fab3fe85de3": "Why might notice not be appropriate in certain law enforcement contexts according to the Blueprint for an AI Bill of Rights?", "be1037b8-1682-4b8b-ab5b-6bad9c9018d7": "What are some examples of automated systems that might require sector-specific guidance?", "adb12b37-a721-476a-817d-6ae9d45f34bd": "How does Executive Order 13960 relate to the use of automated systems in federal agencies?", "d9941951-34e0-4599-ba08-82c1823f7557": "What are the potential challenges of using AI systems in school building security?", "800426c1-0a93-4185-b638-fc754e6a3feb": "What safeguards are in place for federal departments and agencies using automated systems?", "6cf609a1-e1ab-4388-9eee-76603f895da4": "How does the Blueprint for an AI Bill of Rights balance the protection of sensitive law enforcement information with the principle of notice?", "03ffb16c-be24-46fd-a594-205d9e68e49f": "Why is future sector-specific guidance important for the use of automated systems?", "6ff903dc-0e95-45b1-b7ba-5147dcdd7088": "What oversight mechanisms exist for federal agencies using automated systems?", "90935253-ff29-43e9-9473-f71036e4e5f8": "What are the main security concerns associated with generative AI as discussed by Burgess in WIRED?", "a87294ca-3f18-46f9-921e-10e6fa956d49": "How does prompt injection hacking pose a threat to generative AI systems?", "16605b19-6407-427d-8ad6-0bd123fef5ef": "What are the key takeaways from the Canadian Centre for Cyber Security's guidance on generative AI?", "5cd233c2-fb54-486b-bec3-6c406a216562": "How can training data be extracted from large language models according to Carlini et al (2021)?", "799db38e-e7df-45c2-af2e-701b12bdbee5": "What methods are suggested for securing AI pipelines as per the Mandiant blog?", "7b243fdf-3ecd-4747-87fd-c6c9860cb3ae": "What is the significance of next word prediction in large language models as explained by Burtell et al?", "d623ee79-0c8b-4ee4-a9ce-edbf633b20de": "How does the memorization of data in neural language models impact their security, based on Carlini et al (2023)?", "fe664b26-b505-4d49-b30f-fe252750d38f": "What are the potential risks of generative AI highlighted by the Canadian Centre for Cyber Security?", "d5d0438c-36f6-4328-90cb-4baa96b6a1bc": "How does the Georgetown Center for Security and Emerging Technology explain the power of next word prediction in AI?", "466b29fa-ac89-4e2a-8791-cfe170592641": "What strategies are recommended to mitigate the security flaws in generative AI systems?", "39725f9a-68c2-4fe9-aa8d-9a4f08d023ae": "How are AI systems being used to limit access to equal opportunities in education?", "ca7c2204-b6f5-4781-bf3b-8d9f02540ffd": "What are the concerns related to the use of remote proctoring systems in education?", "f43c197c-7302-4f65-a850-8d1592d44eee": "How does facial recognition tracking impact students in educational settings?", 
"6d8e2c0e-e4cf-4ebb-a417-815a64668c79": "What are the potential issues with automated tenant background screening in housing?", "14c91abd-8d01-4cf0-8523-0812a5c1c840": "How does facial recognition-based control affect residents in housing complexes?", "68d008be-5997-4d96-96e0-731c60a6c09e": "What are the risks associated with discrimination in automated hiring screening?", "cc4bb020-b92c-4f19-adc2-06b98ff1da33": "How does workplace surveillance impact employee privacy and opportunities?", "e63cb0d8-46ce-4928-aef8-ccd1f1693044": "What limitations of existing privacy laws were discussed by the panelists?", "6f3c8f9d-9bbc-44cd-92a3-95215c3cfee5": "Why is it important for students to have the ability to reinvent themselves in terms of privacy?", "2f47fea5-c195-4bb6-889a-3d498268e761": "What are the concerns related to the use of robot teachers in education?", "b23c2ac0-3c8d-418f-9d3a-4d355510c475": "What are the key components of an incident response plan for third-party GAI technologies?", "e341d43e-c5cf-470a-9937-1a0522709924": "How often should third-party GAI incident response plans be rehearsed?", "d32de1da-1496-480e-bfcb-a256b5aa2656": "What are the best practices for aligning incident response plans with relevant breach reporting and data protection laws?", "0fc878ed-8fa8-46f3-a457-943aac86deb4": "How can organizations ensure effective communication of third-party GAI incident response plans to all relevant AI actors?", "62ea75e9-a1f9-482e-b748-93e00e24299b": "What steps should be taken to define ownership of GAI incident response functions?", "d326718b-3c54-47fe-b90b-e026762b440d": "How can retrospective learning be used to improve incident response plans for third-party GAI technologies?", "8113c5c3-1121-4595-b984-3595f621efb3": "What policies and procedures are recommended for continuous monitoring of third-party GAI systems in deployment?", "1f685537-4d22-4888-90a8-cf32f7774031": "How can organizations address data redundancy in GAI systems, including model weights and other system artifacts?", "32b71e7d-ef8b-49de-a3f8-f906cd313fe2": "What are the potential impacts of harmful bias and homogenization in GAI systems, and how can they be mitigated?", "0ee66a25-f2c4-434c-b514-7affa3493429": "How should organizations integrate value chain and component integration considerations into their GAI incident response plans?", "f150a1c8-dad3-432e-91e2-c40098a5ebd7": "How can LLMs deceive humans into believing they are speaking with another human?", "1d4e22f5-8de7-4f07-9526-61c7936f9459": "What are the potential risks of adversarial prompting in LLMs?", "7c6bd923-b691-426e-beb9-67b46c185a03": "How do researchers study the extent to which humans can be deceived by LLMs?", "04d3f9c8-515a-4f01-8cae-62fb636fd1cc": "What are the characteristics of trustworthy AI?", "b4f47764-9f91-49f1-88a2-221cc0e06715": "How can GAI systems produce inciting or radicalizing content?", "3e394353-4b52-4a52-af48-562eea9f2152": "What mechanisms allow LLMs to generate dangerous or violent recommendations?", "36aee960-dcad-4e5c-97d0-05f4fc62b815": "How can harmful bias in AI be managed to ensure fairness?", "a4b69757-7dcd-4eff-90eb-a30ced99f719": "What makes AI systems safe, valid, and reliable?", "72048f3a-c23d-42a8-8194-a14c109a5af2": "Why is it important for AI to be explainable and interpretable?", "2047cc7b-4522-4ef9-93de-8bc09531f843": "What are the potential downstream impacts of confabulations in GAI systems?", "ab3b72f8-672c-486e-bc6b-9461baa761da": "What is a pre-deployment assessment in the context of 
surveillance?", "8e05f5b4-738f-4015-9698-0d841d8727cf": "How is a disparity assessment conducted in real-world surveillance deployment?", "e41871e4-fd71-4858-9f31-90e4bce717ba": "What are the specific goals of data collection in surveillance systems?", "0aee33e7-9f19-42ba-9f6f-020edc4551d5": "How can one ensure that only the minimum required data is collected during surveillance?", "780e0590-0606-443d-a7e8-2891d967ee16": "What are scope limit assessments in surveillance data collection?", "85dfacbf-8094-47ad-80dc-7bbcc46712ca": "How are data retention timelines determined and justified in surveillance systems?", "6c98830e-c446-42a8-ac57-f8f46ee0a255": "What is the impact of surveillance on individual rights and opportunities?", "7abcd464-ee77-4ce1-8d41-57315a78fed9": "Why is it important to have an independent party assess the impact of surveillance?", "cc012aac-5ce3-434c-8e51-35c1e8338ce4": "How should surveillance reporting be provided to ensure clarity and machine-readability?", "51e0fc29-06be-4a85-a0ae-1fa144bff1cc": "What are the best practices for documenting the impact of surveillance on access and rights?", "b74eff6f-0348-4f4d-8455-ec94c409abc3": "What are the best practices for obtaining user consent for data collection?", "0db718f0-8251-47d3-83c0-f9a203c06084": "How can companies make consent requests more understandable for users?", "84f786f2-b45d-4028-8864-a9c521374ba4": "What are the current issues with notice-and-choice practices for data use?", "39695c7a-9723-424f-a040-717c4cadc993": "What enhanced protections should be in place for data related to health and finance?", "48492acb-3e38-45a7-bcde-061e9fcbb695": "How should data pertaining to youth be handled to ensure their protection?", "64e6cef4-2fdf-4121-b410-fc7168f68289": "What are the necessary functions for which sensitive data can be used?", "71ec2ac9-cc52-4831-a314-2f92000494e4": "What ethical reviews should be conducted before using sensitive data?", "8dd38cfb-46a6-4d54-9560-fa7036a393c5": "How can communities protect themselves from unchecked surveillance?", "40a38d86-7774-4a05-b3ec-46ee736efe3f": "What kind of oversight should surveillance technologies be subject to?", "5303236e-9b24-4300-a6eb-71fd2660ba8e": "What are the potential harms of continuous surveillance and monitoring?", "68145309-ec97-431b-a2e4-ff7c84ae2b92": "How can I opt out of an automated system in favor of a human alternative?", "bc2c2f00-164e-4fb2-b04f-0a8e11a127b2": "What are the criteria for determining the appropriateness of opting out of an automated system?", "99c78782-3873-4806-b67a-5057aee1fe94": "Are there any laws that require a human alternative to automated systems?", "f1218cc9-8cf6-492a-87b4-e7e2b34939a8": "What should I do if an automated system produces an error?", "550182c7-921f-4b50-a93a-089c2d1e4f06": "How can I appeal or contest the impacts of an automated system on me?", "38276fcb-783d-4a66-b4ab-96d3654c9707": "What is the process for accessing human consideration and remedy if an automated system fails?", "64350a23-d7dc-4a12-b082-a57617fd4d14": "What kind of training should operators of fallback and escalation processes receive?", "db717bad-ccdd-4097-a630-6459902c89c3": "How can I ensure that the fallback and escalation process is equitable and effective?", "bb183df4-5c49-4847-b777-9b605daaec7d": "What are the reasonable expectations for opting out of automated systems in different contexts?", "126df14a-0e79-4f7b-9ba1-5cb3197cef81": "How can I access timely human consideration if I encounter problems with an automated 
system?", "bb600a12-cf3e-430f-aeac-ffb55080bc75": "What are biological design tools?", "ab0afd54-b8f3-4255-bcea-00d7f469fa4d": "How can I access the paper on biological design tools on arXiv?", "4b17bc0d-84c9-44ce-b287-5873628bcb88": "What is the significance of the paper \"230613952\" on arXiv?", "d8711a1f-0375-405e-88fb-47084c2dd456": "How do biological design tools impact scientific research?", "4e3d1faf-aaf7-4382-bfdb-f07986da3774": "Can you explain the main findings of the paper \"230613952\" on arXiv?", "01016449-dd51-43e2-8064-6163d76960ca": "What are some examples of biological design tools mentioned in the arXiv paper?", "96010c49-fead-4764-a1cf-52f08f825c94": "How do I cite the paper \"230613952\" from arXiv in my research?", "1a7dc66f-7d3c-449a-91a8-730ae4fbf5e0": "What are the applications of biological design tools in biotechnology?", "618088f5-7d87-423c-94f7-092dad08e9de": "How do biological design tools contribute to advancements in synthetic biology?", "7a5bd3f1-0117-49f5-847a-c98c3e1ed923": "Are there any limitations discussed in the arXiv paper \"230613952\" regarding biological design tools?", "396fdf65-f8c8-44af-b354-a6a3a8080cf4": "What are some effective mitigation procedures to address algorithmic discrimination?", "53b2c06d-8db2-4193-bad2-2cabf8dd64af": "How can organizations ensure ongoing monitoring of automated systems to prevent algorithmic discrimination?", "33223652-33b9-477b-a3f2-76a8fd138783": "What are the best practices for performing disparity assessments on automated systems?", "669ee0bf-cd38-407d-a36e-b02d7efaeba6": "How often should automated systems be monitored for algorithmic discrimination?", "ba748242-63e0-4e5f-9536-5e37095f34d7": "What approaches can be used to assess whether an automated system has led to algorithmic discrimination?", "369d0faf-1db8-4137-8253-078bff29bf59": "How can changes to the context of use or associated data impact algorithmic discrimination in automated systems?", "177fe09a-b7d7-45cf-a228-70fe1d4708d3": "What role does demographic information play in monitoring and mitigating algorithmic discrimination?", "74ec3300-8c0b-4be9-9382-88a845824eef": "How can organizations identify patterns of unusual results in automated systems?", "5cf0dfa4-8cc2-49d6-a074-c9d97d87b51e": "What are the challenges in performing regular assessments of algorithmic discrimination?", "77d5bb4a-5a5b-46c2-987e-911e1f0d76e7": "How can testing with a sample of users help in identifying algorithmic discrimination in automated systems?", "269c41f4-2524-4d35-9147-09e322f2504d": "What is model collapse in the context of AI and synthetic data?", "53226a81-7a32-4f91-a589-46450a0b7283": "How can overly homogenized outputs lead to unreliable decision-making?", "0d9724ae-bfe2-47f4-a021-1296db5dd676": "What are the risks associated with foundation models acting as bottlenecks?", "db9015f0-bda2-4f91-9808-19738ae88078": "How does model collapse threaten the robustness of AI models?", "b87fc7dd-8992-4529-85cb-066cbde37dcc": "What does it mean for AI to be \"Fair with Harmful Bias Managed\"?", "eecaaa8a-3251-4874-9a51-cd62286c5095": "How can synthetic data contribute to model collapse?", "bfb13ae7-473a-4caa-968a-c783c333b1e6": "What are the potential consequences of overly homogenized content in AI systems?", "030514da-a5fd-4a1f-ac10-cf8922a6150f": "How can foundation models amplify harmful biases in downstream systems?", "43d3ae5b-1657-46fb-920d-87bbf081d086": "What are the characteristics of trustworthy AI?", "40e129bc-068b-44de-942b-641c29a2983a": "What risks are 
involved in human-AI configuration and interactions?", "3eda549a-f514-4a5a-8cef-68367b00516e": "What are some real-life examples of laws and policies that protect rights and opportunities in AI systems?", "4359e62b-013c-45a6-b540-f16600b3d7c0": "How does the Department of Energy's AI Advancement Council contribute to ethical AI use?", "f8696290-800f-4952-81bc-3020e6f11c09": "What are the key components of the Department of Defense's Artificial Intelligence Ethical Principles?", "38c06f0b-84d5-48fa-ab9f-4a53a84e8916": "How does the US Intelligence Community ensure ethical use of AI systems?", "b81f442e-1b5f-43da-8261-9b2c1363ea3d": "What practical technical approaches are being used to protect rights in AI systems?", "dba6ad78-0e3e-4068-a3b5-38c5ab583a94": "How do sociotechnical approaches help in the ethical development of AI systems?", "4b849fa7-2c5a-41a5-b197-62210fadaf66": "What issues does the DOE AI Advancement Council address regarding AI systems?", "6f3964c4-6694-4977-976d-770e0175af87": "How do the AI ethical principles of the Department of Defense differ from those of other agencies?", "34e35d11-aa65-49a1-92b2-ab8eb6f362e6": "What role do US government agencies play in the ethical implementation of AI systems?", "77bca3a1-9687-4e84-8144-d0e76454c55c": "How can policies ensure the responsible use of AI in national security and defense activities?", "adb044f3-b06c-4147-b270-388aa7ce28a1": "How can AI potentially enable malicious actors to access CBRN weapons?", "2cc96dd4-379c-4ea0-8aab-bf6fcb0ee94c": "What are the risks associated with LLMs in the context of CBRN information?", "6b0d4619-0b82-4e50-9d25-50215d1b4223": "How might LLMs facilitate the misuse of biological and chemical threat knowledge?", "7f729371-7c4c-40c9-9651-c84ceaf4ab59": "What did recent research find about LLM outputs related to biological threat creation?", "4bcda5c2-bc2c-4f9b-a31e-c90c1bffe24b": "Are LLMs more effective than traditional search engines in providing information on CBRN threats?", "eab83c61-d25e-47e3-bf55-cec2cf22dd7d": "What are the implications of LLMs for individuals without formal scientific training?", "80e574c2-a56c-4ade-b455-0eca52117b22": "How can transparency and accountability be maintained in the AI lifecycle concerning CBRN threats?", "7af58ef3-a1c9-420e-9eec-b2eecfc7ba18": "What measures can be taken to prevent the misuse of AI in the development of CBRN weapons?", "cb512b7c-e79e-479d-9557-8757c4fda4ed": "How does the accessibility of biological and chemical threat knowledge impact security?", "16c95e95-db9f-48d8-9c3f-fbf32e49bf92": "What are the potential dangers of AI in the context of CBRN information and capabilities?", "34bbf002-cd8d-4886-a398-74853f3697c0": "What are the regulatory safety requirements for medical devices in relation to AI?", "332c9a40-a1fb-448d-9067-d81497b47a04": "How do sector-specific privacy protections impact the implementation of the AI Bill of Rights?", "05415269-22c9-4836-aceb-97f1c11d2051": "What new laws might be needed to ensure the protections proposed in the AI Bill of Rights?", "ac4195ce-e0d5-4720-a4ce-5bcc8078b64c": "Are there any exceptions to the principles in the AI Bill of Rights for law enforcement?", "05f22b03-2ba9-4910-8467-4cb79c9a8206": "How can existing laws be balanced with the new principles in the AI Bill of Rights?", "d202eb6e-03df-49f4-88d4-b160a4626fcf": "What are the practical challenges in applying the AI Bill of Rights to specific use cases?", "02abe32e-eeca-4b60-9b32-22ae7ce787a5": "How does the AI Bill of Rights propose to 
protect civil rights and civil liberties?", "8afa840c-a2a0-42cb-95b1-8f1b404515e3": "What role does the private sector play in implementing the AI Bill of Rights?", "a0f7a50f-5987-4c5c-98a6-36316797bea8": "How might new policies be adopted to support the AI Bill of Rights?", "817d708c-fe29-43ce-9478-5bc616838321": "What are the competing public interests that might affect the application of the AI Bill of Rights?", "9cb6e467-ad95-44c0-841b-777832cfa4aa": "What are the safety implications of automated traffic control systems in smart cities?", "eb391602-3d68-4cb9-a04a-29b9595203c3": "How do electrical grid controls contribute to community safety?", "c3b9539e-277f-4737-9e5e-e386eeebd7a7": "What role do industrial emissions control algorithms play in environmental protection?", "73302da7-3bef-402f-9bf6-f12df15c536e": "How do smart city technologies impact the safety and efficiency of urban areas?", "58e6a606-3cd6-48fc-9dad-9c6fdc181f83": "What are the ethical considerations of using algorithms to adjudicate benefits and penalties?", "2d6ccf02-270d-43e3-9410-be3e588f9b6d": "How effective are fraud detection algorithms in preventing fraudulent activities?", "22b01cb1-5a1a-4789-8b56-4d7baa49fcc7": "What are the privacy concerns associated with biometric systems used for access control?", "dbb74eb6-3fde-40d5-8df3-bcc934e6a158": "How do access control algorithms determine eligibility for services or benefits?", "7b7be751-057b-4a7f-88ad-cfe0a6255aca": "What are the potential risks of using fully autonomous systems to make benefits-related decisions?", "0a96f62f-6fcc-42d1-a86d-2d07836fa4ad": "How do environmental impact control algorithms help in reducing industrial pollution?", "c12924b2-38c5-459b-8743-4004840152f9": "What is data memorization in the context of machine learning models?", "1ee45afd-2181-4a4b-8885-4e243fe3c9ba": "How do adversarial attacks on LLMs reveal sensitive information?", "064348ce-09cb-4cf5-9b13-efb66fb26273": "Why is it important for model developers to disclose specific data sources used for training?", "07faf7be-a125-488a-8fc9-95d2a01f7983": "What are the privacy risks associated with data memorization in AI models?", "0f2299ca-5f65-45fa-bec4-3d8a5dd6667d": "How can AI models infer sensitive information that was not part of their training data?", "082dc681-2582-47ff-9159-652af70f6d28": "What are the potential negative impacts of AI models inferring PII or sensitive data?", "8476f9de-1c46-484a-9d42-63c47d4e3ddf": "How can users be made aware of whether PII was used in training AI models?", "1930f79d-9ceb-4935-a643-c1d80c961fee": "What measures can be taken to prevent AI models from leaking sensitive information?", "d28999ed-71ad-4d55-afa6-e2d551dffcee": "How does the collection of PII for training data pose privacy risks?", "968af77b-e22e-4bd8-b951-9a663a94877d": "What are the challenges in ensuring that AI models do not generate or infer sensitive information?", "9c573068-8e35-41b2-879e-161babef96ed": "What are the capabilities and limitations of monitoring systems in deployment through TEVV processes?", "ead604ce-72c9-4dc9-a2ec-008dc710ab0e": "How do humans engage with GAI content in decision-making tasks?", "e690c10f-4933-40e7-835f-6034cc551627": "What are the benefits of using overt disclosures in GAI systems?", "ffc79ee0-1f3c-4956-b746-ac94aae246e9": "How can organizations document and delineate GAI system objectives and limitations?", "bf06a17c-7eb3-4ac4-9260-bea0f24eedfd": "Why is provenance data important for GAI systems used in content creation?", 
"97d2ebc4-8668-4f50-9e9d-4dec70f48766": "What are robust watermarking techniques for GAI systems?", "87372dbb-28be-4288-810b-cdec1b6c9afd": "How can metadata recording techniques help trace content origins and modifications in GAI systems?", "2423bd23-5f6b-4730-a392-af960a751c0b": "What tools and repositories are available for metadata management in GAI systems?", "fd2d6cb9-81af-4234-a95b-65534c17a395": "How can narrowing GAI task definitions to include provenance data benefit organizations?", "845cfb86-2eb6-43f6-bbc3-bfbd925cd21a": "What is the role of structured public feedback in enhancing content provenance?", "4bdb3f89-538a-40e3-9935-8e433ab9e6e2": "What is the purpose of the Generative AI Public Working Group (GAI PWG)?", "6905b587-39e3-4b09-ac42-6cded7de2b9c": "How does the GAI PWG obtain multistakeholder input on GAI risk management?", "13355254-5606-49a9-a3eb-4769a610066b": "What are the four primary considerations relevant to GAI discussed by the GAI PWG?", "e2b55aad-14c7-43c8-bdad-c0dcd2877575": "How does NIST plan to update the AI RMF subcategories and suggested actions in the future?", "ad739be8-7f90-4ee1-817e-e3f5a425b4f2": "What is the role of public feedback in the GAI PWG process?", "ea9cb785-05ef-4f79-8053-0816fff5eb8d": "How does the GAI PWG ensure transparency and collaboration in its process?", "0e7c873d-f587-4cfe-bb7e-5b4877644666": "What is the significance of governance in the context of GAI risk management?", "329c3806-ac7f-48ea-8945-74a6f91b5d0f": "Why is content provenance important in the context of generative AI?", "5a1e1ec3-fda4-453e-ab8e-ce1d9faad797": "What is the importance of pre-deployment testing for generative AI systems?", "5e8537e8-4e6c-41f2-9b53-2a9692305988": "How does incident disclosure contribute to the management of GAI risks?", "5c7e3648-de65-4b1e-b755-1e099598c19f": "What are the key strategies proposed by the Information Technology Industry Council for authenticating AI-generated content?", "d119441d-bce2-47b9-9924-dbf87e4cc817": "How does the concept of algorithmic pluralism aim to promote equal opportunity according to Jain et al (2023)?", "22519aa4-4650-4092-b1b5-e3ac5d8f725f": "What are the main findings of Ji et al (2023) regarding hallucination in natural language generation?", "48b1844d-583d-4e62-908d-889b415b5c31": "How do people typically react to AI failures, based on the study by Jones-Jang et al (2022)?", "cfb67ae3-43b1-4450-9bb3-d7f29aa0e86f": "What factors contribute to algorithm aversion as discussed by Jussupow et al (2020)?", "2c4bf45c-f7e6-46e1-9e67-30c91c0444c8": "Why do Kalai et al (2024) argue that calibrated language models must hallucinate?", "ab2fee39-2a46-49d1-8cb6-55b4167b1b63": "What are the implications of automation bias and algorithmic aversion for the adoption of AI technologies?", "dc7ea38d-faed-4f3d-a7b0-8c1ae79788dc": "How does perceived controllability influence people's reactions to AI failures?", "7ec7f6e0-8490-4dab-ad43-7031aaff916f": "What are the potential solutions to mitigate hallucination in natural language generation as surveyed by Ji et al (2023)?", "3835091e-5a77-48ed-b786-16833c6dad03": "How does the literature review by Jussupow et al (2020) explain the reasons behind people's aversion to algorithms?", "14ce14f2-5924-440f-9f82-1d9518db6f9e": "What are the best practices for documenting GAI risks associated with the system value chain?", "c102bbce-23e6-4b6e-a359-2de7b683d7f0": "How can organizations identify over-reliance on third-party data in their AI systems?", 
"1bd749b4-cb8c-4f68-951e-4dc05f918076": "What are the key components of a contingency process for handling failures in high-risk third-party AI systems?", "4ba5ee61-6497-4e28-86f2-603d556d088f": "How should incidents involving third-party GAI data and systems be documented?", "17f3b4d5-ed49-486d-af7c-5c8150c925ef": "What are the common risks associated with using open-data and open-source software in AI systems?", "9bf8ec79-8c71-49e3-9cf1-bfcf6e900eac": "How can companies integrate value chain and component integration to mitigate GAI risks?", "d4c8b3c6-aaa0-44ad-89d8-826c0e200e82": "What steps should be taken to ensure intellectual property is protected when using third-party AI systems?", "5724c74c-76af-4944-89d9-d6e91234c261": "How can procurement processes be optimized to reduce risks associated with third-party AI entities?", "a22ca24b-e8c8-49a3-9ddf-5f838e432323": "What are the roles of contractors and consultants in managing third-party AI risks?", "b00e06f3-0822-47bc-8d94-e046c58311d6": "How can organizations monitor and operate AI systems to ensure they are not overly reliant on third-party data?", "2f3330e7-4aca-4708-bbc6-29c775344e94": "What is an algorithmic impact assessment?", "24490295-2bcb-4b3a-b834-033104ba2420": "How can entities ensure accountability in automated systems?", "b0e2eb81-c76f-407d-87a7-bf82bdc5c994": "Why is it important for notices to be timely and up-to-date in automated systems?", "2dca1178-1147-434b-b575-69e710673993": "What should be included in notices about automated systems?", "93bf2a5e-1a9f-4aa4-a500-5dde06657e7c": "How can user testing improve the clarity of notices and explanations in automated systems?", "f79d35e1-9c4c-471c-b976-8c3dae58e572": "What are the key components of Algorithmic Discrimination Protections?", "62517b56-3063-4614-8fd7-d987ee4bf4e2": "How should entities notify users about changes in the use case or functionality of automated systems?", "c53f1315-615a-4dfa-a417-1ea3f2c92e3b": "What are the expectations for reporting in the context of automated systems?", "bd8dd202-37fc-4f0e-9788-e8deb33eabdb": "How can entities ensure that users understand the notices and explanations provided for automated systems?", "fbf6c048-26ac-4ec7-84a1-ae7cc41ebfb7": "What role does research on user experiences play in the design of notices for automated systems?", "38534eb5-4881-4f58-be97-63238c6c479b": "What are the current limitations of existing GAI tools in facilitating CBRN weapons planning?", "b27e8b4f-0fcc-41d9-a4e8-94ac31878ba1": "How can ongoing assessments improve the monitoring of AI tools' capabilities in CBRN contexts?", "991a0d40-0a1c-44ba-b955-3de8eb4bd067": "What measures can be taken to ensure GAI systems do not access sensitive data related to CBRN weapons?", "e84751a4-90f8-413b-b3b6-e91f28305dcc": "How important is it for AI systems to be explainable and interpretable in the context of CBRN risk assessments?", "89c9205b-de28-4888-8c7f-79f9900b731b": "What are the key characteristics of trustworthy AI in the context of safety and security?", "8c67ad80-5cd3-4842-88e6-fc5e71086935": "How can we enhance the safety of AI tools used in high-risk areas like CBRN weapons planning?", "8b30e5d6-0420-4224-b85d-052d8d01a5b4": "What role does explainability play in the trustworthiness of AI systems?", "766d94d6-ff7e-4281-9d69-ba577c2b2228": "How can we ensure that AI systems remain interpretable as they become more advanced?", "0d7ff6a0-f0ab-42d2-bc69-3d81e2146c1a": "What are the potential risks of GAI systems having access to CBRN-related data 
and tools?", "03c6100c-5fe4-4dff-9b62-da870a412d36": "How can we balance the advancement of AI capabilities with the need for safety and security in CBRN contexts?", "0f4996c3-02e1-4284-94a7-c3695bba5538": "What are the rights of the American public when it comes to opting out of automated systems?", "070d096a-c121-4062-9907-63cb49651bb5": "How can people with disabilities ensure they receive reasonable accommodations instead of automated systems?", "5b9ce146-9406-4f12-8eae-194503dd5c93": "Why is it important for the American public to have a human fallback system for automated services?", "77c334c1-312b-4323-9cf6-971bedb52b19": "What protections are in place for the American public if an automated system fails or causes harm?", "93b3a12a-1c0a-4a3d-8a80-a250055ab49f": "Are there any laws that require a human alternative to automated systems?", "0fcc07cd-f226-4132-b9bb-62b83ccc8c18": "How can the American public conveniently opt out of an automated system without being disadvantaged?", "e6ab4ce6-4166-46a9-8f52-a88ce88d15f0": "What are the potential risks of relying solely on automated systems for critical services?", "61dd4dc8-b98c-4c08-b83e-b7c8eb005bd6": "How does human review provide protection against unexpected scenarios in automated systems?", "28b5c99a-f4e0-41c7-b3c0-57d98c306d61": "What measures are in place to ensure that time-critical systems have a human fallback option?", "15bda80e-6fb3-4e9e-b3aa-343fbfc57016": "Why might an automated system fail despite rigorous testing?", "b17b2a91-c2f2-421a-9161-f577f4f49902": "What is the AI Bill of Rights released by the White House Office of Science and Technology Policy?", "1c591402-7c09-4342-9a4c-3e447029fa73": "When was the Blueprint for an AI Bill of Rights published?", "3f4d1548-89ee-4941-adf0-70889cee50a0": "What is the purpose of the AI Bill of Rights framework?", "3451f2b4-c2cf-4c53-979c-50973acbb1bd": "How can I access the Blueprint for an AI Bill of Rights online?", "d0bc240e-3df8-4e3c-91e6-826ec2f75c9a": "What is the role of the Office of Science and Technology Policy (OSTP)?", "7edebb20-1da2-49a3-b7c1-035d6f51e95c": "When was the Office of Science and Technology Policy (OSTP) established?", "8aeea347-cc31-433e-b950-82f344182a53": "What prompted the development of the AI Bill of Rights?", "e6649708-a5e7-4253-873b-745f594a79a0": "How did public engagement contribute to the AI Bill of Rights initiative?", "550ed3d0-68d5-4c20-9af9-61b217f9c8ec": "What are the main goals of the National Science and Technology Policy, Organization, and Priorities Act of 1976?", "a1df848c-b03a-406d-a467-61ee5c89957f": "Who benefits from the advice provided by the Office of Science and Technology Policy?", "50365036-5406-482d-a4f4-64f43b779b3a": "What is continuous monitoring in the context of GAI systems?", "fe6f7ccc-7cd6-4b50-8a46-8aba632ecc48": "How can feedback from affected communities improve GAI outputs?", "608884ac-731d-4bfa-b339-8c8d39749dd9": "What are some techniques used to evaluate the quality and integrity of data in AI training?", "2372552e-4f0d-4f62-8715-f7845287595c": "What is chaos engineering and how is it applied in AI systems?", "b404ae73-34eb-4434-82b9-0e2c6ab3bf2e": "Why is stakeholder feedback important in evaluating AI-generated content?", "fdc418c4-4bca-46d6-95b4-c7d346c634ca": "What are the benefits of structured human feedback exercises like GAI red-teaming?", "7e203668-296d-4740-807b-996e28110ecf": "How can GAI red-teaming help in managing risks associated with AI?", "fb5db5ae-01cb-4cf0-8c7b-9193091f22ff": "What are the 
potential negative impacts of GAI systems that need to be monitored?", "4f49fc89-1623-43ba-9f22-18c09e38f9ef": "How can harmful bias and homogenization be mitigated in GAI systems?", "5f86869b-e204-4551-8742-3ac0a5c93510": "What are the key components of tracking and documenting risks in GAI systems?", "92689af0-19a9-4930-8cc2-d7dcf81cda7a": "What are the common issues users face when conversing with chatbots?", "44aae3aa-24ad-4059-9e44-4c37b8d088b4": "How do users typically react to unhelpful responses from chatbots during distressing situations?", "cef22ba7-26f8-4a69-8040-adb5bf18fa6f": "What risks are associated with the creation and public exposure of offensive or hateful language by AI?", "cd099f8e-41d5-45f2-9799-b0ffd6d8b77c": "How can AI-generated denigrating or stereotypical content contribute to dangerous or violent behaviors?", "63ef9f64-d9a2-4b88-9c2f-0c99870fc0e5": "In what ways can the spread of denigrating or stereotypical content exacerbate representational harms?", "ab431b98-c209-4dcd-a050-4a14e1bdb05b": "What characteristics make AI systems trustworthy, particularly in terms of safety, security, and resilience?", "802de2ca-5fa0-4365-94c2-2f48d2551dc3": "What are the privacy risks associated with the training of GAI systems?", "a7780055-4593-4371-baf2-0854c10b4668": "How does the use of personal data in GAI training conflict with widely accepted privacy principles?", "2eb881cb-2d98-4856-b5d4-062d6a883283": "What are the key privacy principles that might be at risk due to GAI system training?", "1835f2f8-1619-4637-a8a9-fb620ed7cb26": "How important is transparency and individual participation (including consent) in the context of GAI system training?", "a5a1e993-48c9-46ea-8808-a32d93da94aa": "What are the ethical considerations for using sensitive data in automated decision-making processes?", "69dbc047-26cf-443a-83ec-63b327f9646c": "How should health insurance companies handle sensitive data provided for payment facilitation?", "1bc73bf1-7e48-46ac-a612-80196cb803e7": "What is the role of an independent ethics committee in reviewing the use of sensitive data?", "c084d257-5a73-46df-8538-5073ed7a01c4": "Why is it important to have periodic reviews of decisions based on sensitive data?", "91cd1699-977f-4d73-8327-5dc62cff847d": "How can we ensure that necessary functions remain optional and not coerced?", "2f1e2c40-8abe-446e-9ab4-5eb6690a678e": "What are the potential risks of using dynamically developing algorithms in sensitive data contexts?", "6ed87438-4cb5-4a0e-8495-7378ba140033": "How should entities handle data that might limit rights, opportunities, or access?", "7cf57cf0-ea86-4802-a27d-d8900db77529": "What are the guidelines for ethical review of sensitive data usage?", "80b69831-2305-4880-94da-f11163809d19": "Why might an ethical review determine that data should not be used even with consent?", "8d50eaaf-61f5-49ac-a319-1c3d968c61a0": "What are the challenges in monitoring the ethical use of sensitive data in automated systems?", "623d8b3e-7499-4bf1-b87b-e7a81b50ee4a": "What is GAI red-teaming and how is it used in AI evaluations?", "b45f8960-4fbb-403a-8415-36afec2cd758": "Why is it important to engage representative AI Actors in structured human feedback exercises?", "64b731e3-b3d6-480b-9718-5237e4c7595a": "How can harmful bias and homogenization be mitigated in AI systems?", "c2a47cbb-9233-4979-8506-436f11a090b3": "What are the key considerations for conducting impact assessments in AI?", "6ce7fe74-d363-4677-b304-583120b4f062": "Why should those conducting human 
feedback exercises not be involved in system development?", "c44d6e5d-92e9-47d4-a053-1feb8c4ea992": "What roles do AI Actors play in AI deployment and development?", "272d11b8-e3c8-4c56-8cd2-61cec900ff82": "How does data privacy factor into human-AI configuration?", "3cca6e56-8ef0-4ae7-9605-f6f461bb0b87": "What are the responsibilities of domain experts in AI impact assessment?", "b49893d8-5d05-416a-962b-90597b72602d": "How can AI systems be monitored effectively to ensure they meet evaluation standards?", "6e23f927-8119-4bf8-823e-e3ef9d26c60a": "What is the significance of TEVV in the context of AI operations and monitoring?", "723651fb-9c2d-409b-b320-816fae7dec56": "What are the Access Board\u2019s Section 508 regulations?", "75f53b59-e2b8-4540-adea-9e72c300aab8": "What are the technical standards for federal information communication technology in the United States?", "660d930f-69d6-4768-b0a1-44bb673dfacf": "What is the International Organization for Standardization's role in technology design processes?", "ca06fff7-b717-465a-91c0-f20cc32b3902": "What are the World Wide Web Consortium Web Content Accessibility Guidelines?", "0d33d01e-495f-4254-8b8e-d7393c8fa678": "What is NIST Special Publication 1270 about?", "43a76af4-542c-499e-81e3-23edeecb1692": "How does bias in artificial intelligence affect public trust?", "f2963ebd-4268-48bc-99ef-b385c395b833": "What are the three categories of bias in AI identified by NIST?", "6007e7be-7afe-475c-8f2c-d189608a35f8": "How do systemic biases contribute to harms in AI?", "873995b6-97c4-4fbf-9d84-aafae525c908": "What are some examples of statistical bias in artificial intelligence?", "6106aef5-150e-4d9f-b12d-839a1bc64d5e": "How can human bias impact artificial intelligence systems?", "5d31f548-e180-4961-b02c-1026476307a5": "What is provenance data tracking and how does it help in detecting synthetic content?", "41737fda-7f5e-4526-8ecb-d8a560682739": "How can provenance data tracking improve the trustworthiness of AI systems?", "759a70d7-6d44-4ebd-b51f-264e7fd500c6": "What types of metadata are included in provenance data tracking for GAI content?", "10bbc39b-cae2-4ff4-b61a-2c7fa87c5fd4": "How does synthetic content detection contribute to GAI risk management efforts?", "cf616e2b-26ef-43e4-9dc4-b7b9569b9f8a": "What are the benefits of combining digital content transparency with organizational accountability mechanisms?", "9b2af444-94eb-4943-808d-8a01c74a7860": "How can provenance data tracking help trace negative outcomes back to their source?", "110eb68c-1767-4f4d-ba87-2028a2a1105c": "What role does provenance metadata play in improving information integrity?", "1a06806d-fb35-4c87-8809-da5a08874714": "How can provenance data tracking be implemented for different types of content like text, images, and videos?", "5296dde2-bf5c-4f7d-b712-62c552801ef5": "What information can provenance metadata provide about GAI model developers or content creators?", "5b028681-df5b-4e75-a745-ef99535810a3": "How does digital content transparency uphold public trust in AI systems?", "273f9105-5fd9-4850-ad10-dd9a9b8cc778": "What are some potential risks unique to the development and use of GAI systems?", "f2015027-8be4-41af-9324-6b28bc14ba10": "How can organizations identify and manage risks associated with GAI?", "1c39f437-06bb-4fe4-9973-5e2bdc4a8cb2": "What does it mean for a risk to be labeled according to the outcome, object, or source of the risk in GAI?", "1e85d184-b2c1-4ff6-af08-8ea57ec19ad9": "How are the risks of GAI mapped to Trustworthy AI Characteristics in 
the AI RMF?", "e0add861-37cb-4915-902c-04c52bc85356": "What are some examples of risks \"to\" a subject or domain in the context of GAI?", "b2c9c8be-32f2-487a-8dcc-b0edbbaec345": "What are some examples of risks \"of\" or \"from\" an issue or theme in the context of GAI?", "553df791-945d-4f86-8985-dd794f43f520": "How can future updates help in managing the risks associated with GAI?", "39d40845-848b-4f07-ac52-444d6f6dc6e4": "What role do Trustworthy AI Characteristics play in managing GAI risks?", "d8eb671f-52dc-4612-9671-cf9c05b5beae": "How can organizations categorize risks based on their unique approaches to risk definition?", "6d2fc2bb-39a1-4e50-b2f9-1579f87f3239": "What is the significance of Appendix B in the context of GAI risk management?", "295453ca-009a-489b-9a3b-7f39d0aede8a": "What is the NIST Privacy Framework and how does it help organizations manage privacy risks?", "8a36fef4-811b-4fdd-80eb-a3d79e7b5155": "How can organizations use the NIST Privacy Framework to support ethical decision-making?", "417512e6-a1fb-48c7-b125-859ffc865ba6": "What are some benefits of adopting the NIST Privacy Framework for managing privacy risks?", "39ea0fdf-a276-42b5-911b-4ec55c6e14c9": "Why did the school board's surveillance attempt in Lockport, New York, lead to a state-wide biometrics moratorium?", "2d51294e-3f63-42ce-b8e7-80d8791bacfc": "What are the implications of the biometrics moratorium in New York schools?", "28211d1f-6063-4fe9-8a8f-56d2e0c9bd80": "How does the NIST Privacy Framework help organizations demonstrate compliance with privacy laws and regulations?", "ffd0d540-b753-4764-918d-866af54215a0": "What sectors have voluntarily adopted the NIST Privacy Framework?", "a4e0cabf-38aa-4b49-920c-c6d52520a8fa": "What measures are included in the NIST Privacy Framework to identify and communicate privacy risks?", "c63477fe-7ddc-4df4-8341-f70bcccc16f6": "What was the community's reaction to the school board's surveillance plan in Lockport, New York?", "916eda70-3efa-4870-9261-7badb6422454": "What is the significance of the July 1, 2022, deadline in the New York biometrics moratorium law?", "5928ad96-89e5-4214-a148-421437e5e16a": "What does it mean when a document does not supersede existing statutes or regulations?", "ccf1c7b4-aec1-4f63-b6b5-ee4c5681bfdd": "How does this white paper affect the interpretation of international instruments?", "3320cd92-d86b-448f-aca0-3a32e873afe7": "Are Federal agencies required to comply with the principles described in this document?", "eee8169c-4314-45e2-88bc-1d4fe146f56b": "Can the principles in this white paper influence the US government's position in international negotiations?", "8a46d200-e514-4172-a7b4-199a50cfb153": "What are the implications if the principles in this document do not meet existing regulatory requirements?", "4e95d3b7-08c9-4644-87d2-e0c03b0312ea": "How do these principles impact lawful activities of government agencies, such as law enforcement or national security?", "de79da97-0834-4ee6-b1f4-eb364ebbe1ac": "In what ways does the context of automated systems usage affect the application of these principles?", "ef810a8b-abbe-45ac-b82f-47d20dc71885": "What should be considered when applying the principles of this white paper to automated systems?", "b0f0f94e-aa95-48c9-8e54-5d27b11c1ddd": "How might the principles in this document interact with existing policies and regulations?", "a5863708-81c6-4a01-a207-62001c6f482f": "What are the limitations of the principles outlined in this white paper regarding government agency activities?", 
"b613cee8-31a4-4979-8cd1-7547aef01a05": "What are the benefits of using technology to push benefits to those in need?", "93e8f04f-3a3f-4daa-8c55-9a697403cf18": "How can automated decision-making systems ensure positive outcomes in benefit distribution?", "4d63f90f-ffcf-461e-b0b2-d7443e9e0e53": "Why is it important to provide clear notice and explanations about the use of technology in benefit systems?", "87ebbea1-b1c2-4928-8cc9-72bec3599f01": "What are the potential risks of using technology to take supports away from people who need them?", "383af2c3-7350-4f63-b41b-480703a2f318": "How can human-driven mechanisms ensure the timeliness of benefit payments?", "db698d0d-61cc-4055-82ba-9e9f070b90a0": "What emerging technologies are being used in the healthcare system?", "864c0429-b586-4dfb-9fd9-1122c038f71f": "How can consumer products related to health benefit from current technological advancements?", "512bfcf4-61ac-46a5-bc16-91f0073ecef3": "What role does the White House Office of Science and Technology Policy play in healthcare technology?", "cf6c1f99-aa2f-4487-bf62-4d84073869b1": "How does the Center for American Progress contribute to the discussion on healthcare technology?", "3ab8040e-18b6-4400-94bc-7f7d0d4b1b2b": "What are the responsibilities of the National Coordinator for Health Information Technology in the US Department of Health and Human Services?", "789a4e6b-1b5f-4a2e-be70-4c60b2e169ef": "What are the key expectations for automated systems in terms of technical standards and practices?", "d904c6a8-b7ae-43d7-8287-35c070601ea2": "How should derived data sources be tracked and reviewed in automated systems?", "b604912b-4408-4b2b-8eb6-c1c90a9637c8": "Why is it important to identify and track data derived from algorithms in automated systems?", "17717bc2-52ea-4d65-b273-956e440e43ef": "What are the potential risks associated with derived data in automated systems?", "26da822d-31fe-48b3-9ca8-50155d6fa977": "How can feedback loops and compounded harm be prevented in the use of derived data?", "c5d39949-dbb6-4c5e-b00a-b6fd7788ae1d": "What measures should be taken to validate derived data against collateral consequences?", "cfea209a-9830-4e58-b251-1f70f42f93f1": "Why are data reuse limits important in sensitive domains like criminal justice?", "7a516f8b-a3c1-4579-a094-1302e810e841": "How can data reuse in new contexts lead to the spreading and scaling of harms?", "acc90406-7fa4-487f-a601-6e3dc23145a9": "What are some examples of sensitive domains where data reuse should be limited?", "3a91cd21-aa78-4c15-a216-a24057643605": "What practices should be implemented to ensure the safe and effective use of automated systems?", "8363b99c-85ab-4773-86a2-f63cac3d051a": "What are the key responsibilities defined for periodic review of content provenance in GAI systems?", "f87db50c-422f-4529-a21c-78abd2d10fab": "How often should the periodic review of the risk management process be conducted for GAI systems?", "34ed1fca-1d3e-4fdc-b3a4-9281c24dd99e": "What organizational roles are involved in the incident monitoring for GAI systems?", "4d916dd9-762b-4031-b912-5fc4222557c2": "What are the suggested actions for improving information integrity in GAI systems?", "cb1a51e1-502e-4c12-b98b-10353458c6e6": "How can organizations establish effective policies for after-action reviews of GAI system incidents?", "09aabd10-bf20-4e36-a1af-b2389c95fefe": "What is the importance of maintaining a document retention policy for TEVV in GAI systems?", "16ef8d11-a13c-44d4-a88e-868f36377f22": "How do after-action reviews 
help in identifying gaps in GAI system incident response?", "abb1af0f-005f-408f-aa68-27d64a96a71e": "What are the procedures for updating incident response and disclosure processes in GAI systems?", "681af4b6-c008-47ac-87ac-859d9849d77d": "Why is it important to have digital content transparency methods for GAI systems?", "c32d437f-b5b4-4d28-b7f8-1a2de95eb23b": "What tasks are associated with governance and oversight in the context of GAI systems?", "2792f4ff-31f6-48c7-9b05-a903e1fa4fdf": "What is the NIST AI Risk Management Framework?", "b4374d8c-f461-4c57-8c4e-4d9975359384": "How does the NIST framework aim to manage AI risks?", "cd79ee78-f93f-4750-875f-33a59251f574": "What are the key characteristics of trustworthiness in AI according to the NIST framework?", "0135028f-ffa7-4189-92d1-0888059657c4": "How is the NIST AI Risk Management Framework being developed?", "7c54c8a4-972c-48f7-9388-9640bbc920ce": "What role does Congress play in the development of the NIST AI Risk Management Framework?", "7ab73fee-d39b-41c6-8cd8-222b44049d82": "How can organizations incorporate the NIST framework into their AI systems?", "579d7a98-c43c-4d11-b24e-6d7275e820fa": "What are the benefits of using the NIST AI Risk Management Framework?", "427a0cc8-5c14-4b43-bcd0-372317b5a8e7": "How does the NIST framework address privacy concerns in AI?", "e7555614-9fb9-4861-b164-85033e7aeace": "What opportunities are available for providing input into the NIST AI Risk Management Framework?", "5c5b27b8-e80f-4e87-80a8-dda89bca7b12": "How does the NIST framework ensure the explainability and interpretability of AI systems?", "4436c96e-b176-4846-b3be-120e574bbaae": "What are some examples of unintended harms caused by automated systems?", "53134aa2-f21c-4ea7-be6f-6b32937c51f0": "How can companies ensure that their development decisions are ethically vetted?", "89297186-beb5-4d42-b727-46821b71a4e0": "What are the benefits of pre-deployment testing and ongoing monitoring for automated systems?", "ae608ad7-1d56-4091-a89f-36c7c99c6c23": "How do public consultation processes help in the deployment of new automated systems?", "666e61ac-70db-4dc3-8657-e4c29d144123": "What existing practices protect the American public from potential harms of new technologies?", "98137aae-d585-4b93-b931-6c0f0a022772": "Why are proactive protections important for the use of automated systems?", "18d231da-3925-4aa0-8358-ad67f4ce8c1e": "How can expanded protections increase public confidence in automated systems?", "9f500df2-ad6b-42ed-8645-8a9e917bdf80": "What role do ethics reviews play in preventing harm from automated systems?", "8d4c69ae-abc4-4f4b-961e-5d0f8194de75": "How can governments improve their consultation processes for new technologies?", "4097b122-a3b3-4718-8577-48d550199b62": "What are the challenges in implementing safeguards for automated systems?", "1a442f2d-3986-4d5f-ab68-0ad589163a65": "What is AI-enabled \"nudification\" technology?", "d1108cc8-95d4-4e06-83da-0a5667e1679b": "How does AI-enabled \"nudification\" technology impact women disproportionately?", "0141b7f8-c7ab-497a-af5e-f0ad4e4ff34e": "Why is it difficult for both humans and AI to detect inauthentic images created by \"nudification\" technology?", "04c27c24-9db1-48ea-9b81-876c36344898": "What are the potential personal and professional consequences for victims of non-consensual intimate images?", "e6564ab8-5109-4a91-9546-08eef6406338": "How does the proliferation of AI-enabled \"nudification\" technology contribute to image-based abuse?", 
"6c438bca-254b-4856-9607-ce37450c6154": "What measures can be taken to protect individuals from AI-enabled \"nudification\" technology?", "b754ed6a-d92e-4edb-a8bc-d0eea1d29626": "How does the experience of harm from non-consensual intimate images affect victims' mental health?", "56c9eed6-f23c-499e-8bc0-52b33a7b3e1b": "What are some examples of apps that enable non-technical users to create or alter images without consent?", "6f321352-c8fc-4909-9996-33dc8382f6aa": "How can the authenticity of altered images be verified to prevent image-based abuse?", "5398f151-8b5f-42f5-b601-0b47b2d7ba49": "What steps can be taken to make AI systems safer and more effective in preventing image-based abuse?", "57eb4cf6-01d6-4cae-b638-975a2a0f2656": "What are the key findings of Virginia Doellgast and Sean O'Brady's report on call center jobs and worker stress?", "8a32a722-7f7e-40c1-967a-6c5bd05a1c88": "How do management practices impact worker stress in call centers according to the CWA report?", "e1b53808-6b10-44e0-882d-ba92d299d675": "What recommendations does the Federal Trade Commission make in their 2014 report on data brokers?", "cad08b72-46c0-40b7-bbf7-613ff0ab5f7f": "What are the main arguments presented in Cathy O'Neil's \"Weapons of Math Destruction\"?", "4da03091-d495-4661-9c76-eca9b1c37a74": "How does the US government use social media surveillance according to the Brennan Center for Justice report?", "53d8542f-d4e3-4349-bb91-b058abf20736": "What are the implications of Shoshana Zuboff's \"The Age of Surveillance Capitalism\" for privacy and human rights?", "08b49066-3db9-4bb5-97a1-46a2adb0324f": "How does the Brennan Center for Justice report describe the extent of social media surveillance by the US government?", "ded819f0-0467-4497-a257-4b727219d0dd": "What are the potential risks associated with data brokers as highlighted by the Federal Trade Commission?", "51e0e9cc-e3b9-47a7-8bf8-405f02fa04e6": "How does \"Weapons of Math Destruction\" address the issue of algorithmic bias?", "aec903ac-6dcb-450f-ad0d-c664a413a4b9": "What solutions does Shoshana Zuboff propose to combat surveillance capitalism?", "8d838d90-aa87-4ecc-9abc-0fcfa5a4d35d": "How can we protect the public from algorithmic discrimination in a proactive manner?", "a4c16234-c64d-454b-98d2-55d8906e1040": "What is a proactive equity assessment in the design phase of technology?", "33c734e9-1fa0-4bd1-b225-487ee53c2b33": "Why is it important to review potential input data and historical context during the development of automated systems?", "da018beb-b83d-4eaf-8c14-30d735aac789": "How can we ensure accessibility for people with disabilities in automated systems?", "93044246-0649-4ba3-b8b1-355b186025a1": "What societal goals should be considered to identify potential discrimination in technology?", "df01e2f4-1283-4220-9353-4573fe7a1203": "Who should be included in the equity assessments for automated systems?", "98fd7a77-ecb9-40ff-b731-37710588af27": "How can we address potential discrimination against Black, Latino, and Indigenous communities in technology?", "37017d6c-9757-463e-b5e4-7df7f5a6a2cc": "What steps can be taken to ensure equity for Asian Americans and Pacific Islanders in automated systems?", "cc45a9c2-e146-4637-ae51-762e5cf9a713": "How can we support members of religious minorities in the design of automated systems?", "a04f1bd4-fb2f-4020-a56f-07c5d12ee11c": "What measures can be implemented to protect women, girls, and non-binary people from algorithmic discrimination?", "21b9f512-20cd-4b96-a8e5-824c5d788b9e": "How can 
continuous surveillance systems impact civil liberties and rights?", "cbc08ff9-844c-48d2-bd2f-d965463662ff": "What are the risks of using identity-related information in surveillance systems?", "62b31c74-49b3-45d8-958c-175dffe5e8da": "Why should continuous surveillance not be used in public educational institutions?", "2eba3ccc-3d6a-46ae-bebc-6cf52aea05da": "How can algorithmic discrimination be avoided in surveillance systems?", "a81db3fd-a4bb-4dd0-a446-4b3c34313263": "What are the potential consequences of using biometrics in surveillance?", "cc016098-a84b-4347-a791-d173694b1fd8": "Why is it important to limit the use of location-based inferences in surveillance?", "61262c75-27f1-4742-a14d-7dfbb43c1f6b": "How does continuous monitoring in workplaces affect employees' rights?", "48be4dc1-8648-4ae2-b813-f08d7321d697": "What measures can be taken to prevent surveillance from limiting access to critical resources?", "d3c26eb4-5404-4906-8f26-12a930b32e4b": "How can social network data be misused in surveillance systems?", "5cd15272-af41-4e9c-aa26-42043ce6088c": "What are the ethical considerations of using surveillance in public accommodations?", "7869bc66-d4ef-4fc7-9897-351302b9eae5": "How can automated systems impact the eligibility for Medicaid-funded home health-care assistance?", "c13ee199-2840-46a1-ab92-c45e02f2f6b6": "What are the potential issues with using algorithms to determine eligibility for social services?", "d612ed72-1fb8-4bca-9b74-b5243bf640ca": "How can the lack of transparency in algorithmic decisions affect legal cases?", "bbecd72d-7534-478d-a686-94b4ed9cb7c8": "What are the challenges faced by lawyers when contesting decisions made by automated systems?", "68914712-215e-4582-8129-f1f7db119cc4": "How can individuals be informed about data collection and algorithmic assessments in child welfare cases?", "5f684140-09b8-47a2-8459-aa9c40072ce2": "What are the ethical concerns surrounding the use of algorithms in child maltreatment risk assessments?", "b0deab21-1ef7-4bb8-bb66-bd0567448753": "How can the use of algorithms in social services be made more transparent and understandable?", "e6417fe3-22a8-4536-89de-555e6a9a171f": "What steps can be taken to ensure fairness in algorithmic decision-making for vulnerable populations?", "12de6c9e-e24e-4576-9d3c-1fcbd6ccc54b": "How can parents contest decisions made by algorithms in child welfare investigations?", "522de666-2af7-4927-bf75-9d3efd25b34d": "What are the legal implications of using algorithms without notifying the affected individuals?", "fe8e6e24-ac4f-4570-bb59-e53149f51c49": "What is the purpose of the overlapping framework of rights mentioned in the context?", "07b9522e-0eac-4459-85e1-b511e6b00056": "How does the Blueprint for an AI Bill of Rights aim to protect the American public?", "55277100-414c-49c6-84ac-e6a011795719": "What are some examples of protections already required by the US Constitution mentioned in the context?", "d6a87b9f-e5f6-44ac-bc18-b4f1bc5515a9": "How should measures be taken to realize the vision of the rights framework?", "fc2524b6-ed78-43d5-b6fe-a01bdd53e031": "What is the relationship between the Blueprint for an AI Bill of Rights and existing US laws?", "fd764fa7-268d-48fb-9ec7-3be9aa2eb183": "How does the context describe the role of judicial oversight in government surveillance?", "202fc607-7128-470f-aed2-e848351cae24": "What does the context suggest about the proportionality of measures taken to protect rights?", "e5097d86-a6bc-49d4-86a4-d058c5b193a1": "How does the Blueprint for an AI Bill 
of Rights envision the future use of automated systems?", "e633d508-d253-4b7f-bec5-75b11962e65e": "What are the principles described in the Blueprint for an AI Bill of Rights meant to ensure?", "742f04aa-415c-4e14-98b9-5298de43c47d": "How does the context relate to the protection of opportunities and access for people?", "e1fd4aa1-c10d-4f71-bf77-b4cca9a6e0ed": "What are the best practices for regularly assessing and verifying the effectiveness of security measures in AI systems?", "9fbe4be3-06da-42bf-a241-95fc342604db": "How can organizations ensure that their AI security measures have not been compromised?", "f1082088-e78b-42d5-9285-ae15731354be": "What methods can be used to compile statistics on policy violations and intellectual property infringement in AI systems?", "e6f71c20-417c-46ac-8a42-10c97c50760f": "How should transparency reports be analyzed across different demographic and language groups?", "2f6a4b04-9c59-4bad-8823-c165db75adf3": "What are the key components to document when giving instructions to data annotators or AI red-teamers?", "a64475ac-8c2d-48af-b278-eb91c933268d": "What digital content transparency solutions are recommended for documenting instances of content generation, modification, or sharing?", "0d873b42-d24d-4da1-89ff-6ccc15e84b6b": "How can organizations address risks associated with transparency and accountability in AI systems?", "97de1dbe-9a2c-4a3a-9a0d-842bf1c1ad55": "What are the common risks related to intellectual property and harmful bias in AI systems?", "7e629132-bcc2-46fa-9815-95ccd889c845": "How can the effectiveness of AI deployment and impact assessment be monitored?", "070a1044-27d3-4b31-aa33-23e39882c9ab": "What role do domain experts play in the operation and monitoring of AI systems?", "10ecba85-50b5-466c-92fc-326759a2a5ee": "What are formal methods in the context of automated systems and machine learning?", "43072003-14fe-4f66-94c2-9ac121ef7717": "How does the Designing Accountable Software Systems program ensure legal and regulatory compliance?", "98486804-fb18-4583-ab43-67b07b08efa6": "What are the transparency and validity requirements placed by state legislatures on pretrial risk assessments?", "54358f22-1102-42f3-b0f2-7227b0a0f0c2": "Why are civil rights groups concerned about the use of algorithmic pretrial risk assessments?", "ffba1d9d-ebb6-4955-a4fa-839e180301ef": "What does Idaho Code Section 19-1910 require for pretrial risk assessments before they can be used in the state?", "99d80794-a2ea-449b-9af4-62646f316051": "How can a pretrial risk assessment be shown to be free of bias according to Idaho Code Section 19-1910?", "07f00fa3-c821-41ec-9ff1-a781bd1bd55d": "What steps must a locality take to validate a pretrial risk assessment as free of bias?", "e9dee9ec-012d-4ba0-a878-5dec4184c9a2": "What are the implications of the requirement for all documents and records related to pretrial risk assessments to be transparent?", "b922e48a-8d8e-4ae8-807f-5bb7477dd82d": "How do formal verification and analysis contribute to the reliability of automated systems?", "75050f5f-7cfc-4adf-a91a-1d7fb4185c7c": "What methodologies are considered rigorous and reproducible for developing accountable software systems?", "6b983f53-78fb-4bd3-be9a-cb6deb436d63": "What are the key components of a certification program for managing GAI risks and interpreting content provenance?", "550b57aa-5cc7-44ec-b07d-c62cd7dd4668": "How can existing training programs be adapted to include modules on digital content transparency?", "65a3901b-9b76-4dcd-b3ba-d04968302b14": 
"What methods can be used to evaluate whether GAI operators and end-users understand content lineage and origin?", "fe52b3aa-3245-48cc-86da-8f3f3225b4cf": "Why is it important to delineate human proficiency tests from tests of GAI capabilities?", "b3b5db46-0009-42b6-9465-4b5f30fbe624": "What systems can be implemented to monitor and track the outcomes of human-GAI configurations?", "530d41df-1d98-428a-9449-1e199472e781": "How can end-users, practitioners, and operators be effectively involved in the prototyping of GAI systems?", "68889efa-4505-424f-ae65-dc462fc55044": "What are the risks associated with human-AI configuration and information integrity in GAI systems?", "5149446a-0fef-433d-a2ae-bfa7865ae0d0": "How can proficiency in managing GAI risks be tested and certified for specific industries?", "0fba5c84-2bcf-4be8-9684-d1904eb861bc": "What are the benefits of continually refining and improving human-GAI configurations?", "5634f37b-9702-4b53-8dce-bfb21a101d03": "How can information integrity be maintained in the context of GAI systems?", "8eb9d5d5-af51-4376-9b9d-1297edd11496": "What are the potential risks of using biometric markers in school exams?", "eaf23276-45c4-465b-a0a1-c347a8fc5b76": "How might biometric systems in schools affect students with disabilities?", "451adf54-6336-4465-b4de-b6390b383b02": "Can location data from data brokers be used to track visits to sensitive places like abortion clinics?", "f2489a19-b9ce-4941-a3e9-154a0ecad928": "What kind of student data do companies collect to forecast student success?", "317d1c65-7185-452c-878a-1b16e14d3e4d": "Why are parents concerned about the collection of sensitive student data without consent?", "474c5dc3-a92f-40c7-b290-7529f751d77b": "What are the transparency issues related to the use of collected student data?", "2e297de2-2657-4560-9b39-b6316e39d5b4": "How can the collection of demographic information and other sensitive data lead to discrimination in schools?", "38c93c3c-8f95-4361-80e3-d401cdfb7a38": "What are the implications of employers transferring employee data to third-party job verification services?", "7a8d09ca-630b-49f2-921f-eb1e2dffc47c": "How do schools use data on free or reduced lunch status in their assessments?", "2b74ec7f-f8f1-40ba-b970-2b4fbc944395": "What are the ethical concerns surrounding the use of student data for predictive analytics?", "839d62aa-6521-47db-b195-5f55f3715e07": "What mechanisms can be used to sustain the value of deployed AI systems?", "8c59f600-8f81-40e2-8f76-cad08254ac3a": "How can organizations compare AI system outputs against predefined risk tolerance guidelines?", "5a461ec3-611e-43f6-af2a-7a7f88454170": "What are the risks associated with AI-generated content, such as CBRN information or capabilities?", "16e67bb6-1d80-4b1d-b60d-cda158c2d378": "Why is it important to document training data sources for AI systems?", "ddf72159-1801-49ab-a221-531088d69221": "How can feedback loops between AI content provenance and human reviewers be evaluated?", "f24bf2cd-d91c-4662-bfd9-31b03791b58e": "What are the benefits of implementing real-time monitoring systems for AI content provenance?", "c0e0ee49-2ab6-4eab-bafe-fec66c3023af": "How can organizations ensure that AI-generated content does not include obscene, degrading, or abusive material?", "c7548d81-fe0d-4316-a260-2d342818a5c6": "What steps can be taken to prevent harmful bias and homogenization in AI systems?", "91e539e0-ee4e-4e3b-bc8d-b3794ca3798a": "How can dangerous, violent, or hateful content generated by AI be managed?", 
"4c18144e-d6a3-4d13-a836-e55c43d2c5e2": "What is the role of information integrity in the context of AI development and deployment?", "18472313-2f95-4239-88f3-4a75df9cf246": "What are Model Cards for Model Reporting and why are they important?", "18e201b7-192f-4ea2-934b-df1184862535": "How do adverse action notice requirements under the ECOA and the FCRA impact consumers?", "07cfac24-a2ea-4bef-b577-15ed4935a4bd": "What information is provided in the FTC's guidance on using consumer reports for credit decisions?", "86d77516-8a49-4005-97e2-fef5020a40b3": "How does the CFPB act to protect the public from black-box credit models using complex algorithms?", "b2ac04a5-d89c-4fc8-8023-6bb6b8a814ab": "What are the key takeaways from the Conference on Fairness, Accountability, and Transparency (FAT* '19)?", "03bc6211-f0ad-4a75-b7aa-92f96064ce4b": "What are the adverse action notice requirements under the ECOA and the FCRA?", "75513281-57cf-4e3f-8495-cafc4e79d242": "How does risk-based pricing affect consumers according to the FTC?", "28e98921-df31-486e-a4bf-cd2a8f018ee4": "What measures has the CFPB implemented to address issues with black-box credit models?", "1942efe7-0de7-42dd-9f0b-23cdddc6969d": "Why is transparency important in credit decision algorithms?", "ba2751db-a092-4455-89ce-60ce0f2e5ff0": "How can businesses ensure compliance with adverse action notice requirements?", "43ab2e49-8484-42a0-b907-a326016b5fba": "What are the potential risks during the design stage of the AI lifecycle?", "71c80195-c590-4a6a-8455-cc14b7d76775": "How can risks during the deployment stage of AI be mitigated?", "bac1fcc0-33c2-4bd1-bcae-206282b1aebf": "What is meant by \"algorithmic monocultures\" in the context of AI risks?", "ded49efb-8ec5-43c7-90a3-414f486b61aa": "How do risks at the ecosystem level differ from those at the individual model level?", "aa5a5372-d095-4583-8dd2-04b30b8d377b": "What are some examples of risks that can arise during the operation of AI systems?", "d364a35e-d9b1-4892-aeee-562d6fe3fb16": "How can human behavior contribute to the risks associated with AI systems?", "8e058945-6592-48f2-a7c0-b29f59239765": "What are the implications of repeated use of the same AI model on labor markets?", "0586fc63-c513-4afc-a447-0f010dfa2c07": "How can the decommissioning stage of AI systems pose risks?", "5723f355-fc6d-4188-a997-6fc9df87d7e0": "What are the potential impacts of AI on access to opportunity and the creative economies?", "b1450fe5-2a1b-49c3-8181-b0908b22ec94": "How can risks from GAI system outputs be identified and managed?", "d6dc8fa5-06dd-4cb6-b9e1-3d99509fd36f": "What are the standard risk controls that can be applied to proprietary GAI technologies?", "43f41ff8-54b8-405b-97e6-387ac8c232c9": "How can organizations ensure third-party transparency when using GAI systems?", "def9231f-04c4-40b3-a87d-4a5d13f4664e": "What is the role of software bills of materials (SBOMs) in managing GAI system risks?", "0abe2ec1-662d-4cd9-8f9a-28ce9be69bbc": "How do service level agreements (SLAs) contribute to risk management in GAI systems?", "36dbeaca-841b-42b4-a5fc-2f1795f02564": "What is the importance of statement on standards for attestation engagement (SSAE) reports in GAI systems?", "e49097b7-7ffa-4581-8515-1b30cfa7d7b0": "What are the challenges in risk mapping and pre-deployment measurement for GAI systems?", "f51fa6cc-e9f9-4425-b85d-44db74827ce4": "How can robust test, evaluation, validation, and verification (TEVV) processes be applied to GAI systems?", "b85af260-3cfa-4a9c-bc21-802cd691f871": 
"Why is it important to document TEVV processes in the early stages of the AI lifecycle?", "7444a8c8-575f-4345-84f3-0124561379dd": "How can acquisition and procurement due diligence help in managing risks associated with GAI technologies?", "84e38c59-60d0-4319-81ad-3888d9b12506": "What are the benefits of involving representative AI Actors in the TEVV processes for GAI systems?", "80255fc3-d877-468b-97e9-7be818ed20da": "What is the AI Risk Management Framework (AI RMF) 10 for Generative AI?", "9516ed5a-2087-413e-925b-26135b0f5d06": "How does the AI RMF 10 aim to improve the trustworthiness of AI products and services?", "6fab3d01-f156-429d-831e-4ea194ff08bf": "What is the purpose of President Biden\u2019s Executive Order 14110 on AI?", "91adbba6-9d02-4f12-8ca2-25bf46b3776b": "When was the AI RMF 10 released?", "bbdcadfc-3db1-46fa-85d2-e443a126d455": "Is the use of the AI RMF 10 mandatory for organizations?", "36c3329b-2a66-4361-b5f7-7ce8ade47cec": "How can organizations benefit from using the AI RMF 10?", "aae224f2-ef20-4cd2-8f53-3e748c2dac1e": "What are the main components of an AI RMF profile?", "150d6339-723a-443d-bc9f-4b1be5ba3fb1": "How does the AI RMF 10 help in managing AI risks?", "3779173f-0285-4630-b333-78420055bd18": "What considerations are taken into account in the AI RMF 10 for Generative AI?", "2a0ab6da-3f10-4297-bb6a-887732c4d6be": "How does the AI RMF 10 align with legal and regulatory requirements?", "66823427-2ffd-4c77-909f-5289272bdb4c": "What is algorithmic discrimination and why is it a concern?", "0ab73255-60d0-4d92-ac1c-3d0c7a6949a9": "How can automated systems amplify existing inequities?", "feeecc56-069d-45a3-88eb-3f4d74890fcb": "What are some examples of algorithmic discrimination in facial recognition technology?", "88d366bd-b700-4c98-a848-aedce1be01ee": "How do hiring algorithms contribute to discriminatory decisions?", "00fc2bf2-fbad-4580-bd4a-05669d18fc7c": "In what ways can healthcare algorithms be biased against Black Americans?", "2844838c-8c42-4c48-a44f-63715f685e1d": "What are the potential consequences of data that fails to account for systemic biases?", "36ef20ee-eb6e-4f4e-9d65-e279ee7920d6": "How prevalent is algorithmic discrimination across different industries?", "3302a720-0035-40c3-a9b6-017c0952d77d": "What protections are in place to address algorithmic discrimination?", "1a75a965-db4c-48cc-a1ac-7e4bcb22e7a6": "How can we ensure that automated systems do not perpetuate existing biases?", "0e9714b8-1ef7-4875-9d9f-858297cad936": "What steps can be taken to mitigate the risks of algorithmic discrimination?", "69b6d24d-4f77-4453-bd79-e8af8137c7e1": "What are healthcare navigators and how do they assist people in obtaining health coverage?", "d3ffb77a-2b83-4c99-9923-bd42f0684d90": "How did the Biden-Harris Administration support healthcare navigators in 2022?", "575075f0-9aa1-4135-87b7-2b392d2cda2b": "What kind of training do healthcare navigators receive to help consumers with health coverage?", "c53668a5-710d-4526-98b9-af2788d4b125": "How do healthcare navigators help small businesses and their employees with health coverage options?", "84bee82d-ff9b-4d9e-8a7a-b6cbcb4db569": "What are some real-life examples of laws and policies that protect rights and access to healthcare?", "d74ae40a-97e3-4c00-bef1-b09032380ffd": "How do practical technical approaches help in protecting opportunities and access to healthcare?", "7b48a310-6aed-4f43-a279-f37e09939bba": "What is the role of sociotechnical approaches in ensuring healthcare access?", 
"3db05dac-a97a-4b1e-9652-d76803a23c04": "How can increased funding for healthcare navigators impact uninsured consumers?", "984009d5-34ae-416b-b936-3f783743a025": "What are the benefits of having more trained and certified healthcare navigators?", "1fcf65a3-0d6b-4145-99cc-0bf2c3235ad1": "How do healthcare navigators assist with completing eligibility and enrollment forms for health coverage?", "66fd1727-6e57-49a1-ae65-9fdb471abd5a": "What is the role of the National Science Foundation (NSF) in AI research?", "4d10f264-ec1c-496e-8dbb-69f985ae5cf1": "How does the National AI Research Institutes contribute to the development of safe and trustworthy AI?", "4f34a0ae-db65-487e-8242-a6f5458fa9de": "What are the key principles of the AI Ethics Framework for the Intelligence Community?", "4c4c9829-d71f-4438-a202-eaef5533887c": "How does the Cyber Physical Systems program support AI research?", "90a56106-7089-4755-973a-da4912ed88d1": "What kind of research is funded by the Secure and Trustworthy Cyberspace program?", "0a37c463-1465-453d-bbbe-74e8a11fe10f": "How does the AI Ethics Framework help implement ethical principles in AI development?", "bc9e1c96-6a89-4d9b-b96c-71acb36031e0": "What are the main goals of the National AI Research Institutes?", "037f5eee-4dd0-4bed-9151-ef0be3018604": "How does the NSF ensure the safety and security of AI systems?", "06f79190-3996-4bfd-86e5-6065a4dc96ee": "What is the importance of explainable AI algorithms in the context of the Intelligence Community?", "3df54901-e507-45fe-bd82-154e2d31aae0": "How do privacy-enhancing technologies contribute to the security of automated systems?", "c4da0ee6-b97a-4cac-839c-667bdb97a9d0": "What are the key expectations for automated systems to ensure they remain safe and effective?", "37843ed7-8a1c-4d60-818a-72b161e25abf": "How important is ongoing monitoring for the performance of automated systems?", "0944ea5d-dbdd-4f95-829a-6caacde3bed0": "What procedures should be in place for recalibrating automated systems?", "12d5b6aa-803b-4df4-8a6d-4fce86fdf65e": "How can automated systems be continuously evaluated for performance metrics and harm assessments?", "643c631e-a495-4f76-ac44-fc0226061601": "What steps should be taken to update and retrain machine learning models in automated systems?", "6fbfb445-cd6f-4999-b0b3-48d65d58c0e6": "Why is it necessary to have fallback mechanisms in automated systems?", "9c0537b0-0ed3-4084-aefd-70d1001d512a": "How do changing real-world conditions affect the performance of automated systems?", "379808b9-e51a-42f1-b658-7f085dd496a5": "What are the best practices for post-deployment modification of automated systems?", "f9ec3773-dc0b-474f-8e19-10f71c543467": "How can unexpected conditions be managed in the deployment of automated systems?", "caf8112e-e9f3-4acd-81fb-c3b49486c424": "What role does continuous evaluation play in maintaining the effectiveness of automated systems?", "581a5da7-213e-4a7d-9f8d-d4133dd57842": "How does harmful bias in GAI models affect different racial and gender groups?", "9d4abb67-4cfb-4342-9757-6880500e5378": "What are the potential consequences of GAI systems performing poorly for non-English languages?", "6c77fa09-63b8-42b1-ab8c-8a0878fe99b4": "How can GAI models perpetuate or exacerbate existing societal biases?", "a46e5b14-4ca5-47eb-b330-0d94b8f9f326": "Why might GAI systems be inappropriately trusted to perform equally across all subgroups?", "7fabb17d-bead-4be0-a042-a82244952276": "What are representational harms in the context of GAI models?", 
"b6bddb5b-9792-4d2b-8ddc-6bd3455e5a52": "How can disparities in GAI model performance lead to discriminatory decision-making?", "93e73653-3778-45ec-8b88-83ef0cee390f": "What are the risks of using GAI systems for lower-resource languages?", "0ce84b04-c919-4d13-a642-1c811d94eb54": "How can training data contribute to harmful bias in GAI models?", "8b538c35-c3ca-4876-93d6-3a9de1887248": "What steps can be taken to mitigate bias in GAI systems?", "07203f30-4f7f-4aca-8eca-7c309b4986f2": "How does the underperformance of GAI systems for certain subgroups compare to not using GAI systems at all?", "b3dc9b16-8ecb-4c62-ab68-b96bed0dfc8d": "What are the challenges faced by people without smartphones in accessing unemployment benefits?", "02ced4ba-2675-4cba-9819-e8e00d3cd558": "How does the digital divide impact the ability to claim unemployment benefits?", "98bb75ed-7a8c-4dce-86df-07d671be2a37": "What are the implications of the UIA lawsuit on the perception of unemployment in the state?", "da95bf2a-175f-49d9-b8a6-280808d008cc": "How does the state criminalize the unemployed according to the Detroit Metro-Times article?", "f6187e72-cd60-47bf-8a65-87c6ff917a35": "Why are doctors turning away patients with unbearable pain, as discussed in the Wired article?", "14dd793d-1318-45e0-bf65-ae5c770cde2a": "What role do algorithms play in the treatment of chronic pain and opioid addiction?", "d3a655b7-63e1-43d6-a477-2241c65e6d0a": "How does Amazon's use of bots for firing employees affect workers?", "777e4f21-26bb-4162-81e1-25f943df9293": "What are the consequences of being fired by a bot at Amazon?", "d4ec38ed-55a8-425e-af0d-8589faeb46ba": "How do machine managers at Amazon impact the worker experience?", "2a40a8e1-c0b7-4eac-a84a-5d63fbdb6c9a": "Where can I find the definitions of \u2018equity\u2019 and \u2018underserved communities\u2019?", "c54055bd-b859-4846-856f-3208fb311b0f": "What are the specific criteria for deactivating a GAI system according to MG-24-002?", "6f0ae6ac-c093-4863-8cf2-4ada7f9d42e7": "How should organizations escalate GAI system incidents to the risk management authority?", "b4852e25-ec60-482f-a671-b99fc5379d18": "What procedures should be established for the remediation of issues triggering incident response processes in GAI systems?", "0d20124d-250f-42b7-a215-c52597d6ff03": "How often should the criteria for deactivating GAI systems be reviewed according to MG-24-004?", "477cd099-57f2-473f-9e76-397ae38e415a": "What are the key tasks involved in AI Deployment, Governance and Oversight, Operation and Monitoring?", "dcf15838-d29f-4e4a-843a-165bf83a1bfe": "How can organizations ensure that AI risks and benefits from third-party resources are regularly monitored?", "9d4de1d9-a66b-4c82-935a-f7e97333ed5c": "What are the risk controls that should be applied and documented for third-party AI resources?", "070ee859-7379-4692-a8af-44336782308e": "What timelines should be provided to stakeholders for the remediation plan of GAI system issues?", "639643b4-60e7-474d-aee6-3ee349564306": "What is the role of the organizational risk management authority in managing GAI system incidents?", "acb51a42-9248-4132-bcd5-aa4101f783dd": "How should organizations document the risk controls applied to third-party AI resources?", "507836ec-d203-47f8-a680-09f83fa2a01f": "What is the Responsible AI Resource Center (AIRC)?", "0712927b-b14b-48bd-8873-1773a4748151": "What is the purpose of The Language of Trustworthy AI: An In-Depth Glossary of Terms?", "5a1bdd93-7277-481d-9646-c6981146f8d6": "How were public 
comments and consultations used in the creation of the document?", "4ce4e9f4-b9b5-4632-a08b-c5e41c2f4b8a": "What does risk refer to in the context of the AI RMF?", "d4e55ebe-4671-4b95-9b07-c9e5e246be26": "How can AI risks differ from traditional software risks?", "8b4d8ad8-933b-4290-9fb2-14ecdde49db9": "What are some examples of risks that are likely to materialize in a given context?", "04d3166e-c3f8-442a-a5a4-36726190ef49": "What are some examples of risks that are more speculative and uncertain?", "da1e7ff4-4699-42f3-b32e-97305e4b357a": "How can GAI exacerbate existing AI risks?", "81f01fdb-f899-4475-8c04-9e7fe197e698": "What are some unique risks created by GAI?", "88a166d0-e31c-43cf-8f58-baefce9d284f": "How can the magnitude or degree of the consequences of an event be measured in AI risk assessment?", "9ed882bf-45cd-4218-a475-bb18625ade4c": "What are the main ways hiring algorithms can introduce bias according to Miranda Bogen's article in the Harvard Business Review?", "56adf753-40ac-4593-bb8a-832feec13d31": "How is the TSA making flying easier for transgender people, as discussed by Arli Christian in the ACLU article?", "19cd8cd2-a1f6-479e-8713-95e8683eee7f": "What specific measures has the TSA implemented to accommodate transgender, non-binary, and gender nonconforming passengers?", "3e165023-0718-43ed-99b9-10cbcee9f4e1": "What are the concerns raised by the National Disabled Law Students Association regarding the online administration of bar exams?", "dfad7b64-3277-4f06-af37-d85af7857c4e": "How does automated test proctoring software discriminate against disabled students, according to Lydia X Z Brown?", "a41d0161-4967-4046-9c48-e87cea7d5bf8": "What are some examples of bias in hiring algorithms mentioned in the Harvard Business Review article by Miranda Bogen?", "bf872985-a9f6-4c68-ac13-c3442a951f5a": "Can you provide a summary of the four ways the TSA is improving the flying experience for transgender individuals?", "f53a3ba9-3917-4f06-a722-e4d54d8119c2": "What resources are available on the TSA website for transgender passengers?", "6fa39a37-eeaf-4520-b9bd-162fce45ab8c": "What are the key points in the NDLSA's report on online bar exam administration concerns?", "f4bf60dd-02c3-4248-bed1-4831c9772821": "How does the Center for Democracy and Technology describe the impact of automated test proctoring on disabled students?", "63eb9983-de78-4519-b2c8-f22b454ee053": "What are the legal and regulatory requirements for AI development and deployment?", "267043c7-c798-4c11-b2d1-9c33ddd4fb4e": "How can AI development be aligned with data privacy laws?", "d30f4bc2-2f5f-41a6-b0ca-38e7382ef444": "What are the implications of copyright and intellectual property laws on AI?", "fc798bdd-e26e-4ed1-8142-e17cd09368df": "How can organizations manage and document legal requirements for AI?", "1fc3dca0-6426-432e-bf6d-db436aa4dfd0": "What are the risks associated with data privacy in AI systems?", "3eb7529c-01dd-46f1-ba24-c428f4b98cbf": "How can harmful bias in AI be mitigated according to regulatory requirements?", "10147097-3b5f-4390-8fdb-61a55029b730": "What is the role of governance and oversight in AI development?", "b73e2ec4-11cf-4e83-9ef8-b0612a7df8b7": "Who are considered AI Actors according to the OECD?", "3f2795a8-623f-4759-be58-bac1f301777a": "What tasks are involved in the governance and oversight of AI systems?", "1dacc26f-f637-4f67-b91a-d11102d1bb85": "How can organizations ensure compliance with intellectual property laws in AI development?", "b5373d9d-ce4e-40bc-bcbc-4f675f59d1a8": 
"What are the disproportionate risks that AI poses to women according to the Brookings article by D West?", "029ed6d6-5b8c-45f2-8690-d47dc48f8232": "How do large language models (LLMs) perform in citing relevant medical references as discussed by Wu et al?", "3dd37534-24da-494e-b97b-5e3890f7f13e": "What evaluation framework did Wu et al propose for assessing the citation accuracy of LLMs in medical contexts?", "a89ead0f-a8bc-4db8-adae-b72aa1d2c3c5": "What evidence of racial bias was found in OpenAI\u2019s GPT when used as a recruitment tool, according to Yin et al?", "3babea8f-5928-47f3-962e-9b283207ab3e": "How can jailbreak prompts affect the behavior of large language models, based on the research by Yu et al?", "0af422b3-b85e-4b51-94fa-71fc9ab1d8fa": "What are some examples of digitally-disadvantaged languages mentioned in the Policy Review by Zaugg et al?", "1c9b32d4-4ae7-4a96-a889-035f67276bb2": "What are the potential implications of AI-induced racial bias in hiring practices as highlighted by Yin et al?", "9b7725c9-7b92-4fdd-a10f-8ec1530b228d": "How does the article by D West suggest mitigating the risks AI poses to women?", "649f7414-376a-4e58-8d1e-23d5e7ed9d6a": "What methods did Yu et al use to explore and understand jailbreak prompts in large language models?", "e78a0693-7005-4bea-89f2-c3ea112bce08": "What policy recommendations are made by Zaugg et al to support digitally-disadvantaged languages?", "9db21a04-c438-4476-ac0a-82338eae49b2": "What are participatory engagement methods and how are they used in AI development?", "97081408-da30-4645-9afe-f3307ed6cdc7": "How can field testing improve the usability of AI-generated information?", "289db7b2-4d79-467d-a5d9-9d5b231a0264": "What is AI red-teaming and why is it important for identifying system vulnerabilities?", "2d57ed17-a6f7-46f9-81ca-95cb462059cb": "How does feedback from civil society groups influence AI system design and implementation?", "4922c3b7-23a1-4ecb-9156-b04943f93fab": "What are the benefits of using focus groups in participatory engagement methods?", "f2e88045-e05b-41dc-8cec-46bbf532cb91": "How do structured, randomized experiments contribute to understanding AI interactions?", "8e4237f7-4dab-4b3e-872c-636b750a8551": "In what ways can AI red-teaming help in preventing discriminatory outputs?", "6a0cc42b-ef86-4bfe-8697-1dafa8d40ca3": "How can surveys be effectively used to gather feedback from affected communities regarding AI systems?", "a27db742-1147-4e73-b8f8-fb28f863182a": "What role does public feedback play in the maintenance or decommissioning of AI systems?", "1da6cbec-681b-41b5-8fdd-7e46b4b39a69": "How can insights from field testing be used to improve data quality and preprocessing in AI systems?", "dc7b408c-d2a4-440a-8bd3-382b37949adc": "How can I find out if my personal information is stored in a federal system of records?", "12c4f43b-f79c-40bf-89c5-5de9f0101a4f": "What steps do I need to take to contest the contents of a federal record about me?", "928f7047-c52c-49a0-aad2-2d36efcef6dc": "Are there any exemptions under the Privacy Act that would prevent me from accessing my records?", "8ee048c2-ac9d-46c0-8558-0d5e5e8a8743": "What legal actions can I take if a federal agency does not comply with the Privacy Act?", "7785c38a-a968-421a-b0d8-c15fe4003fa3": "How can I request a federal agency to amend or correct my personal information in their records?", "5af5e28f-c346-483e-8b52-dabdb08f98cf": "What kind of monetary damages can I seek if an inaccurate federal record affects my opportunities or benefits?", 
"6eec9174-86c8-46fc-84dc-0a9dadffd18c": "What are the procedures for accessing my individual information stored in a federal system of records?", "d9e3443e-a2be-4b7f-9ead-f1b68a8decec": "How does the Privacy Act protect my personal information stored by federal agencies?", "ed5ff628-1277-45a4-a100-1c1d064b76a6": "What qualifies as an adverse determination under the Privacy Act?", "29499a6f-b07f-4cba-b4bf-90e40d038c36": "Can I seek legal relief if a federal agency maintains an incomplete or untimely record about me?", "dd201fcb-d5b4-47aa-9fa7-584fdc9a91d0": "How do GAI systems impact the preservation of endangered languages?", "f95cd300-7368-4898-875c-8a16e2699308": "What challenges do GAI systems present to model adoption and inclusion?", "08be409c-62b9-4046-a4e5-41f7aef94b1e": "How can the use of GAI systems lead to undesired homogenization in outputs?", "165031b8-01ee-4b7e-a254-95b7729cbac2": "In what ways might GAI systems make it more difficult to use endangered languages in everyday processes?", "77d2f46d-9e77-41fd-8367-3d123120a90e": "What are the potential biases associated with GAI systems?", "41496cd8-8df9-4ece-9b75-799e48764e3d": "How can repetitive aesthetic styles in GAI outputs affect cultural diversity?", "8e9ba476-d233-4ed8-8772-0fbd52872bb3": "What steps can be taken to ensure GAI systems do not contribute to the loss of endangered languages?", "10c840b0-30ab-46e2-9109-d9b669bd57ac": "How does bias in GAI systems reinforce the problem of homogenization?", "6e44c18a-5777-4822-9e66-8bc1a5a2f194": "What are the implications of overly uniform outputs produced by GAI systems?", "85eab450-b260-459c-acfc-4403ea487fee": "How can we address the challenges of inclusion and accessibility in GAI systems?", "93694198-599c-4796-8cf1-d528b4f3fb10": "How can organizations ensure that automated systems are kept up-to-date?", "2c321c09-c163-4b0c-81f8-f536580d0caa": "What are the best practices for notifying people about significant changes in automated systems?", "65b87922-c4d3-45bc-b744-c2b89f7b4788": "Why is it important to understand how an outcome was determined by an automated system?", "cffd7b3d-0081-4a33-80e6-28b3fbf26823": "How can automated systems provide explanations that are technically valid and meaningful?", "20e03c70-f24d-4dba-9cb5-042b4cba722d": "What factors should be considered when calibrating explanations based on the level of risk?", "760cfb81-f3c9-4194-a7d3-602f41fab3e8": "How can summary information about automated systems be effectively communicated in plain language?", "f709685b-47a6-44a7-8f9e-3aa0df73870e": "What methods can be used to assess the clarity and quality of notices and explanations from automated systems?", "16141df8-9867-4de3-bcb0-a2b9fac32680": "Why should assessments of automated systems' notices and explanations be made public?", "a44f2530-9479-450c-9cfd-272c1f2071d6": "How can operators and others who need to understand automated systems be supported?", "38270bdb-0dd7-458c-b93a-daebec80be45": "What are the challenges in ensuring that automated systems provide useful explanations to users?", "c699565e-04ef-419e-94cb-1b796a737007": "What is the AI Bill of Rights and how does it relate to Executive Order 13985?", "547dfa62-9140-4639-82df-a9142ce5286b": "How do the Fair Information Practice Principles (FIPPs) influence data privacy laws globally?", "9af0586d-74db-446b-8e26-38be4dd9afc6": "What are the core principles of the Fair Information Practice Principles (FIPPs)?", "eccf5725-f424-4268-b727-1f1a40978434": "How does the Blueprint for an AI Bill of 
Rights incorporate elements of the FIPPs?", "abeee4d7-e9fd-4575-a169-f5d27f7e36bb": "What is the significance of the 1973 report by the advisory committee to the US Department of Health, Education, and Welfare?", "66addcfc-c316-415e-b410-381db4715a69": "How does the AI Bill of Rights support racial equity and underserved communities?", "a172a3a2-d810-4b13-909a-903ececb2cd1": "What are the key elements of the Fair Information Practice Principles (FIPPs) relevant to automated systems?", "a5209e31-d0db-40ff-9bb4-1750d45a7717": "How do the principles of the AI Bill of Rights align with civil rights and civil liberties?", "8ed364a4-97b6-4794-a155-78239f56aa6b": "What role does the Federal Government play in advancing racial equity through the AI Bill of Rights?", "cf81588a-da58-41b7-8f19-e0ed9fa5cd06": "How are the Fair Information Practice Principles (FIPPs) applied in different domains like privacy and civil liberties?", "20cc838f-302b-4a97-b9c0-048b821e21d7": "What is an algorithmic impact assessment?", "11441d7e-f3c5-47eb-9661-dc9501b61db8": "Who is responsible for performing an algorithmic impact assessment?", "96391b37-e002-4444-bc88-af07e80d1be7": "How are corrective actions taken in response to an algorithmic impact assessment?", "f173dfc5-537c-4934-943f-6a4a8209556a": "What should be included in an algorithmic impact assessment?", "adb72817-95df-4fe7-be09-43be351339a0": "Why is it important to make algorithmic impact assessments public?", "98945bdc-d7f5-48af-828b-72fdff9b0735": "How can algorithmic impact assessments help in addressing algorithmic discrimination?", "ce1d92ca-dd4d-4c46-a861-5f738828da2a": "What is the role of disparity testing in an algorithmic impact assessment?", "6739f71c-fbe1-4126-86a2-75deefb42217": "How should the results of an algorithmic impact assessment be reported?", "b3952c44-c80c-4aa5-b364-771c28f8d963": "What are design stage equity assessments in the context of algorithmic impact assessments?", "ab8785d8-fdea-42dd-9207-1331f4d206dd": "Why is it important for algorithmic impact assessments to be machine-readable and in plain language?", "ad91ce86-6dec-4abb-97c6-044f2e6af46d": "What are some examples of issues caused by the lack of human alternatives in automated systems?", "2a47278f-8456-435f-a92f-ad1569f822a9": "How did the unemployment benefits system in Colorado fail applicants without smartphones?", "4baeb7cc-0f63-4432-a61e-2cca08742d2c": "What problems can arise from not having a human review option in fraud detection systems?", "7229c62f-af34-49e6-a1b2-244381296eaa": "Why is it important to have human alternatives in systems that distribute unemployment insurance?", "a551e275-7aaf-4936-8b4a-335333fd8c65": "How can automated systems incorrectly flag entries as fraudulent?", "3185d577-2897-470b-b66d-2b878c1635f3": "What are the consequences of not providing a human fallback in automated systems?", "6b956523-9518-4fff-b4c4-3cb46fe34560": "How did the hospital software error affect a patient's access to pain medication?", "f829362b-3144-4e70-9964-54ff3cf5f3c8": "What are the risks of relying solely on automated systems for critical services?", "04775f45-fc8c-47ae-a74f-788e790dbb7f": "How can the lack of human intervention in automated systems impact people's lives?", "ff3d43c5-1076-4f90-9595-d7507024df5f": "What measures can be taken to ensure that automated systems have adequate human alternatives?", "7a87b77e-ba2b-4a75-85d3-eff3e7a4f1b3": "What are the potential harms of inaccurate inferences made by AI models?", "2af440c3-e3bc-417b-9229-b989c418f726": 
"How can wrong inferences of PII lead to secondary harmful impacts?", "f9ad7469-3e3c-4909-8679-251103726804": "What is the impact of predictive inferences based on PII or protected attributes?", "94623f99-aa39-458d-bf0c-452bdeae335c": "How can AI models contribute to representational or allocative harms?", "a18f484d-0c10-474e-b56a-fc1b92977406": "What are some examples of dignitary harm caused by information exposure?", "caabe18b-3881-404c-bba9-113c918db0f3": "How can extortion result from the exposure of sensitive information?", "6cb41015-478f-4102-8f71-ca101130bef4": "What are the risks associated with confabulations in AI inferences?", "b45acd2b-d605-4e83-8246-a99cd8d2ffff": "How can adverse decisions arise from predictive inferences made by AI models?", "4f89b591-6f2b-4813-bd57-b6ee9e7cd684": "What is the relationship between harmful bias and homogenization in AI models?", "259e3515-ab8c-4f3d-8cbe-178ee51a7ff6": "How can inappropriate inferences of PII disadvantage individuals or groups?", "f7f7c66a-12a1-4a76-88e9-6719de9f8272": "What are the main problems that the principle seeks to address regarding automated systems?", "0404c1f3-7897-4222-838e-d468ed48b862": "How do automated systems impact employment opportunities?", "da09c33c-a2d1-4eba-b8bb-9cadf83a607a": "In what ways do automated systems shape experiences in the courtroom?", "84e11e40-3503-4afa-9c0b-3bd17751dc5f": "Why is the impact of automated systems often not visible to the public?", "fb6baf2b-0c39-43c6-8a23-5a6286b57d3c": "How can an applicant determine if a hiring algorithm affected their job application?", "5ae8a14f-7af5-4fe1-ad8d-daa3b64b7412": "What are the potential consequences of a judge using an automated system to make bail decisions?", "b86dc33e-c44f-40f8-9201-6e77c52b2d3e": "How can individuals correct errors made by automated systems?", "44365a55-f06d-44c0-86f8-5e766884f298": "What steps can people take to contest decisions made by automated systems?", "cb8c33b7-5edf-4f02-827c-e51d99fe8c20": "Why is it important for people to have knowledge about the impact of automated systems on their lives?", "57d78d01-d80c-4a0d-bffe-90e8c5435146": "What are some illustrative examples of the issues caused by automated systems in various sectors?", "a7190971-83cc-4e5b-ad51-d097c2d2416f": "What is automation bias and how does it affect human interaction with GAI systems?", "31534a0f-52cf-452d-9a29-f8ac561198f7": "How can human experts overcome their aversion to GAI systems?", "2c413bc7-6f0c-41fa-ac50-d2f146083ecb": "What are the potential psychological impacts of emotional entanglement with GAI systems?", "f493ae8c-e533-406a-8fc5-5c6bdaad070b": "How can over-reliance on GAI systems be prevented?", "589fdab2-8712-4a25-9c59-8f26e281a6dc": "What are the risks associated with confabulation in GAI systems?", "3f801419-15e7-46cf-8d7a-d2709b6b5792": "How does automation bias contribute to the homogenization of information?", "2acf1a52-6ed2-4c87-9d62-518df86733d3": "What strategies can be implemented to mitigate the risks of bias in GAI systems?", "68b6149d-8a90-4b40-b32e-6e844ff70543": "How can humans balance their expertise with the use of GAI systems?", "bfe3017a-f8b1-49b4-8022-1d6c6b18b595": "What are the benefits of using GAI systems despite the potential risks?", "66d721dd-e056-41a0-9177-35ec1a02f2a3": "How can emotional entanglement with GAI systems be managed to avoid negative impacts?", "7994cad7-21e2-4e09-aca5-f56ef1ae8a5a": "What are the ethical considerations for developing automated systems that could potentially violate 
safety?", "47e3ea93-fd88-4971-8203-e36fc3545d64": "How can unintended safety violations in automated systems be identified and mitigated?", "de90f1a6-2df7-4240-944b-87dad8ddb4c3": "What steps should be taken if an automated system is found to have safety violations after its launch?", "b0f19328-390f-4526-9bd5-58e3217d2939": "Are there any guidelines for rolling back or modifying automated systems with safety risks?", "3a98c1d4-e98a-4339-a8d8-666a62f395d1": "How can developers ensure that automated systems do not have unintended harmful consequences?", "3e20c7ee-70ab-4ad1-b14f-32dfe44d0c85": "What are the best practices for ongoing risk mitigation in automated systems?", "a13b0ccd-e3a9-4ab3-afc9-d9c863fbeb66": "How should companies handle automated systems that have been found to violate safety standards?", "d904e7ea-ad3b-4610-ab7e-0e02f95f7b78": "What are the potential risks of using automated systems without proper safety checks?", "2fbedd50-8ea8-4da8-a425-802da3096391": "How can the safety of automated systems be continuously monitored after deployment?", "1ec42e53-7159-49a1-9293-810d7a184a7f": "What measures can be taken to prevent the development of automated systems intended to violate safety?", "a0842cf3-00a4-4746-87d9-6ac45ada3ebd": "What are built-in protections for data privacy?", "b76e72b6-358d-4943-98de-cad3c4c392ea": "How can I ensure that my data is only used in ways I approve?", "4f6747b8-9ad2-45ed-8979-f605d5765654": "What are some examples of abusive data practices?", "8effaebc-2db1-4d37-852d-5b080b55344a": "How do design choices impact data privacy?", "ba664a46-2556-466b-94a9-e6c9881f6bc5": "What does \"privacy by design\" mean in the context of data collection?", "412a6863-0745-4d66-8b48-137fc5c03a6e": "How can I verify that a system respects my data privacy decisions?", "a96cda8b-8d4a-4085-b297-fbf4fc69d244": "What are alternative privacy safeguards if consent is not possible?", "c4c103d2-5129-4be7-bf42-4971da92e49a": "Why is it important for data collection to conform to reasonable expectations?", "4a2850be-c31b-461e-9165-f82e606b10ff": "How can I identify if a system is using privacy-invasive defaults?", "398d3a34-f7cf-44c4-81ea-d314e9f92196": "What should I do if I feel my data privacy has been violated?", "9b1e7e8a-3e78-4c98-a0df-24b37ca791cf": "What techniques can be used to mitigate biases in GAI content and data?", "0a69cf57-ac9d-423f-896d-b9e4e02f762a": "How does re-sampling help in reducing representational biases in generated content?", "164c840b-8cfa-4c20-a441-123e95824ffb": "What is adversarial training, and how does it mitigate biases in GAI content?", "e20bd84c-d838-4049-a2df-fba4b9d06ea1": "Why is it important to evaluate GAI content for representational biases?", "387e47d4-c086-4a07-aee6-28a9fa60cad7": "What are the potential risks of not addressing harmful biases in GAI output?", "3700f1e4-cf35-4c27-b2c3-9101bf5d8dfd": "How can re-ranking be employed to address biases in generated content?", "8c76e76d-83c9-4a57-a7df-b1c0e035ef8b": "What steps should be taken to analyze GAI output for harmful content?", "7d2386c3-a371-45d9-965f-2c7a9c11567e": "What types of harmful content should be checked for in GAI output?", "badd4da0-e5f0-426d-ab2a-1019b483cc97": "How can due diligence help in identifying potential misinformation in GAI content?", "4da36b86-9edb-4192-abf8-385aa94c36e8": "What are CBRN-related or NCII content, and why is it important to monitor them in GAI output?", "f377e766-01de-4a90-9986-ecb3ee8d61a9": "What are the best practices for designing 
consent requests that are easy to understand?", "4490f651-668a-4e60-9931-3c6e393d09b8": "How can user experience research improve the readability of consent requests?", "68e419bb-fa35-4890-98c7-4c8a11d9c78e": "What methods can be used to ensure consent requests are accessible to users with disabilities?", "838be5c8-7e39-41a0-8cc2-5d54526523b5": "Why is it important to avoid \"dark patterns\" in user experience design for consent requests?", "8540243f-924a-413c-bdad-29d5f1240c58": "How can consent requests be tailored to different languages and reading levels?", "c05097c0-b8be-4d9f-94ad-f28a74c0bbf6": "What are \"dark patterns\" in user experience design, and why should they be avoided?", "ed30f33c-f72f-4ca9-a986-e78ea5a84f34": "How can we ensure that users understand the contexts, time span, and entities involved in their data consent?", "078872ce-c620-44bc-b9d9-715ca524c82e": "What are some effective ways to test the comprehension of consent requests among users?", "c9c8531a-c33d-47fb-9c20-0a74872c9cd3": "How can user experience design improve the transparency of data and metadata consent requests?", "bd0d1036-d121-46a7-a63d-633b1fe52539": "What role does user experience research play in making consent requests more accessible and understandable?", "64ded336-d26a-4bba-9264-29a782f46b17": "What are the potential risks associated with the misuse of AI systems by humans?", "a1524a0a-e5c7-4b05-9291-a9f012c2f8a7": "How can the distribution of harmful deepfake images impact physical safety?", "69cb94c7-5d81-4222-9d53-052f0edd2720": "What are the long-term effects of disinformation on societal trust in public institutions?", "10493f76-3a15-41b8-a590-28e28ca5d22f": "How do the characteristics of a GAI model influence the presence of risks?", "0aa175b1-cf6c-4db9-95ae-790c02ed0420": "What factors should organizations consider when measuring GAI risks?", "e8a86a82-58ef-4852-8a9f-46998fe000e6": "How can the architecture of a GAI system affect its risk profile?", "e0c9a867-2ae3-4d5e-b74c-b2145e48b693": "What role do training mechanisms and libraries play in the safety of GAI systems?", "f497ff9f-1f6b-49ff-92b7-b97bbf4e0d33": "How does the availability of model weights impact the security of GAI systems?", "82c14235-06d7-4a04-8d16-eefd798350a9": "What are the implications of using different data types for training GAI models?", "6e3592cd-fc1e-495d-9976-3cea02adf35a": "How can organizations tailor their risk measurement strategies for GAI systems?", "bb8fd71c-8305-452b-bcbf-823fa999f419": "What are the Department of Defense (DOD) AI Ethical Principles?", "84ddfcae-5a84-4ca2-b23f-9f2993a78864": "How does the Intelligence Community (IC) AI Ethics Principles and Framework guide AI use in national security?", "6afc1f62-ea09-46aa-a9b2-a2231efa41db": "What is the Blueprint for an AI Bill of Rights?", "50dbd6b6-2f67-425a-8549-e938e1e443c4": "How can the Blueprint for an AI Bill of Rights inform national security and defense activities?", "33c03e38-7064-41dd-8c15-2570eff220f3": "What are the special requirements for AI in national security and defense activities?", "c062971a-0cf8-4ef1-93b3-5fca436d15d4": "How does the federal government ensure the use of trustworthy AI in law enforcement?", "3664af6f-9a6b-4750-9157-5b8c10c049bf": "What safeguards are in place for AI systems handling classified information?", "57f12af2-eace-4916-bf6c-caa521375c02": "How does the Responsible AI Implementation Pathway impact defense activities?", "3421dbb5-bc3b-4a71-9aba-9794e6c02c61": "What role does the Blueprint for an AI Bill 
of Rights play in the implementation of AI policies?", "1ea5b81c-aa5d-4fad-851a-f32df353f699": "How are existing policies governing automated systems applied to national security activities?", "bb9c137b-e957-4d1b-b681-1150450070b8": "What are the potential harms of using AI to generate non-consensual intimate imagery (NCII)?", "b0543347-b171-470d-ad58-452023d32540": "How can AI-generated content be managed to prevent the creation of child sexual abuse material (CSAM)?", "420598ea-3a86-4086-a5a1-8ca8b6a218a1": "What are the characteristics of trustworthy AI in terms of accountability and transparency?", "b2067f9b-b092-45f0-9fef-ed8c776f2ec2": "How does AI-generated obscene content impact privacy and psychological well-being?", "9d318383-e177-487e-b4cc-8d2e8e8fcc13": "What measures can be taken to ensure AI-generated content is fair and free from harmful bias?", "78e29c0f-c807-43a5-87c1-f4c5adab2595": "How can the spread of AI-generated deepfakes be controlled to protect individuals' likeness and voice?", "1d9240a9-0d09-4243-869a-45ed6bd3e9db": "What are the legal implications of generating explicit or obscene AI content?", "0dd67cb2-1fd7-4f2a-8672-8384a5b08a73": "How can AI be used responsibly to avoid creating degrading or abusive content?", "558abc03-0ada-41ab-b040-b6a193e5b238": "What are the downstream negative consequences of AI-generated CSAM on law enforcement efforts?", "a03bd0ad-5366-46e0-b712-e2748c71adf5": "How can privacy be enhanced in the development and deployment of AI technologies?", "7d190ccc-08eb-4795-975a-b70b5b5615ec": "How does surveillance impact student expression in educational settings?", "36548052-71c8-4515-ac0b-ad22a66ff05f": "What are the potential negative effects of surveillance on tenants?", "9ac8b717-1ae9-40dd-a3f6-5d158e342089": "In what ways does surveillance blur the boundary between work and personal life for workers?", "6b5a119e-29e8-4818-8d39-1e9518975f3e": "How can surveillance exert damaging control over workers' lives?", "5a0058ad-c9e9-4177-8811-35405c083ed1": "Why is the misapplication of data from criminal justice settings problematic for housing access?", "a1134775-18f2-4e09-af57-88092617afdc": "How does surveillance technology shift the burden of oversight from employers to workers?", "1dc6b0e5-60ff-4939-b09e-5d09b8373385": "What are the concerns regarding the use of surveillance in schools?", "b0b4a730-7836-419d-9d06-8a4e167aa24e": "How does surveillance technology affect equality of opportunity for tenants?", "3e475a7b-f183-48f1-9831-ecd371bfa399": "What are the chilling effects of surveillance on student behavior?", "91cdf914-e675-47bf-93d5-c257b52a9a51": "How should the assessment of surveillance technologies be conducted to ensure fairness?", "2e1a0ef5-cf28-488d-8c12-76d6037e5541": "What is the process for a human review of criminal investigative matters?", "ea3e3a56-e8b5-4d12-9369-363db39673a1": "What are the statutory requirements for judicial review in criminal cases?", "dfaba32e-19b0-4171-9fff-cc08fe20d751": "How do civil rights laws protect against discrimination?", "399b8802-887c-4184-b0f2-1a2bb5d07543": "What role does judicial review play in protecting civil rights?", "9dd9b476-9945-476b-9194-d8c83c4f9121": "What are the key elements of civil rights laws in the United States?", "3ed61f5b-fb54-4c4c-b6e5-0883b23b939d": "How does the judicial system ensure compliance with civil rights laws?", "dc4b76d1-b12c-4871-9175-8b56b5ad43ec": "What are the common types of discrimination covered under civil rights laws?", 
"a2334897-6fd9-497a-98b9-6686d453c2ad": "How can individuals seek judicial review if they believe their civil rights have been violated?", "0c5ef9a5-ec71-4016-8ae6-d82d2f95a1c5": "What is the importance of human review in criminal investigations?", "7a2b2ba9-a488-4c90-b169-70168efcc780": "How do statutory requirements influence the judicial review process?", "3fd5f3df-b1eb-4d8b-ab8d-07dddcc7fb48": "What are some effective strategies for testing AI in crisis situations?", "3be1b1c1-3098-49a5-b204-34bfa97d73b3": "How can we ensure information integrity in AI systems?", "7a009e96-556c-4842-92a2-a7b32f8146b7": "What methods can be used to identify and mitigate harmful bias in AI?", "25fac591-48bb-4ed2-bded-de52900f6c77": "How do we address the issue of homogenization in AI content?", "ec3564d2-3ce8-4426-b4d7-b66f34d730e8": "What are the best practices for handling dangerous, violent, or hateful content in AI?", "8e35ecf3-062b-4a03-810e-8eae31b35e67": "What roles do domain experts play in AI design and development?", "a0f073dc-3521-48db-9264-58f3bd671c3a": "How can end-users be effectively involved in the AI development process?", "29b2f6cc-206a-4bd6-98c7-e2c851e3a8b2": "What human factors should be considered during AI operation and monitoring?", "a86dc0bf-80f8-4a7b-969d-cf58b6502c4d": "How can AI systems be configured to handle ethically sensitive contexts?", "481d542a-46dc-4b5e-8bbc-a6e1094820a1": "What are the key tasks for AI actors in ensuring safe and ethical AI deployment?", "4a6050b2-f6b6-4ac9-939c-2e1188e92e89": "What are the key principles for designing ethical automated systems?", "72d17156-9c97-4463-a90b-1619a6608c3d": "How can automated systems impact public rights and opportunities?", "85b5625a-d5ea-45b5-b7b3-e66d4556da73": "What steps should be taken to ensure automated systems are fair and unbiased?", "8b7eeeff-ae14-458b-978e-5c1fc9573219": "How do you integrate ethical principles into the technological design process?", "7b875bc5-cd9a-4069-bcfa-6de556eb6548": "What are some examples of automated systems affecting access to critical needs?", "ee228074-989d-4a9b-bb46-df0a26dfda27": "How can designers ensure that automated systems do not infringe on public rights?", "89b66459-d623-4a30-a195-8da50e72a096": "What role do ethical guidelines play in the development of automated technologies?", "9a2bb4f3-2d1a-4689-9dfd-70c1a4fcfade": "How can we measure the impact of automated systems on public opportunities?", "ea968134-16f4-4d5c-810d-6049e27309d5": "What are the challenges in implementing ethical principles in technology design?", "a62f4790-c903-4ecf-bd87-b92068d5138a": "How can automated systems be designed to enhance public access to critical needs?", "9e50e870-f706-492e-b58e-93871354422c": "What are aggression detectors and how are they being used in schools?", "56433327-6403-42b5-bf0e-0d4ac4b47722": "How effective are aggression detectors in monitoring student behavior?", "bfb3f439-fb7d-4a61-b36f-ce7e06ea95fa": "What are the privacy concerns associated with using aggression detectors in schools?", "2e31e7e4-1966-455f-9c93-2f0416756f97": "How did cheating-detection companies profit during the pandemic?", "552e3fce-d322-41d0-a8d1-3af4d9b6d239": "What are the main arguments students have against cheating-detection companies?", "9f9d57e8-6574-422f-a98d-9523410c912d": "How does virtual testing disadvantage disabled students?", "6f686792-3271-44f2-962a-d0c914f0a618": "What are some examples of ableism and disability discrimination in new surveillance technologies?", 
"608ada78-40e7-49ac-a75a-e38f46e3d296": "How are new surveillance technologies impacting education for disabled students?", "44ee99fa-4fa1-4199-a49e-31e3f890d5de": "What measures can be taken to ensure virtual testing is fair for disabled students?", "b9dd8dbc-8dcc-4aa5-ae17-13bc77130913": "What are the ethical implications of using invasive surveillance technology in schools?", "f49391ff-b450-458f-8e1d-f4b2b768a48b": "What role did the American Civil Liberties Union (ACLU) play in the development of the Blueprint for an AI Bill of Rights?", "3c010abe-44a4-4a61-9b06-c9c60c98d68b": "How did the Aspen Commission on Information Disorder contribute to the discussions on AI oversight possibilities?", "815c1373-abe0-4296-ac84-38501b0c77cd": "What insights did the Australian Human Rights Commission provide regarding the potential harms of AI technologies?", "9eec3afd-f85a-4f17-a4d9-2c0989096d84": "In what ways did the Brookings Institute influence the positive use cases of AI discussed in the OSTP meetings?", "b795e850-79da-4183-9560-e011ed68a0a6": "How did the Center for Democracy and Technology participate in the development of the AI Bill of Rights?", "0ed96dda-2f6e-4335-86b8-3a5d06011bbd": "What specific ideas did Deepmind offer during the OSTP meetings on AI technologies?", "2a6ac783-bbc1-4474-9864-2029c75aea04": "How did the Center on Privacy and Technology at Georgetown Law address privacy concerns related to AI?", "3e13f6fd-312b-432b-9012-933f8b0a169a": "What contributions did the Data and Society Research Institute make to the discussions on AI oversight?", "7b11f5c5-827e-4e70-85f7-d83417de66c7": "How did the Electronic Privacy Information Center (EPIC) engage in the conversations about AI harms and benefits?", "fc17d202-010f-4306-916d-724cc681a109": "What was the focus of the EdSAFE AI Alliance's participation in the OSTP meetings?", "690220f2-52f5-463a-a2bc-b46c7e1aa79c": "What are the key privacy risks to consider during the development life cycle of an automated system?", "e2b24491-dd1a-45d0-a381-86ca5ec207df": "How can privacy risks from reidentification be mitigated in machine learning models?", "56f6feff-327b-4075-ac32-9d237ff6468f": "What are some potential harms to non-users of an automated system due to inferred data?", "425a7779-bf1b-4757-8833-e612c9b1ea05": "How should data collection be communicated to individuals whose data is being collected?", "e03f414b-1fb3-4067-99fc-cd4b122f509d": "What legal considerations should be taken into account when collecting data for training machine learning models?", "0d06fec9-3c74-49df-bc13-1e3ac9679a8d": "How can user experience research help in ensuring that data collection practices align with user expectations?", "b13e829f-73e8-4b70-9adf-6ea6b7b298e8": "What are some effective technical measures to minimize privacy risks in automated systems?", "ad29fd63-96d1-4788-8c4e-794dd0d3f326": "How can policy measures help in mitigating privacy risks associated with community surveillance?", "cefe533c-58c1-4179-b1f7-9134e20f1d19": "Why is it important to minimize data collection in the development of automated systems?", "61db5427-3644-439f-9a42-012708bb8917": "What steps can be taken to ensure that data collection for machine learning is consistent with user expectations and desires?", "7d332e8c-9943-4cdf-8bb4-1a3b498766ea": "What is the importance of tailoring explanations to specific audiences?", "b8324cd6-d521-45f3-8ba9-08ce631df758": "How can user experience research help in assessing the effectiveness of tailored explanations?", 
"05f09f2b-477a-4dbf-aaec-b5af1923c6b1": "Why might an explanation to a decision subject differ from one given to an advocate or domain expert?", "f022ec7a-8358-48d6-824c-0c26e658b35a": "What mechanisms can be used to build understanding and intuitions for a stated purpose?", "469f8389-8d8d-4b27-a69a-f16514c85c43": "How can explanations be effectively targeted to different audiences?", "5e39df0c-36a5-4afa-a2f9-6c5e341815f9": "What are some methods to assess the tailoring of explanations?", "8d3feca0-94b3-4b4a-a785-23a3b9b9a23c": "Why is it important to clearly state the audience for an explanation?", "3a3c3db2-4132-4478-a847-b36068c56e01": "How does the role of the recipient (eg, subject, advocate, expert) influence the type of explanation provided?", "15467e60-3777-472b-bd0b-47bf9f467a78": "What are the benefits of providing tailored explanations in decision-making processes?", "9a7b141e-920a-487a-a055-e1e9f4f1b2e6": "How can plain-language statements about causality differ from other types of explanations?", "ab11ab5b-818a-4880-8e91-ed269bd1af2c": "What does it mean when a document states that mentioning commercial partners is for information only?", "df1199d7-da58-4fef-8f9f-07735134932e": "Why do some documents include a disclaimer about not implying endorsement by a US Government agency?", "5958036c-3258-4303-8b1a-1622e068421a": "How should I interpret references to academic partners in a government document?", "5a32b7cd-ddd5-4513-abdf-563e5cd76816": "What is the significance of stating that materials or equipment are not necessarily the best available?", "fee5f65d-cb22-46ee-8464-222e373ff06b": "Why might a document specify that it does not intend to imply recommendation of any products?", "4a58ee08-f3de-45fd-907f-e153459dc81e": "What is the purpose of including a disclaimer about non-profit partners in official documents?", "d42d824c-1120-43e8-9235-7572b76c40a4": "How can I determine if a product mentioned in a government document is endorsed by the agency?", "4a312aa6-5cb4-4c06-8984-88e91ac45dc4": "What should I consider when a document mentions commercial entities but includes a disclaimer?", "d04730cf-a6d4-47bc-a699-0e3df94761de": "Why do government documents often include disclaimers about the quality of mentioned entities or materials?", "14564a8a-cdac-40c0-a780-78be519b65dd": "How does a disclaimer about not implying endorsement affect the credibility of the information provided?", "569527ac-7ac6-431b-875e-d434eb402c21": "What is AI red-teaming and how does it help in assessing privacy risks in AI systems?", "8389f03a-5744-4818-89fe-c077ff7f056c": "How can AI systems output training data samples and what are the associated risks?", "1ccccd94-2c43-4041-a4bc-08e24d811d35": "What are model extraction and membership inference risks in AI systems?", "93b0c907-004d-4f32-8b53-a052b75beeff": "How can AI systems reveal biometric or confidential information, and what measures can be taken to prevent this?", "a7f6c8ab-1b04-4dfa-9f57-4235258646b6": "What is the importance of engaging with end-users to understand their concerns about content provenance in AI systems?", "526267ce-38f4-4e58-b51d-6ae5ff0cf0dd": "How can AI systems track or reveal the location information of users or members of training datasets?", "b79cbec1-977d-4eb4-83ae-3d2d9ac42e08": "What are some techniques for designing provenance data-tracking in AI systems?", "9fa37ff2-7f29-4880-94c1-06fb3c6e92d5": "How does the Human-AI Configuration impact the privacy risks of AI systems?", "93413343-a62f-4859-8ee9-a611536c2b93": "What 
role do domain experts play in the operation and monitoring of AI systems to ensure privacy?", "48834fa9-9abe-4fae-a27c-636139f88c50": "How can intellectual property be protected in the deployment of AI systems?", "65958d5c-13fc-4718-8038-77c445872b42": "What are the key expectations for automated systems in terms of data privacy?", "ffe21033-700d-4f6d-aace-0e85d2620745": "Why are traditional terms of service considered inadequate for protecting privacy?", "8c46db62-ddab-49e2-b983-32432a14f583": "How can built-in privacy protections enhance data privacy in automated systems?", "4a376bd0-31f5-4955-8cfe-1e0e65d8f7e0": "What is meant by \"data minimization\" in the context of automated systems?", "7e329714-1d9f-4b04-926d-a8d79ddfdd9d": "How can transparency be ensured in the use and collection of personal data by automated systems?", "7d478e4e-d640-4a81-a971-a2d88d655d78": "What mechanisms should be in place to allow users to control access to their data?", "61151ee6-a28e-4a40-97ad-d1edeee36eb6": "How does \"privacy by design and by default\" differ from traditional privacy measures?", "b3576af0-4c36-49f5-a0e8-247d2ec6f65d": "What are the benefits of having clear mechanisms for users to manage their metadata?", "b624d6cb-faa1-4468-96a8-136321416574": "How should automated systems handle the sharing and storing of personal data to meet privacy expectations?", "8ecd7751-64ef-421e-a44c-3231886cdc17": "What role do technical standards and practices play in the development of privacy protections for automated systems?", "632ebbb0-13c7-4a44-b49f-bb277513d09c": "What is the importance of verifying deduplication in GAI training data samples?", "e06e7091-ac74-4abc-bc2b-da388c1149b9": "How does synthetic data impact the deduplication process in AI training?", "64518426-3d37-4769-9d8d-826b0577f97a": "What are the potential risks of not verifying deduplication in AI training data?", "dc8b4a4c-213f-4f2e-932c-a9a6f8cdadcc": "How can harmful bias and homogenization affect AI deployment?", "1bcb29e6-07a9-43ff-a455-16802ef28195": "What roles do domain experts play in AI impact assessment?", "19e66fef-7521-471f-82e2-f6884868acaa": "Why is it crucial to involve end-users in the operation and monitoring of AI systems?", "488f2216-2ff2-4ee4-9851-37299ad2c6c7": "What are the best practices for ensuring information integrity in AI systems?", "64edf8a2-3462-4be0-976d-1fddeea48012": "How can AI actors mitigate the risks of harmful bias in AI systems?", "b841ebb5-559d-4fe8-881f-c79ff7004ee3": "What is the role of TEVV in AI deployment and monitoring?", "5a0bd7f7-a52e-418f-9ada-e1fe84848613": "How does deduplication of training data samples contribute to the overall performance of AI models?", "64a056c2-8ae4-4280-874c-4a01461cc3e4": "What are some examples of state and local governments responding to AI-related problems with legislation?", "d69e8d1e-a600-475f-9486-61b3c75a2df4": "How have courts extended longstanding statutory protections to new and emerging technologies?", "2dbbbadc-b880-41d0-bb6c-e682dacb3df7": "What companies are known for incorporating additional protections in their automated systems?", "2827fc62-14af-4e2c-b663-ba391d6d25ba": "What innovative guardrails are researchers developing for the ethical use of AI?", "7fb6fed7-6aae-4ccc-b597-ab7e62e9b21f": "What are the key principles proposed by advocates and researchers for the ethical use of AI?", "44938ddc-9358-4c82-94f2-840413742d55": "What is the OECD\u2019s 2019 Recommendation on Artificial Intelligence?", "61da46c4-d66a-4522-bc97-af4fc2456222": 
"How has the United States adopted the OECD\u2019s principles for trustworthy AI?", "3f063cef-02a0-408b-98c7-56446e0fc13b": "What are the main principles outlined in Executive Order 13960 on AI?", "b4c11f6f-dc78-45df-9b02-6c9e97e63441": "How does Executive Order 13960 aim to promote the use of trustworthy AI in the federal government?", "a88f3780-0edd-49b8-89f3-212ba3ce8b3a": "What role do government organizations play in proposing principles for the ethical use of AI?", "ba2105e2-73d4-4990-9944-6d240e81707f": "How can automated systems be designed to prevent discrimination in loan approvals?", "17ebc163-d3e6-449c-980f-7ed26f839df6": "What are the basic safeguards against bias in automated systems?", "aab856e9-8ff2-46d9-893b-b6d07ab393c6": "How do automated systems impact underserved communities?", "cbad6951-7c8a-46df-bb70-c92a3f311a56": "What measures can be taken to ensure fairness in automated hiring processes?", "79e16b79-9d36-4087-8659-146694974dfc": "How can we ensure that automated systems treat all people fairly in the criminal justice system?", "1b05d0a8-49d2-47be-ab48-25a75c6e77f6": "What are the potential biases in using educational attainment in loan underwriting models?", "a980423a-f92e-407d-8954-c365cd973ffd": "How can we protect against abuse in automated medical treatment and payment systems?", "70182aa9-6598-41d8-94c5-c86d69052289": "What proactive protections can support underserved communities against automated system biases?", "c0cdbc54-4086-43e2-9a8b-0e45b61ecc4e": "How can nontraditional factors in automated systems lead to discrimination?", "732b7212-2022-429b-9d2d-ea2f1ff7d3f2": "What are the implications of higher loan prices for HBCU graduates in automated loan pricing models?", "9875b4af-3367-4c2e-9a31-faf2702adfa5": "What are the best practices for reviewing and documenting the accuracy and relevance of data used in different stages of the AI lifecycle?", "6fe66997-2216-429a-9ef4-7fe53eb306b3": "How can harmful bias and homogenization be mitigated in AI systems?", "36ec6e8f-35e5-4186-b6d7-f265dcdef11f": "What techniques are effective for fact-checking information generated by GAI systems?", "7d63e0e7-2c2e-4330-bdf2-4c7e114b7805": "How can the accuracy and veracity of information from multiple or unknown sources be verified in AI systems?", "bfa45ef0-bea8-47fc-b980-0b69ed2e1008": "What methods can be used to develop and implement testing techniques to identify synthetic media produced by GAI?", "6df2f1c4-ff8a-4107-a463-f56276352018": "How can one distinguish between GAI-produced content and human-generated content?", "51560d91-654c-4d78-bfbe-98c8ec9efde0": "What are the recommended plans for regular adversarial testing of GAI systems?", "9fde8cf0-b0b2-45f7-ad80-cadc55db3835": "How can vulnerabilities and potential misuse of GAI systems be identified through adversarial testing?", "b5e1a791-bb5b-42e1-b095-4eddeb8cbe89": "What are the key tasks for AI actors in AI development, domain expertise, and TEVV?", "3d828c80-9b73-4da4-a5ca-9009d879a58e": "What processes should be in place to ensure operator and practitioner proficiency with AI system performance and trustworthiness?", "8fd42408-bbd3-4239-b478-4be231789ff6": "What are the benefits of providing limited waivers of confidentiality for automated systems?", "41478b94-524f-4cb6-a05f-d28d7bf4f9c5": "How can designers protect intellectual property while allowing meaningful oversight of automated systems?", "607f4dd7-ca7e-4ee9-b4a8-f5f9760aeb1f": "What measures can be taken to ensure trade secrets are not 
unwarrantedly disclosed during legal discovery?", "dc9d51d9-e01e-4c24-8bc5-f2d310dd159f": "Why is meaningful access to source code and documentation important in sensitive domains?", "d012d297-b626-462a-868b-099e4ebe5d08": "How can automated systems be designed to provide built-in explanations for high-risk scenarios?", "19032e5d-0bf0-4167-86e2-54f143702e03": "What is the principle of Notice and Explanation in the context of automated systems?", "46b67447-c9d2-4689-b32c-eb292015ef9b": "How can fully-transparent models benefit the examination of automated systems?", "bbd073b0-20ee-4d44-ba79-37e22da7f19b": "What are the challenges of balancing confidentiality and transparency in automated systems?", "9fd04f5e-f2a7-4d3d-b5fb-3c5182e8df33": "How can court orders be used to protect sensitive information during the examination of automated systems?", "4a50706f-2be3-4191-a799-d03cfb6bd582": "What role does legal discovery play in the oversight of automated systems in sensitive domains?", "ef610b4b-0ce3-4569-b444-685ef6d6f71d": "What is algorithmic discrimination and how can it violate legal protections?", "3332f97e-e957-4370-b156-5b2e74510318": "How can designers and developers protect against algorithmic discrimination?", "38324b8e-0942-49f8-88c3-e261660ac848": "What are proactive equity assessments in the context of system design?", "9676f8ec-7e8e-461d-a77e-337c20095d6e": "Why is it important to use representative data in automated systems?", "07c06943-1c60-4690-a161-f2c3d04c88b7": "How can proxies for demographic features lead to algorithmic discrimination?", "f8088e52-e35c-4f3b-8479-e530e41e83c5": "What measures can be taken to ensure accessibility for people with disabilities in automated systems?", "d218c959-31ff-4b08-8feb-b6cd5b7f9808": "What is disparity testing and why is it important before deploying automated systems?", "fcda6d4e-26ec-4b88-b9e2-33410629db75": "How can organizations provide clear oversight of automated systems to prevent discrimination?", "358806ca-03c5-4219-8f5d-7d6bd50c4a51": "What is an algorithmic impact assessment and why is it necessary?", "95dfc992-0553-4499-94a7-d42021d7a805": "How can independent evaluation and plain language reporting help in mitigating algorithmic discrimination?", "3c860073-dead-456a-9dd9-746a0c1f2cc0": "What is ballot curing and how does it work?", "6e31af36-593c-4b4b-a439-f6361f629e60": "Which states have ballot curing procedures in place?", "bcac432d-f310-4e4c-8294-803789746673": "Are ballot curing procedures constitutionally required in all states?", "59c27c92-ad44-4eec-b9a6-c79134babf4b": "How do election officials contact voters for ballot curing?", "753f8d63-ebe4-412c-8c70-4b6defdfe6bf": "What kind of information might voters need to provide during the ballot curing process?", "838c69d0-ff96-462d-a5ec-c67efa2bcca9": "Can a voter cure their ballot if they missed the initial deadline?", "c7ec0382-b12d-4075-915b-a29a72506cfc": "How does the ballot curing process ensure the validity of a ballot?", "22887c27-fdf6-433a-9832-9a9115f9683a": "What are the common reasons a ballot might need curing?", "fdad09f6-1e41-4f9c-9715-fffb065295c1": "How long do voters typically have to cure their ballots?", "e1115d17-d43b-4b36-8792-e50c67b9bb8c": "Are there any states that do not allow ballot curing?", "ebd1eb6d-41f7-4335-b070-d78501a2156e": "What is algorithmic discrimination and how can it be avoided?", "64a7ca44-9bdd-4f2f-bdb1-baed69c8d781": "How do proxies contribute to algorithmic discrimination?", "20367a8b-c64a-4864-bc1b-3d280f351a0e": "Why is it 
important to test for correlation between demographic information and attributes in data?", "d9858772-aa90-4a5a-be0c-fc5b5b333bd8": "What steps should be taken if a proxy is identified in a system?", "57ce6697-9b12-431e-8391-82fdd25840ee": "How can organizations ensure that a proxy feature is not given undue weight in decision-making?", "39f694c3-4c98-4d86-b3e2-28fd601599af": "What are some examples of attributes that might serve as proxies for demographic features?", "14fb21ac-2111-40ac-bda5-782d92f9b5a6": "What legal implications can arise from using proxies in algorithmic decision-making?", "fb146223-b6a8-49be-bb4d-631246b9b613": "How can alternative attributes be identified if a proxy is found in a system?", "b11ec9e4-e31c-4b32-b7d2-e9801186fd91": "What are the best practices for monitoring systems to prevent algorithmic discrimination?", "8fd37fe4-2c42-4678-bce5-922ed9d2dae5": "Why is proactive testing crucial in the design, development, and use of systems to counter discrimination?", "4457a37e-6d0e-411d-aea4-b1c32f5a127d": "What are the limitations of AI in different contexts of use?", "a7e21fa9-a576-4c9c-b7b8-cf1dc782ba81": "How can structured human feedback improve AI systems?", "bdaa7693-aa11-46bb-a39b-d7e351b3fff2": "What are some anticipated human-AI configurations in the future?", "bc7e0482-14cb-4e29-83b9-d34e973bcfb4": "How can harmful bias and homogenization in AI be mitigated?", "06906bd2-d444-4b50-86fc-e7d4b6caa7be": "What measures can be taken to prevent AI from generating dangerous, violent, or hateful content?", "e7276fb9-5e30-4e77-a07f-98fae0b0a30c": "How can organizations identify and document illegal uses of AI systems?", "416fc8f9-4237-4877-a514-88013a360abd": "What are the risks associated with AI in handling CBRN (Chemical, Biological, Radiological, and Nuclear) information?", "35cbeeef-7459-4fe7-bb33-74fc872c987a": "How can interdisciplinary teams contribute to the development of AI systems?", "e5be6c98-b642-4a1b-968b-8d3d71eb0fd3": "What competencies and skills are necessary for AI actors to establish context reflecting demographic diversity?", "f6636c23-4d93-4bb1-abf3-5ef8d69586c7": "Why is interdisciplinary collaboration important in AI development?", "6e27bf91-4131-4e12-b6fa-cc6d5997f88f": "What are the long-term performance characteristics of General Artificial Intelligence (GAI) that remain unknown?", "ff520072-afb0-4b73-9f62-29ac419bf82d": "How can we ensure information integrity in the deployment of GAI systems?", "4e3cb88c-db89-46db-bfaf-6efc6c06e573": "What are the potential dangers of GAI generating violent or hateful content?", "1ba43e0c-45c7-42b2-9dcf-0edbb0e71b32": "How can a plan be devised to halt the development or deployment of a GAI system that poses unacceptable negative risks?", "82d470a2-1e99-4a76-92f8-281573661995": "What measures can be taken to prevent GAI systems from generating content that violates the law, such as CSAM or NCII?", "2e5d1e86-46b3-4528-b9e2-cbe1e372e99b": "What are the risks associated with GAI generating obscene, degrading, or abusive content?", "c53e6933-dd22-42ec-865b-0cfc8a4a6f10": "How can harmful bias and homogenization be mitigated in GAI systems?", "60fdcce3-24cf-46ff-bd37-71e20af72d5b": "What are the best practices for establishing transparent acceptable use policies for GAI?", "35c63a1b-39bb-4e6e-87d1-597fbe552fb0": "How can governance and oversight be effectively implemented for GAI systems?", "7e800537-d1f9-48cc-b514-dc45ba1e805f": "What are the key components of a risk management process for GAI that ensures 
transparency and aligns with organizational risk priorities?", "8caffdb8-0e56-4747-8e14-5076b635e19f": "What are the key roles and responsibilities for managing AI risks within an organization?", "7a804990-826f-4344-923d-164698caefdd": "How should an organization document and communicate AI incidents to stakeholders?", "1fefa53a-88bb-48ff-8f41-3aeae62bce74": "What procedures should be established for engaging teams in AI system incident response?", "b4eb2632-537c-4adb-8f56-5f7392106c81": "How can an organization ensure that AI Actors have the appropriate skills and training for incident response?", "3e0d09b6-f68d-4dc8-bef2-d2295751db89": "What are some official resources for reporting AI incidents?", "b9023848-3972-4a1d-9214-477117a0d3df": "How can harmful bias and homogenization be addressed in AI incident response procedures?", "8b69e61b-4fce-477c-8e17-3871ba07c8ca": "What is the importance of having clear lines of communication for AI risk management?", "3003c533-bec8-424a-a54a-5bbe26b84144": "How can organizations verify the skills and training of AI Actors involved in incident response?", "6e59a970-f171-4ff5-a512-b3d6396bb916": "What are the benefits of having diverse teams for AI system incident response?", "30812fe9-ef65-459a-9731-9e837e6be7cb": "How should organizations integrate value chain and component integration in AI risk management?", "8abc3501-531b-42b8-9cec-6ed049ec1079": "What are some appropriate responses to identified privacy risks in data processing?", "473df226-ad88-4f10-8241-ea6b0ab61506": "Why is it not appropriate to transfer privacy risks to users via notice or consent requests?", "a74c77b8-07cd-430c-8aac-7b1585bc505d": "What are privacy-preserving security best practices for automated systems?", "c4688abb-c2c2-46c4-86b1-0261e15da71b": "How can privacy-enhancing cryptography help in maintaining data privacy?", "003673d2-5ecb-440b-80be-5cd34bbaa356": "What role do fine-grained permissions and access control mechanisms play in privacy-preserving security?", "cb249eee-5357-4ed7-99fb-8da55348c57c": "Why is it important to ensure data and metadata do not leak beyond the consented use case?", "af4ab8cd-cae8-4e7c-922b-8e193f1f5383": "What are some examples of privacy-enhancing technologies?", "8fbca3b4-2fde-4348-a2e2-1dede9fbabb2": "How can entities balance privacy risks and benefits in data processing?", "39179d19-89d2-4969-a2e9-b1c27831cd8f": "What are conventional system security protocols that can be used alongside privacy-enhancing technologies?", "4e933b6b-f6f3-45a9-aa9d-c54fc2d80ade": "How can entities ensure users understand the privacy risks associated with data processing?", "7a230dee-19b1-4409-ac9b-4fd593416a92": "What are the main challenges for consumers with the rise of AI-enabled products and services?", "e0de42d1-9769-4cec-a48a-c883f8a5c316": "How can communities benefit from the growing ecosystem of IoT devices and smart city products?", "1bbc381a-2090-45be-8cad-d9d9d862608d": "What role does the Federal Trade Commission play in protecting consumer privacy in the context of advanced platforms and services?", "93fdcd7f-ecce-4c63-80b4-7e3b65a0ec49": "How can consumers ensure their rights are protected when using AI-enabled consumer products?", "e3a6aac7-0d0f-47f2-a09a-5a902286de83": "What are some potential risks associated with the use of smart city products and services?", "9343529d-ade3-4b58-8c53-8a84ae0b6620": "How can policymakers balance innovation with consumer protection in the digital age?", "1402d638-7b8d-4525-9c64-93611b63063a": "What 
strategies can be employed to enhance consumer trust in AI-enabled products and services?", "2b45e986-94f6-441b-a5b6-ad7d5710929e": "How do IoT devices impact consumer privacy and what measures can be taken to mitigate these impacts?", "60e72442-44d0-4e8e-8f79-1d3e0616d45d": "What are the opportunities for improving community well-being through the use of advanced digital platforms?", "5f17138e-15e8-4f1b-9538-d62720ee1357": "How can consumers stay informed about their rights and protections in the evolving digital landscape?", "5c5c28fe-f220-4cfb-b5dd-59fa4fc0ed70": "What is the NSF Program on Fairness in Artificial Intelligence in collaboration with Amazon?", "1ecd21e2-5962-46c3-8ac6-39f368d94cb0": "How does automatic signature verification software threaten to disenfranchise US voters?", "9a291c46-0178-49d0-a45a-eb38560f5715": "What is the cure period for absentee and mail-in ballots according to Ballotpedia?", "df46e7f9-256e-4668-a774-41a48f74c576": "How can you tell if two mail ballot signatures are by the same person, as discussed by the New York Times?", "3bac4694-ad52-44b3-a9b8-bdd45f2f3526": "What are the main points discussed in the article \"The Low Down on Ballot Curing\" by Rachel Orey and Owen Bacskai?", "caf5e917-228f-452a-aa7e-5e4722643f18": "What are the potential issues with automatic signature verification software in the context of voting?", "d769a613-d040-42c5-b109-da27d5f9787e": "How does the National Science Foundation's program on AI fairness aim to address biases?", "2f8b1ef4-df6f-4ba6-9615-e1a85909f4af": "What are the steps involved in the ballot curing process?", "ace15994-5883-41c6-a269-9979b4205fbd": "Why is the cure period for absentee and mail-in ballots important for election integrity?", "e1eb277b-b995-4e63-a1eb-9180ffd6f139": "What examples of signature matching challenges are highlighted by Larry Buchanan and Alicia Parlapiano in the New York Times?", "076a1fc1-5b6b-46f0-b2b6-48cfac0d15e7": "What is the importance of sharing pre-deployment testing results with relevant GAI actors?", "64993f9a-4b76-4a8f-874c-3d273367f43f": "Who are considered relevant GAI actors in the context of system release approval?", "153b18fe-97ea-4516-a51c-8aee0b2f8f07": "How does pre-deployment testing contribute to information security?", "99dc15b6-9a6f-46c8-ac5b-2de91142fa28": "What are the key components of human-AI configuration in information security?", "ee485024-fbdf-4772-9401-42eb2a5b5603": "What steps are involved in the pre-deployment testing process for AI systems?", "94cb95a8-ec57-470d-b1e8-3959d8963369": "How can sharing pre-deployment testing results improve system release decisions?", "af4cbd1c-b744-4bab-97a4-ec1f323b9260": "What are the potential risks of not sharing pre-deployment testing results with relevant actors?", "0325a366-a9b0-45a5-87cf-7107248c17f6": "How does confabulation relate to information security in AI systems?", "a2a6f02b-4f15-4743-a2b5-3f2025414a01": "What role does system release approval authority play in the deployment of AI systems?", "835ee7d9-eea0-48db-a932-b70328a2b80f": "How can human-AI configuration impact the effectiveness of information security measures?", "2184e32c-4f19-4057-964f-48f6090c4d64": "What are the health benefits of traffic calming measures according to the US Department of Transportation?", "426c71f4-c8da-4c45-b83c-0cb9d64407ac": "How do traffic calming measures help in slowing vehicle speeds?", "4f9726e7-c892-47d5-941c-423f9a84d608": "What are some examples of traffic calming measures mentioned by the US Department of 
Transportation?", "89c9ee09-99a0-44e8-9140-622366b15748": "How can organizations monitor and fix their AI models using responsible AI ventures?", "02099e7a-c0f9-4fec-8ac9-5f7fe1eab08c": "What are some startups that focus on AI ethics according to Karen Hao?", "38cff01a-3e4d-4f0d-84b0-6e9383d80e02": "What is the role of responsible AI ventures in promoting ethical AI practices?", "2c7799cd-0027-4338-a539-47cd43e086c6": "Can you provide a summary of the article \"Worried about your firm\u2019s AI ethics? These startups are here to help\" by Karen Hao?", "dc59187d-f64f-4de6-af9d-5e6e93133b18": "What are some top progressive companies building ethical AI in 2021 according to Disha Sinha?", "6f52455b-2a16-4f6c-94cb-d9ba727691af": "How does the growing ecosystem of responsible AI ventures impact the development of AI ethics?", "16af67cc-0dfc-4203-815d-8b6bf5ef6277": "What are the key points discussed in the MIT Technology Review article on AI ethics startups?", "555d774d-d331-4349-951f-af19f683089a": "What are the main privacy risks associated with AI as discussed by Lee, H et al (2024)?", "91ef15f4-5185-4c16-9ef0-4bbb37697969": "How does data poisoning exploit generative AI according to Lenaerts-Bergmans, B (2024)?", "1c1560c2-164b-4de3-9972-65d5c636bd55": "Why are GPT detectors biased against non-native English writers as per Liang, W et al (2023)?", "b0939336-ece6-40eb-82f5-1ec5f0e3b997": "What are the energy costs associated with AI deployment discussed by Luccioni, A et al (2023)?", "e8beb28b-df88-4857-a896-9492bc49fe69": "What operational risks does AI pose in large-scale biological attacks according to Mouton, C et al (2024)?", "d799c0a7-a358-47d3-8fc2-0b8da05d95fa": "How does generative AI exhibit worse biases compared to humans as stated by Nicoletti, L et al (2023)?", "4f6a16c0-eb08-4d81-9e2d-968a9c7bbe5a": "What are the key findings of the National Institute of Standards and Technology (2024) on adversarial machine learning?", "8029f69a-cddb-4697-8f5f-a117433fee74": "How can deepfakes impact privacy according to the taxonomy provided by Lee, H et al (2024)?", "66f00bc6-ab1d-43e9-9153-e2c75516e556": "What measures can be taken to mitigate data poisoning in generative AI as suggested by Lenaerts-Bergmans, B (2024)?", "5d336249-8464-410f-8952-8b2a4b802ba1": "What are the implications of biased GPT detectors for non-native English writers as discussed by Liang, W et al (2023)?", "cca47a77-0520-490a-ba33-7fc205506cff": "What are tenant screening algorithms and how do they impact housing applications?", "193d2e40-1043-47cd-a7ed-4c589f9ed491": "How do automated valuation systems work in mortgage underwriting?", "1377b78c-14b8-431e-bbcf-a61ca726482b": "What role do workplace algorithms play in determining employee pay and promotions?", "b20bfb27-3464-4d36-af79-19e0efb7136b": "How are virtual or augmented reality programs used in workplace training?", "b63ce3cf-4572-4655-8f8c-f0590a07b1dd": "What are the implications of electronic workplace surveillance on employee privacy?", "518edd21-747b-416c-a5e4-eded44945fd5": "How do AI-assisted diagnostic tools support clinical decision making in healthcare?", "f29882f4-75bf-4ba7-8c1a-4908f184296a": "What are the benefits and risks of using medical AI systems and devices?", "0477b1d1-9503-4a65-9872-d4e96a8ba1b1": "How do predictive models in healthcare influence patient treatment plans?", "cb0e5e06-fc4b-4849-a9f5-cc5a5541d9c4": "What are the ethical concerns surrounding the use of algorithms in employment decisions?", 
"675faee1-4b6f-46f9-9004-bfea75569eee": "How do automated valuations from online aggregator websites affect home insurance rates?", "e3b5b92f-9f7b-4c54-9240-dfaff35e382f": "What information is required to build or validate a risk assessment?", "50ddae98-3e18-4f13-87df-bdf5c87c3e49": "How can the public access information used in risk assessments?", "857942ee-2e35-4394-93cc-4a1070f88dbf": "Are trade secrets protected in criminal cases?", "039a84a0-ad22-41a4-9569-a900c59cd915": "Can trade secrets be used to prevent discovery in criminal matters?", "730583a5-c594-465e-b9db-d94372af5b92": "What are the legal implications of using trade secrets in criminal cases?", "a399efb4-2fe3-4b7d-bb74-468cb88c6692": "How does public inspection of risk assessment information impact transparency?", "25b7a363-4a6f-4e92-bf19-820353d20eda": "What constitutes a trade secret in the context of criminal law?", "e89b7e4e-9d0c-49e9-ba4b-d96fb7f38d1d": "How does the law balance trade secrets and public interest in criminal cases?", "43939503-05a7-4f81-a001-b61fae0e6cf3": "What are the consequences of failing to disclose information in a risk assessment?", "741c8a8a-a15d-4b18-b19a-a167119ce939": "How can parties in a criminal case challenge the use of trade secrets to quash discovery?", "ce483416-c348-4911-9628-813050e4eeab": "What are the best practices for limiting the scope of data collection to avoid mission creep?", "2ca2c34f-c0e3-4a49-bb51-b639dcabcfe2": "How can organizations determine if data collection is strictly necessary for their identified goals?", "361b48b8-e93c-4e16-91a5-cc5e07f1fe3d": "What steps should be taken to minimize data collection as much as possible?", "ac634aac-bdc9-4b09-83c7-537d7fab01bb": "How should organizations assess new privacy risks when using data in a different context?", "7f844336-cb98-4ae3-b02c-d29460977665": "What are appropriate mitigation measures for new privacy risks, and when is express consent required?", "09a89a5d-baf9-44e7-97ce-9150f52dcddb": "How can clear timelines for data retention be established and enforced?", "145c8074-9a0f-4eeb-a364-f358745607c7": "What are the legal or policy-based limitations that affect data retention timelines?", "9f2b34d5-6f6f-477a-95d2-34caa90de98d": "How should data retention timelines be documented and justified?", "1e82f71e-654d-42ec-9c14-6dadbe1f6a21": "What methods can entities use to proactively identify potential harms related to sensitive data?", "99c21c13-0f1b-4e41-ba73-66b99a72b9f5": "What strategies can be employed to manage and mitigate risks associated with the collection, use, sharing, or storage of sensitive data?", "3be3f630-af00-421e-9293-5b36b1457f21": "What are the AI RMF functions and their corresponding tags?", "15856cf5-933b-4a51-9b87-1a41b7989c41": "How should organizations determine the applicability of suggested actions to relevant AI actors?", "f8eeb340-f824-4954-8bba-97e13a1eaaa4": "What does the Action ID GV-11-001 represent?", "41e2ede8-bed6-4392-91c8-4d6e11fd5723": "Are suggested actions for GAI developers always relevant to GAI deployers?", "8d49c1c5-f878-4154-9587-d0639e096bfe": "What information is included in each table of suggested actions?", "256b9f00-7e6f-424f-a18c-4c07a21a9c0a": "How are GAI risks linked to suggested actions?", "2fefdcdf-4aed-4bf3-a395-cca2e7e6f51f": "What does the tag \"MP\" stand for in the AI RMF functions?", "5073c0c1-051a-4a7f-8466-425db5f7c409": "How can organizations manage GAI risks according to the suggested actions?", "6a8808c6-ab61-4357-826e-0a9c216b874d": "What are AI Actor 
Tasks, and how are they related to subcategories?", "ac5de47b-969a-4190-8637-c26262014360": "Why might not every AI Actor Task be listed for each subcategory?", "2d85a293-4a92-4d77-a148-01e6bdb2e171": "What are the best practices for identifying and documenting how a system relies on upstream data sources?", "fa278b40-585c-4815-898c-0b82776e868b": "How can one ensure the integrity of information when a system serves as an upstream dependency for other systems?", "d2d872e7-55fe-471f-8a3c-444fd24feaeb": "What methods can be used to observe and analyze the interaction of a GAI system with external networks?", "ce2f6805-1d0b-408b-93cb-df89733e20d7": "How can potential negative externalities be identified when analyzing a GAI system's interaction with external networks?", "f0e5e018-68e2-4930-9f82-18a05118dd73": "What are the key considerations for ensuring scientific integrity in experimental design and data collection for GAI systems?", "aaf22726-5e88-48f2-9816-290e976a2c52": "How can the accuracy and reliability of GAI output be assessed effectively?", "621c3d3b-6588-4b26-817f-34cf97d21faf": "What strategies can be employed to validate the authenticity of GAI output?", "d4ebcdf7-e442-4f4e-ae79-bc5b7049698f": "How important is content provenance in maintaining the integrity of information in GAI systems?", "1ed4e09e-46b0-42fb-8f66-ad56d901feb1": "What are the risks associated with the integration of value chains and components in GAI systems?", "4ee25323-db4d-4006-941f-65f9ef0a30a2": "How can one document the trustworthiness and construct validation of a GAI system?", "c87b4890-2e5b-4bac-9ab2-38345179a167": "What are the current consumer data privacy protection regimes in the United States?", "a5cec61b-fe2c-4f0a-b43c-0bcfda0a4e61": "Why does the United States lack a comprehensive statutory or regulatory framework for personal data rights?", "cd837571-a21f-4d34-be84-a254ecaabd8e": "How do existing laws guide the collection and use of personal data in specific contexts like health and employment?", "bf81addc-77e6-4962-a28d-6a88b104c798": "What are the potential harms of not having a standardized data privacy protection regime in the US?", "0a2781a2-70af-4a14-833a-be0db30d26de": "How do state-level consumer data privacy laws differ from one another?", "70ebdeb5-d993-421c-98de-6f8c8a151dcc": "What additional protections could assure the American public about the use of automated systems?", "c3a7e646-03e3-4fcb-a49d-f6aeb95cd652": "How does the patchwork of laws affect the application of data privacy in an automated society?", "b2069dc4-e694-456f-ac4e-bc92042d8f57": "What are the challenges in applying existing data privacy laws to new technological contexts?", "eaf0aa98-7660-4a3d-9f6e-c63c68dafdbc": "How can consumers ensure their personal data is not being monitored or collected without consent?", "d8553bac-b1a1-4c02-9567-b5f42bb59bee": "What legal authority is required for automated systems to collect personal data in the US?", "6a84a0e6-451d-4969-8be6-245780aeb397": "What is prompt injection in the context of GAI systems?", "7b29eb41-5e52-429f-af23-ae265e916f3f": "How do direct prompt injections differ from indirect prompt injections?", "f9c5155b-5175-472a-bbc4-0b5013dfe6b9": "What are some potential consequences of direct prompt injection attacks on GAI systems?", "8318048b-c34f-44ee-a2ba-511717b16077": "How can indirect prompt injections exploit LLM-integrated applications?", "0eb2f6c5-07ed-4df1-b0ee-9772ba08a943": "What are some examples of vulnerabilities that can be exploited through 
indirect prompt injections?", "1d52cdc1-e84a-4d67-80f9-7540a2133c4c": "How can querying a closed production model reveal previously undisclosed information?", "7b697db4-b43e-445e-bdaf-c9ceb08eb05e": "What measures can be taken to protect GAI systems from prompt injection attacks?", "41daecd9-dceb-4f72-a42a-e18ea7fda937": "How do conventional cybersecurity practices need to evolve to address prompt injection threats?", "fd25cb30-3e1b-4f40-bd7a-02df040ec85c": "What are the risks associated with malicious prompts in GAI systems?", "c5b9acfc-ac53-4dbd-85d3-f28fa4f32c26": "How can security researchers demonstrate the impact of indirect prompt injections on proprietary data?", "6d74ac43-d15a-4127-8db7-cef0fdb9d105": "What is the importance of using representative data in system design?", "c71c8dd4-63a7-4818-b770-97043dc5007e": "How can organizations protect against proxies for demographic features in their systems?", "2d5bfd5e-dbdc-4c0c-818e-615f91006738": "What are some best practices for ensuring accessibility for people with disabilities in design and development?", "0029c940-ec04-411f-a02c-3d2ed811162a": "Why is pre-deployment disparity testing crucial in system development?", "a07d446c-3b58-4fde-86ea-42a39458f472": "How can ongoing disparity testing and mitigation improve system fairness?", "c3e83063-2c07-421c-8e37-a16fff3f39e8": "What role does clear organizational oversight play in maintaining system integrity?", "606bee4d-8e16-4cd6-a403-2f19c040f21c": "What is an algorithmic impact assessment and why is it important?", "553267de-5d7a-4b26-91a6-2066782e1eca": "How can independent evaluation contribute to the transparency of algorithmic systems?", "2663ee64-79f0-4c1f-8410-651d596a09cb": "What should be included in plain language reporting of algorithmic impact assessments?", "dddf4214-014b-4cae-a517-b49cd94d6dbb": "Why is it important to make disparity testing results and mitigation information public?", "791a3b0f-42da-4f1e-8230-e984b9e83a5d": "What are some examples of automated systems that can impact civil rights and civil liberties?", "2e9b844a-3516-48c7-9857-37ddb2b13880": "How do automated content moderation tools affect free speech?", "41172f4c-70e5-4cec-9baa-a1647ae750e3": "What role do predictive policing algorithms play in the criminal justice system?", "e28c299b-5a05-4d08-b18b-a133fd152cee": "How can automated license plate readers impact privacy?", "89e46e02-8e3a-48fb-aee7-332e3cb87322": "What are the potential risks of using real-time facial recognition systems?", "d4f21fa8-9d60-4e63-b735-56afa6012b7e": "Why should the Blueprint for an AI Bill of Rights cover automated systems that do not yet exist?", "6605c4f9-86eb-48ba-970b-2ec5cbda43d5": "How do surveillance algorithms affect civil liberties?", "ed41893b-1bb9-4e32-bf94-8499f5845e84": "What is the significance of including speech-related systems in the AI Bill of Rights?", "06f4e666-7e80-47d4-920c-020d8e66d063": "How can risk assessment algorithms in the criminal justice system impact individual rights?", "d8fdf8f4-8a77-4d9d-9d04-353cf7713d24": "What are the implications of using automated systems for surveillance in terms of privacy?", "94bdbb5b-6d65-480b-8601-b46cc88fc568": "What are the potential harms of AI systems in terms of political opinions, sex life, or criminal convictions?", "455d3295-e1b6-4838-9757-ee00fbf6fc3f": "How can disparities between groups lead to harm when using AI models?", "dcad9f20-5d3c-4fa0-a3da-0807a6e8e29c": "What challenges arise in establishing a baseline scenario to measure harm caused by 
AI?", "a9fffb40-a9a9-44a4-8bd7-139528a1ab89": "How can biased behavior in AI systems result in harm to different subgroups?", "d0babf07-9aad-4ae7-a6cd-85ecdcba8291": "What constitutes an unambiguous harm caused by an AI system?", "89597661-548a-4a06-a978-89132aba1eb0": "How can we determine if an AI system has worsened someone's situation compared to the absence of the system?", "f9670d38-896e-4fbc-8f5b-b9d0037c76ff": "What are some examples of harm caused by disparities in AI behavior?", "a0b6a03d-c670-468c-bce1-9294b6d93b53": "How do divergent views on disparities in AI behavior affect the assessment of harm?", "86dbbc9d-37e8-4fa1-87dc-808e4eb2b54e": "What mechanisms can be used to identify harm caused by AI systems?", "001c0aca-6ef2-4987-9528-4e438dccb697": "How can we address the issue of biased behavior in AI to prevent harm to subgroups?", "ff4430a8-31f2-4f0d-a6e0-5c8649634b27": "What are some effective strategies for disparity mitigation in automated systems?", "e535b78c-e479-4e22-9856-3ae2f3a944b8": "How can we identify and evaluate multiple models to select the one with the least adverse impact?", "07577988-b22e-464d-a114-5a3836a9b93e": "What steps should be taken if adequate mitigation of disparity is not possible in an automated system?", "dbb54f3d-f426-42fa-85ac-fdf51750e91c": "How do unobservable targets lead to the inappropriate use of proxies in automated systems?", "7fdfe1a9-36d4-4691-885b-a3946764874a": "What legal requirements exist for mitigating or eliminating disparities in automated systems?", "7cae1803-9318-4bf5-a8b5-7013514edc4f": "How can modifying data input choices help in reducing disparities in automated systems?", "692e4a6c-a9d5-46ca-a0d6-9a31d7f06f67": "What are the potential consequences of algorithmic discrimination in automated systems?", "9601cee0-678d-42b5-a2a6-8db03a41e877": "How can we ensure that an automated system aligns with equity goals?", "087c8f2b-0171-4cb4-81de-f085a3a439ca": "What are the best practices for conducting a disparity assessment in automated systems?", "9ed4da28-5f9b-4191-8f88-b3d3f17c019c": "How can meaningful harm caused by disparities in automated systems be identified and addressed?", "1a5f3970-e2ad-4c5a-b2f2-464b42ba6670": "What are the common sources of bias in GAI training and TEVV data?", "1f4eaffe-4d1e-4e65-927f-b30861a0c252": "How do differences in outcome distributions across groups affect GAI systems?", "7108defb-707d-4f17-be57-997ab61d8de5": "What is the impact of the digital divide on the representativeness of GAI training data?", "dbfa0169-4237-4e27-a0a2-6f6add38775e": "How can latent systemic bias in images and text be identified in GAI systems?", "786162d0-4467-47da-a17d-83e793b39e99": "What role does demographic group coverage play in the effectiveness of GAI training data?", "2dccbfb2-4d4e-4e46-bb34-553b404afd69": "How can input data features act as proxies for demographic group membership in GAI systems?", "2026e84e-f0b0-41f3-a034-e2ee060d5e42": "What methods are used to filter hate speech in GAI system training data?", "af8f1310-f89e-41d6-a386-57d253594be4": "How prevalent is GAI-generated data in GAI system training datasets?", "cc1890d2-8f2e-48d0-abcb-8f49bfb2f5b2": "What are Winogender Schemas and how are they used to study gender bias in GAI systems?", "2ee7b790-cd75-4629-a52f-8036fad31546": "How can the completeness and balance of data sources be measured in GAI training data?", "53c25f8b-f67e-480c-ab2a-951cfbd311df": "What are the technical risks associated with advanced AI models?", 
"8be2ee75-d646-4383-b30c-966d720b0fa8": "How can AI confabulation pose a risk to safety?", "f18be8d3-47f5-4006-83fa-afb8ce01872b": "What are the dangers of AI making violent recommendations?", "4a8b4330-7921-4bb8-becf-4ea1bb6b54e3": "How does data privacy become a risk in the context of advanced AI?", "ce71108e-36bc-49d1-a29d-19023f609a26": "What is meant by value chain and component integration in AI risk management?", "4a77b200-1ee4-4bbb-b131-970c384060be": "How can harmful bias in AI systems be mitigated?", "3aa2d071-ce40-40d5-be9e-8219f64dbef9": "What are the potential risks of AI misuse by humans?", "73bb4bbd-de45-44ae-9b2f-d51c7dc9c39c": "How can AI be misused to spread obscene or abusive content?", "e5a3d5df-7333-4465-9aef-c982135b06e1": "What are the systemic risks of AI to the ecosystem and society?", "92a08a8e-413f-431b-92b3-ab96effc5cfb": "How does AI impact intellectual property rights?", "4e3b03a7-bfc2-43b1-b05b-f1a0c38eff6c": "What is algorithmic discrimination?", "70f62e50-5170-4ad9-928d-15660d1f86b5": "How can we protect against algorithmic discrimination?", "674c4e49-a7d5-47f2-bc90-df8320d389ce": "What are some examples of algorithmic discrimination?", "c61b9e7b-0fb9-4e82-a7c4-64d87b67c0b2": "How do algorithms lead to discrimination?", "2e142a47-da67-4d69-b1f4-c340f690597a": "What laws exist to prevent algorithmic discrimination?", "4e2fb6ff-c26c-4485-adb5-69581eb0a253": "How can companies ensure their algorithms are not discriminatory?", "114af13c-f1ec-49c5-a9a2-f604e098e204": "What are the ethical implications of algorithmic discrimination?", "454c3073-ee6a-49e6-b85d-712601950be6": "How can algorithmic discrimination be detected?", "890c20df-f21b-467f-9126-136a56509c15": "What role does transparency play in preventing algorithmic discrimination?", "ff14594e-b5c4-4958-99b5-c3c354b9f502": "How can individuals protect themselves from algorithmic discrimination?", "4b585781-3f5c-4022-b77a-4d5874711f71": "What is the significance of the watch list that Chicago police fought to keep secret, as reported by Mick Dumke and Frank Main?", "c3bfc377-21d8-4ea9-aa9d-28bf11fa7174": "How does the ACLU case in Idaho highlight the pitfalls of artificial intelligence decision-making, according to Jay Stanley?", "04ce1a4c-76ce-4640-9775-83d32fb8e781": "What are the key provisions of the Illinois Biometric Information Privacy Act that became effective on October 3, 2008?", "ee94abbb-0f06-48b9-944e-c9be35cad15f": "What is the ABOUT ML Reference Document by the Partnership on AI, and what does it cover?", "d9f37ef0-5071-4f8e-9872-db98336653a5": "How does the model cards framework contribute to transparency in machine learning models, as discussed by Margaret Mitchell and her colleagues?", "66ebcef0-85e0-4e6d-afc7-bdd573d73837": "What were the main findings of the Chicago Sun Times article on the police watch list by Mick Dumke and Frank Main?", "508213e7-c376-4ead-bb89-162fc3fef0e7": "How does the ACLU blog post by Jay Stanley address privacy and technology concerns related to AI decision-making?", "67c9bee6-f621-4966-ba5b-b12a16c01f45": "What are the implications of the Illinois Biometric Information Privacy Act for businesses and individuals in Illinois?", "061e1226-744d-4d65-8114-0fbadd9b3d06": "What is the purpose of the ABOUT ML Reference Document, and how can it be accessed?", "af11a645-0690-4cf6-929f-476f6853ee58": "Who are the contributors to the model cards framework, and what are their roles in the development of this framework?", "d7605540-f9a3-4e51-9036-60f1c4df408b": "What is the 
Biometric Information Privacy Act in Illinois?", "379f0256-6648-4343-82bd-85d0ba551fe2": "How does the Biometric Information Privacy Act protect individual biometric data?", "097159ce-edec-42b3-8e7d-f7eaa02730ca": "What are the provisions of the Biometric Information Privacy Act regarding the use of biometric information?", "80dcda41-28de-4d4b-a5f6-d968bf53fca2": "How are people in Illinois notified about the use of their biometric information?", "ada88ac3-f4cc-4c8c-9572-1eef6b100f8c": "What are some real-life examples of laws protecting biometric information?", "a9cc0b3d-ed88-41d5-9b61-453f88b305d5": "How can technical and sociotechnical approaches protect rights and access to biometric data?", "465a72fd-3256-436b-8e18-d0a099b3fc3a": "What are major technology companies doing to communicate with the public about biometric data use?", "8ffdf82f-7776-4d09-8d87-dd3b9b2d6dce": "What are the requirements for private entities to collect biometric information in Illinois?", "81883056-15e0-4dac-a219-beb17eae9f58": "How do policies help in protecting biometric information?", "30810cb5-f31d-468a-b938-a53597ae1d54": "What practical approaches can be taken to ensure the protection of biometric data?", "e7dc392f-0994-4434-83ca-5a2b95bfb984": "How have chat-bots and AI-driven call response systems improved customer service?", "d17c2c7a-b481-4a0a-8c1f-ecce10c054d1": "What are the benefits of using partially automated customer service platforms?", "1fd47749-e6b5-4a4e-9982-0d7a4096be47": "How do integrated human-AI systems enhance the speed of customer care?", "886cbf14-7c71-4800-97a2-a3f8f91c9aef": "Why is it important to maintain human agents in customer service despite the use of AI?", "c68e9ed0-dbc2-47cb-8531-79bafa03558c": "What role do human agents play in resolving complicated customer service requests?", "009e2017-2f03-4ae5-80e7-1e266e24c0c1": "How do businesses compile common customer problems for human agents to review?", "7e569e34-0129-4adc-9ffa-13874c019581": "What are ballot curing laws and how do they function in the context of voter signature matching algorithms?", "97e257bf-87aa-4bbd-a07a-8ebb0d758152": "How many states have ballot curing laws that require a fallback system for voters?", "78b69abb-f1c6-4398-9e4e-619d89054b29": "What happens if a voter signature matching algorithm incorrectly flags a ballot as invalid?", "9ede1912-c15f-41cf-83b6-4ac0a0be6c0f": "Why is it necessary to have a review by an election official in the ballot curing process?", "1c3e63ce-81d0-49de-920c-d4c81fd0d223": "What can I do if a former employer provides false information about my job title?", "729f05ea-87c5-43a9-8190-306ccc177546": "How can false data from a previous employer affect my future job prospects?", "99785c94-c276-4ed1-8039-13f1d0943fdd": "What are my rights if a company supplies incorrect information about my employment history?", "0c4d45a4-e1e2-46e5-b8cd-59ac132cfb62": "Can a job offer be revoked based on false information from a previous employer?", "fd23ff35-ce12-4ab5-8f00-6522d5466c82": "How can I dispute false information provided by a former employer to potential employers?", "68940d08-efcc-42d0-96b0-7e26d8c550cc": "What steps should I take if I find out a former employer gave false data to a bank or landlord?", "b8ac00ac-57f4-4259-aead-6d6f608ccea9": "How can false employment data impact my ability to rent an apartment?", "fe589612-ce8a-4fec-bfb6-4a9173dff0a2": "What legal actions can I take if a former employer provides false information about me?", "ae7f2e11-db6c-470f-9093-9964aba79b39": 
"How can I prove that a former employer supplied false data about my job title?", "9ccf00e2-cd6a-4824-8e75-cb8c6bc51c37": "What are the consequences for a company that provides false employment information?", "d7947ff9-c5a1-430f-a50d-48369fe5d238": "What is the National Science Foundation's program on Fairness in Artificial Intelligence?", "da12cd0e-8846-4a5c-84a5-84fc2b21b168": "How does the National Science Foundation support research in explainable AI?", "aad8a5bb-bb98-4c8c-942a-60d55b2f3a3a": "What are the goals of the National Science Foundation\u2019s program on Fairness in Artificial Intelligence?", "62e9df72-50c1-414a-85e4-ec1b02cc7b3a": "Why is explainable AI important in the context of fairness in artificial intelligence?", "e688d828-a12e-4b21-a574-d41223a895e4": "What kind of research foundations are being developed for explainable AI?", "d9a5744c-7e1e-4f1c-bfe4-9ccafe96cd82": "How does the National Science Foundation define fairness in artificial intelligence?", "bc9af756-894b-4f0e-936f-fc6f94db6b7b": "What are some examples of projects funded by the National Science Foundation\u2019s program on Fairness in Artificial Intelligence?", "c24ff655-ee05-4769-96a1-1598937bb5d5": "How can explainable AI contribute to the development of fair AI systems?", "6c682518-b844-4763-a6a6-0c784122c7e9": "What challenges are associated with creating explainable AI?", "02b62987-5164-4ddb-b1ea-ea1c5ae2bfa2": "How does the National Science Foundation\u2019s program on Fairness in Artificial Intelligence impact the development of AI technologies?", "9d1e0807-b733-47c7-aab7-6db6d469a140": "What are the legal limitations on reusing sensitive data in domains like finance, employment, and housing?", "11e017f1-450d-484e-80d0-fd445c1441ff": "How can extra oversight ensure the safety and efficacy of sensitive data reuse?", "09d9a596-3e41-43ef-83c1-590975022445": "What are the benefits and risks of reusing criminal data for civil legal matters?", "1845dce6-8092-4224-b27c-4416e17dad7c": "What measures can be implemented to mitigate the risks associated with sensitive data reuse?", "aa8a4f38-7a8e-4236-a7d0-6066bfef485e": "How should sensitive data be labeled to identify contexts for limited reuse?", "31b0b161-5d09-4e06-8df0-3cd3719087e0": "In what ways can aggregated datasets replace individual-level sensitive data?", "eba60a78-3558-4289-b6c4-c260c5ddd08d": "What are the requirements for demonstrating the safety and effectiveness of automated systems?", "3e075c52-14a9-4ce8-a841-7081ebf2773f": "How can automated systems be designed to allow for independent evaluation?", "76205606-28f9-4aa8-b954-f88b34b55c55": "What are the potential benefits of private sector use of sensitive domain data?", "da56e9b1-2461-4b61-9841-d2518ab0182c": "What are the criteria for legally authorizing the reuse of sensitive data in other contexts?", "9c269a12-6410-4ffb-b5cc-7e74a8f47bd2": "What are the National Artificial Intelligence Research Institutes funded by the National Science Foundation?", "b2d39dd5-4224-4f84-9380-d8ed71a03606": "How can I apply for funding opportunities in Cyber-Physical Systems through the NSF?", "607bb0c5-d732-4661-89d2-8c8968dc8954": "What is the Secure and Trustworthy Cyberspace (SaTC) program by the NSF?", "137febbc-d1a5-48ce-ae7b-8fbe207161b9": "What are the objectives of the Formal Methods in the Field (FMiTF) program by the NSF?", "0dad671a-1be1-4246-86ac-e3a70cf4fa38": "How does the NSF's Designing Accountable Software Systems (DASS) program work?", "5e1f7127-82d6-481e-b40c-00469b2a02f1": "Where can 
I find more information about the National Artificial Intelligence Research Institutes?", "ff09d5ba-b0ce-431f-a9e6-6443aef1d947": "What types of projects are eligible for funding under the Cyber-Physical Systems program by the NSF?", "ab935b74-ec3e-4c32-9554-a212ec3eac3e": "What are the key focus areas of the Secure and Trustworthy Cyberspace initiative by the NSF?", "77e27420-8614-491c-870b-1f18f4bc11ea": "How does the Formal Methods in the Field program contribute to advancements in technology?", "5c9a5427-5b74-4e51-97bf-ff3fe62e0773": "What are the requirements for submitting a proposal to the Designing Accountable Software Systems program by the NSF?", "ff62ad50-d65f-41bd-aee2-94ee02783edd": "What are the potential risks of automating HR functions like performance evaluations?", "fae2091d-78df-4aeb-84f0-c4796d91062a": "How can errors in automated medical systems impact patient care?", "1174d0d9-9f4e-48c8-b445-47e1b63e081e": "What steps can be taken to ensure automated systems in healthcare do not deny necessary treatments?", "a7594475-953f-4c1b-ba9b-7e06027df340": "How do automated HR systems handle employee appeals or disputes?", "38758d4b-c5d6-4e3f-a96c-ca37a83ffa88": "What are the ethical considerations of using automated systems to make employment decisions?", "8ff16b60-4549-4d28-a5f8-d09dc421e48d": "How can companies ensure fairness when using automated systems for performance evaluations?", "1e5dacbd-129e-4493-b540-f6526c84911f": "What are some examples of automated systems causing harm in the workplace?", "23c3efca-d4e8-49c3-b686-8b432530b4f6": "How can employees protect themselves from errors in automated HR systems?", "70b624ad-4e3d-49ea-b622-028680721234": "What role should human oversight play in automated decision-making systems?", "53b65fc2-c35d-42d3-bd12-9411f43f20fe": "How can organizations balance efficiency and fairness when implementing automated systems?", "79527f3f-64a1-47c2-8080-526e87d6de07": "What are some novel methods for measuring GAI-related risks in content provenance?", "6ecf170a-bccc-40bf-a94b-cdee16acc4b6": "How can we evaluate the effectiveness of technologies in detecting offensive cyber activities?", "2a759b35-2e4f-4be9-b7cd-da0671a48a2f": "What are the challenges in maintaining information integrity while assessing GAI-related risks?", "05a5cf4b-6f21-4f0d-a726-782a1b5f0055": "How do current technologies measure the risks associated with CBRN information or capabilities?", "7464ee39-be9e-4591-9805-942ced6f3320": "What strategies can be employed to ensure AI models produce factually accurate outputs?", "623fdaff-f939-45e1-b135-53865f42ef09": "How can harmful bias and homogenization be mitigated in AI configurations?", "cec41fa7-ba2b-4f27-b233-a38c64d2be39": "What are the implications of AI-generated obscene, degrading, and/or abusive content?", "4e306b7e-b18d-408a-a7ff-ff6ef4bb5ef0": "How can we balance the need for reliable AI outputs with the prevention of harmful content?", "861cc15d-b987-4ae4-8da8-87774f1fd4a2": "What role do representative AI actors play in managing GAI-related risks?", "9e1b0b50-9c70-4f88-b9e9-1bce21f959c9": "How can we improve the reliability of AI models in the context of offensive cyber threats?", "312e6911-48ad-414d-b366-80e0cca7c405": "What are the risks associated with increased attack surfaces for targeted cyberattacks on AI systems?", "822e0d55-b6d6-4fd6-b0aa-970bda988794": "How can the availability, confidentiality, or integrity of training data be compromised in cyberattacks?", "b39af8a9-fdbb-4957-b79a-1ff8ff6bebfd": 
"What are the potential intellectual property issues related to the replication of copyrighted or trademarked content using AI?", "789e4c72-791b-4dea-8f9f-92ecbfb598d6": "How does AI ease the exposure of trade secrets and what are the implications?", "f4b466db-87c7-4ab8-8d5b-cf86ef2d84e5": "What are the dangers of AI in producing obscene, degrading, and/or abusive content?", "e5767722-faaf-40d4-a45c-7a1913c24ca6": "How can synthetic child sexual abuse material (CSAM) be generated using AI, and what are the risks?", "e81ed8e0-0b64-4bec-b518-b4f8140f20b6": "What are nonconsensual intimate images (NCII) and how can AI contribute to their creation and distribution?", "583946a4-f8da-4e37-9457-b91e6a6ee12b": "What challenges arise from the non-transparent integration of third-party components in AI systems?", "63097304-7442-4cef-88eb-325c2ccf4638": "How can improperly obtained data affect the integrity of AI systems?", "e17ed95d-b504-4234-b183-12b62f58b109": "What are the risks associated with improper supplier vetting in the AI value chain?", "3f995ade-9508-4868-9f0c-834fe1fedd5d": "What are the key factors considered in health insurance risk assessments?", "faaf64ee-40f7-4b25-9728-d640745e0a90": "How do wearable technologies contribute to wellness apps?", "03221c60-4936-437c-ad35-65b5cb1935c1": "What algorithms are used in determining access to financial systems?", "d909e267-78c1-4718-abc3-4c5eacc1c332": "How do credit scoring systems impact loan allocation?", "4953d062-f6dc-4435-8edf-ed0f4e8baf36": "What role do automated interest rate determinations play in financial systems?", "a1f064d0-fe0e-4009-ad4e-da914ad74ab1": "How do insurance algorithms assess drug addiction risks?", "90c10536-7c1f-4080-96dc-6558f5fb05e1": "What are the ethical concerns surrounding insurance care allocation algorithms?", "9d896814-0ecb-4f79-9aee-8558f5e95807": "How do financial algorithms apply penalties like wage garnishment?", "9fba01ca-f4ac-40a5-845e-7e7798ee3c4a": "What is the impact of underwriting algorithms on health insurance costs?", "9c45750d-00eb-4f95-b273-e109238629c4": "How do wellness apps integrate with wearable technologies to improve health outcomes?", "9dfeb337-4771-4307-8f63-5a084befadf1": "What are the key steps in ensuring automated systems are safe and effective?", "5db1d569-a143-4b95-a780-abd6a626429c": "How can diverse community consultation improve the safety of automated systems?", "36cb7b8b-ec8a-4240-932d-f4d013ec010c": "What is the importance of pre-deployment testing for automated systems?", "4e1b4a56-946d-4b57-97a8-a7e7043aafbd": "How do you identify and mitigate risks in automated systems?", "ba44fb84-6b28-4604-b4cc-4e0cb6156a75": "Why is ongoing monitoring crucial for the safety of automated systems?", "709b6157-47f9-4f13-b778-73ea7b5cd999": "What should be done if an automated system is found to be unsafe after deployment?", "b831d5ce-83d9-44cc-8451-30165592bc6a": "How can automated systems be designed to prevent unintended harm?", "48591766-4e73-4127-9689-ac3ec34938b0": "What role do domain-specific standards play in the development of automated systems?", "70259660-a0cb-4dad-9af2-f1552f5c5405": "How can stakeholders contribute to the development of safe automated systems?", "5da3d175-a205-4f12-9bc7-b0694cc97663": "What measures can be taken to ensure automated systems do not endanger community safety?", "ba9a37a2-ff2b-496c-b75a-bea68b9b4d20": "What is the main argument presented by Darshali A Vyas et al in their article on race correction in clinical algorithms?", 
"77397e84-ee4c-4691-9eea-95549a0f45b1": "How does the Executive Order on Advancing Racial Equity define 'equity' and 'underserved communities'?", "c4c9deb3-0d0c-4b68-a899-07733f551dce": "What are some of the proposals offered by various organizations for designing algorithmic impact assessments?", "2af51b13-b8e3-4522-b662-cbd6f11fa87c": "Can you provide a summary of the article \"Hidden in Plain Sight \u2013 Reconsidering the Use of Race Correction in Clinical Algorithms\"?", "203b4219-e580-474c-8c70-588f4c24500f": "What is the significance of the Executive Order on Advancing Racial Equity and Support for Underserved Communities?", "c20f89ba-0198-4bb6-83d4-436159a5fecb": "Who are the authors of the report \"Assembling Accountability: Algorithmic Impact Assessment for the Public Interest\"?", "beea4d1a-fc3a-4f4c-b9fa-a3c73abf6041": "How does the New England Journal of Medicine article address the issue of race correction in clinical algorithms?", "b49c589c-51d8-4cdc-93e1-6d74c67948f7": "What are the key points discussed in Section 2 of the Executive Order on Advancing Racial Equity?", "71a79413-b607-4093-91a6-cedb011b3b57": "What is the role of Data & Society in the context of algorithmic impact assessments?", "89504f33-70c7-44e7-acee-ad69679cf410": "How can one access the full text of the article \"Hidden in Plain Sight \u2013 Reconsidering the Use of Race Correction in Clinical Algorithms\"?", "abccae6f-f846-42a0-ae0c-e7553ed3bcf9": "How do automated systems threaten the rights of the American public?", "1fe6aca7-365b-48fd-840b-21cafc375daa": "In what ways have technology and data been used to limit opportunities and access to resources?", "35d9684c-2d7c-462a-888f-687e5c1743d1": "What are some documented problems with systems designed to help with patient care?", "f3af11a5-dee0-41ce-9ec8-2befaa2c4b82": "How do algorithms in hiring and credit decisions reproduce existing inequities?", "f6a36e8c-7547-40e4-ab95-7e3640f7d9b0": "What are the potential harms of unchecked social media data collection?", "fd9e9dca-1dea-4621-9bc7-a973840546ac": "How can automated systems undermine privacy and track activity without consent?", "86f7a45a-b678-4ca0-9231-2659114e545f": "What are some examples of bias and discrimination embedded in automated systems?", "2807de8e-8f7e-4f04-9938-9baaf2698edf": "How can the harmful outcomes of automated systems be prevented?", "82042ccd-7ba3-45de-be88-dd7926ed2fa7": "What are the challenges posed to democracy by the use of technology and data?", "3f49ac16-f537-46ae-ac21-b82e769969c2": "How can we ensure that automated systems are safe and effective?", "ad65e418-3028-4caf-8dda-28cfc82b6e65": "What is generative AI and how does it create synthetic content?", "8a14b547-517f-4f3e-b937-5205af638a25": "How are foundation models related to generative AI?", "e996fac3-d63c-4fec-bdd3-645e2052ce20": "What are dual-use foundation models as defined by EO 14110?", "2fff0be9-8a59-42da-9d8f-9bf01c03c3a5": "What is the significance of self-supervision in training AI models?", "ff4c692f-1cfe-467e-9925-e44da6bac8a6": "How many parameters do dual-use foundation models typically contain?", "77dc336f-61a8-4597-8e60-7d6a28b2750a": "What is the role of the National Institute of Standards and Technology (NIST) in developing resources for generative AI?", "992eb21e-18d3-431b-82ee-6c8845b57e84": "What is the AI RMF mentioned in the context of EO 14110?", "754a638c-4963-45fc-b877-baf962d095cd": "How does EO 14110 impact the development of generative AI models?", 
"7837880f-1aaa-4a74-a820-8b848142069f": "What types of digital content can generative AI produce?", "83e7fc2a-5676-4a2d-9b53-fe9dc216d52d": "What is the purpose of NIST AI 100\u20131 in relation to generative AI?", "da4c933d-5790-469e-8c9c-08a80e387962": "What are the best practices for obtaining informed consent in feedback activities?", "b85c7dc7-598b-4f88-a94c-845f6bfd5cec": "How can organizations ensure they are following human subjects research requirements?", "e973c5e2-042d-4219-9ab3-9357946c544f": "What are effective methods for compensating subjects in research studies?", "3750af0a-f7d0-4923-8075-a53beefd9441": "How can feedback activities improve system documentation?", "5d13e05d-ba03-4e6f-8b60-c6aba8ce6ab7": "What role does informed consent play in enhancing debugging practices?", "de4376c0-1172-4302-94d6-b2308ff91785": "What are the key components of human subjects research requirements?", "35b33780-ab39-406c-a73a-5af626b322df": "How can organizations balance subject compensation with ethical considerations?", "2da11dfc-4bf8-44c6-b11b-bb784841bfa5": "What strategies can be used to integrate feedback into decision-making processes?", "9c4e8887-02a6-46a2-855f-dd49cdc870b1": "How does subject compensation impact the quality of feedback received?", "c3b4dffe-1622-4cde-b35d-0384b5fe8b90": "What are the ethical considerations when implementing feedback activities in organizations?", "ee6a09cb-bc0e-4337-94c7-9f98ea4e5ee3": "What is the role of the Software & Information Industry Association in the tech industry?", "b23e7b4f-f930-4e25-9deb-614c17f86250": "How is Stephanie Dinkins contributing to the Future Histories Studio at Stony Brook University?", "40981bd2-f90d-4edb-a063-87981f5fad81": "What initiatives does TechNet support in the technology sector?", "dc149b68-bca5-4e5e-912d-d7be8e71e054": "How does the Alliance for Media Arts and Culture collaborate with MIT Open Documentary Lab and Co-Creation Studio?", "d5696d57-18c7-4cce-961f-32fcfcc85df9": "What are the main objectives of the International Brotherhood of Teamsters in relation to technology?", "61c0fd7b-0e62-4a0b-9328-ce82b486e261": "How does the Leadership Conference on Civil and Human Rights engage with technology policy?", "6c72d4a0-152b-4b01-afbc-bbec5f6e4b2f": "What is Thorn's mission in the context of technology and human rights?", "8478afea-bd7b-47d0-8e3b-4ade048996f5": "What is the US Chamber of Commerce\u2019s Technology Engagement Center's stance on emerging technologies?", "f855fd92-7288-4188-bcc8-9e9d6658cb4c": "How does the University of Pittsburgh Undergraduate Student Collaborative contribute to tech research?", "9363da58-a7e3-417b-8b3f-f36f8cc73b2e": "What is the focus of the XR Safety Initiative in the realm of extended reality technologies?", "c7a8a9e5-f64e-4726-a954-752b9bfa62d8": "What are the key components of establishing policies, procedures, and processes for oversight functions in the GAI lifecycle?", "d4f81aa1-536d-4926-ab69-7bf7a5842d58": "How can organizations ensure effective oversight from problem formulation to system decommission in AI projects?", "6ec865eb-0ef1-4d0f-a574-83c9bcb8e93a": "What roles do senior leadership, legal, and compliance teams play in the oversight of AI systems?", "10621bd5-3020-4fd2-8090-ee0a5a365370": "What are the main tasks involved in AI deployment, design, development, operation, and monitoring?", "8a985b69-031f-42d8-b2fe-c3c003206f80": "How should organizational teams document the risks and potential impacts of AI technology?", "d55c5fb5-723c-4f9c-b466-fbe12ec885fb": 
"What are the best practices for communicating the impacts of AI technology more broadly within an organization?", "87cdba18-ff3f-490d-97f4-3679a7ab585f": "Why is it important to establish terms of use and terms of service for GAI systems?", "d1a0a484-a7da-40ad-b494-d8fba48d4cc8": "How can organizations address risks related to intellectual property, dangerous content, and abusive content in AI systems?", "d95e85a2-6162-47ed-bc37-6e7bf3a90580": "What is the significance of including relevant AI actors in the GAI system risk identification process?", "c8a9e9a9-ec2d-4b97-a906-cc52fec59a02": "How can organizations verify the downstream impacts of GAI systems, especially when using third-party components?", "050deb8f-2d38-4b19-abce-8d42367a3897": "Why is data privacy considered a foundational principle in modern business models?", "e4ead17c-b324-49df-9768-46f3947244e3": "How do companies use data collection to build individual profiles?", "2e67721a-7a35-4081-a350-c289a7b797bb": "What are some examples of how data privacy can protect against surveillance?", "ffcf5bdb-0269-4d50-a0fc-e0f5cd5d6cf0": "How do government agencies utilize data collection for law enforcement purposes?", "805ffbfc-6fde-4a13-af7b-92d425ac5ff2": "What are the potential risks of companies tracking the behavior of the public?", "b7173668-8845-4ff4-a8bf-82d58d4670cf": "How does data privacy impact the use of automated systems?", "f786de70-6184-40ef-bcd4-dbc7e418ec99": "Why is data sharing and reuse a concern for data privacy?", "dd27c837-114d-4771-b470-8edadae0e05e": "What technologies are used by law enforcement to enhance surveillance capabilities?", "f986530c-70e1-4674-a4a3-d2c2fa9f3c48": "How can data privacy principles help protect individual freedoms?", "d6f7085a-3f9d-41d4-a61c-de22f689406b": "What are the implications of expanded surveillance capabilities on data privacy?", "69e6c713-aa1a-4a39-b3b8-5a9c3d192560": "What are the potential risks of using the same algorithm in employment and lending decisions?", "b9ded17f-ece6-4a9a-83db-3d1c1ce214b6": "How can correlated failures in AI systems affect the labor market?", "118c93d6-f413-43f8-9eb0-5322555c7701": "What are some examples of unexpected shocks that could impact AI decision-making systems?", "378ad341-25e9-4a99-82e3-2d3b20c79183": "How might the use of AI in employment decisions influence job security?", "3e2c5916-b757-4486-950c-91ae7c275a13": "What are the projected impacts of AI on the workforce according to recent studies?", "604a356d-af0d-4af6-ae0a-2362a5215a8e": "Why are fewer studies examining the impact of General Artificial Intelligence (GAI) on the labor market?", "70c87bb5-4afa-45a5-a192-51c40b99026a": "How are employees and employers reacting to the potential disruption caused by GAI?", "6246f0e9-a17a-44cc-99f0-30db573afe58": "What measures can be taken to mitigate the risks of correlated failures in AI systems?", "e8751b61-9fee-43e4-99ce-3c16b3f81807": "How do industry surveys reflect the concerns of employees regarding AI in the workplace?", "5e9a5f8c-765c-428a-a81c-62574fa7770e": "What are the implications of multiple actors relying on the same algorithm in lending decisions?", "8fd0cff6-85a3-4dba-abda-91cdb762349a": "What are the environmental impacts of high compute resource utilization in training AI models?", "5cd61fe8-2405-49cc-93bf-9ad9a656887b": "How does the use of non-representative training data lead to performance disparities between sub-groups or languages in AI models?", "05c34830-40b3-4fe5-9695-44a5b4ddb868": "In what ways can AI 
models amplify historical, societal, and systemic biases?", "a5419133-45d4-4c99-9e69-273c2f4058cb": "What are the potential adverse effects on ecosystems due to the high compute resource utilization in AI operations?", "ef754679-4152-42b3-a445-a7b434eebddd": "How can undesired homogeneity in AI model outputs lead to ill-founded decision-making?", "832fc999-15fc-4b42-9654-2177da3e8011": "What is algorithmic aversion, and how does it affect human-AI interactions?", "15f4dc2c-db75-4f81-a8cf-149786ddd579": "How can over-reliance on AI systems impact human decision-making?", "e4d93cdb-eeac-42f2-9ae7-a646b63d3e7c": "What are the risks of anthropomorphizing AI systems in human-AI configurations?", "ec919833-ed12-42e2-9839-457e42e0568f": "How does automation bias manifest in interactions between humans and AI systems?", "3dd2b1f9-4fae-4eb2-8ead-27f913346a48": "What measures can be taken to mitigate the amplification of harmful biases in AI models?", "8eb81f04-21af-4b5e-b8bc-5dd9ddf12a19": "What are the most effective metrics for measuring the effectiveness of data provenance in security?", "7c17281e-f1f1-4b05-a681-4f085c6d7b0d": "How can user feedback be analyzed to improve understanding of content authenticity?", "57455a6a-fdc6-40a8-810e-ce67e69a459c": "What are the common concerns users have regarding content provenance and authenticity?", "cf632d03-f1b6-4eac-82f3-c0eeb5c6a7b7": "How do watermarking and cryptographic signatures contribute to content authentication?", "cbedd326-5ed2-4e1e-8b52-9599a3a3ef5a": "What methods can be used to measure the reliability of content authentication techniques?", "e199ba84-6648-43a1-8075-da2bccf8135f": "How can the rate of false positives and false negatives in content provenance be evaluated?", "42cbc313-aee1-442e-9707-f8a4cda1b659": "What is the importance of model integrity verification in content provenance?", "f174193d-52c9-4008-9de8-a830c2ab8659": "How does the number of unauthorized access attempts reflect the effectiveness of security measures?", "447c315e-07a6-4f78-bbc8-b783bcbf758a": "What role do digital fingerprints play in ensuring information integrity?", "3b87a3bd-9810-4130-9a77-016a6194b4af": "How can access controls and conformity assessments support content provenance techniques?", "6d54ebc0-4ddd-48e8-ac25-0f3b41399d6e": "How can I find out if an automated system is being used to make decisions that impact me?", "6126ed1b-d1c5-4b25-8dc0-506e2f075845": "What kind of documentation should designers and developers provide for automated systems?", "4e4463eb-454a-4f19-83a0-710cb38be7ed": "Who is responsible for maintaining and updating the notice about the use of automated systems?", "0875bf4d-94e5-4bc1-b7bd-a0618b496482": "How can I understand the role of automation in the outcomes that affect me?", "c4015147-e2ab-4686-b432-838d5504d8f4": "What should I do if I notice significant changes in the functionality of an automated system that impacts me?", "8f24b9bc-1fb7-45c6-9731-cce8c51d4bd2": "How can I get a clear explanation of how an automated system determined an outcome that affects me?", "ce820b6d-3bab-4138-b21e-3c94a05b131b": "What does it mean for an explanation from an automated system to be technically valid and meaningful?", "415c6ec6-f835-405b-bbcc-1e0ed22ea3e8": "How often should the notice about the use of automated systems be updated?", "7f81c5f0-eec3-40da-88f8-3a10b20297e9": "What should be included in the plain language documentation for automated systems?", "9fa50cab-5394-4aec-916b-5e47d663a1fb": "How can I know if an automated system 
is not the sole input determining an outcome that impacts me?", "ca32a8ce-0352-48e1-b059-40b626805076": "What are the key expectations for automated systems to ensure they are free from algorithmic discrimination?", "c303bf4a-19e7-45a2-a17a-3a2cbb7c518b": "How can automated systems be tested to ensure they do not exhibit algorithmic discrimination?", "3248fe7a-931f-43b9-a807-be31c9cba85c": "What does it mean to design automated systems to ensure equity, broadly construed?", "2541eb4b-0325-4e71-a932-aa9c2240ff5f": "How do existing anti-discrimination laws apply to algorithmic discrimination in automated systems?", "f12e1350-6a69-48ac-9443-7bdf2fe8a551": "What proactive technical steps can be taken to reinforce legal protections against algorithmic discrimination?", "d6014c0b-7667-4f4c-8c17-3d64e472ae45": "What policy steps can be taken to ensure equity for underserved communities in the context of automated systems?", "caf9948f-d28b-4304-b2a9-7b8099401937": "How can protections against algorithmic discrimination be integrated throughout the design process of automated systems?", "aa086a0f-d233-467a-a4d1-307fd662e78e": "What are some examples of sectors or contexts where additional technical standards for automated systems might be needed?", "1ed1b3ca-848b-43f1-9413-11eb5e39d5cf": "How can the development of automated systems extend beyond legal protections to ensure equity?", "30377abc-d189-4360-8bda-75aa620ba449": "What are the potential challenges in ensuring automated systems are free from algorithmic discrimination?", "5c610b06-5ba5-458d-96b7-23660211a937": "What are the main reasons people might prefer not to use an automated system?", "f2bd0bb9-78d0-463e-9e19-206aca0bdab8": "How can automated systems lead to unintended outcomes?", "9a107472-63da-4150-b45b-9ee5a0f906d1": "In what ways can automated systems reinforce bias?", "38c99ae1-7267-4154-86cf-10c475dfab87": "Why might automated systems be considered inaccessible to some people?", "1fdcc7e8-a5a5-4068-84b8-8860d4fae85a": "What are the potential inconveniences of using automated systems?", "fabe7ff6-7665-4ee8-9e67-18e690b5c99a": "How can the replacement of paper or manual processes by automated systems affect people?", "4f7e6756-8436-4690-bb5d-8dd5e1dc19d3": "What challenges do people face when trying to reach a human decision-maker after using an automated system?", "5c5aab7b-a9cb-4e35-9853-670e020c2aff": "How does the lack of human reconsideration impact access to rights and opportunities?", "110bdd1f-0551-477c-8ad2-fe3771e46d11": "What are some examples of delayed access caused by reliance on automated systems?", "d584dcfb-bb0c-406b-813d-7ed4f0a60630": "How can the principle of human alternatives and reconsideration protect against the flaws of automated systems?", "6f2db439-4624-4bad-9cfd-f22bb387bf87": "What are the processes for identifying emergent GAI system risks?", "71f13148-d4ab-498f-ad28-d437b31d648c": "How can external AI actors be consulted to identify GAI system risks?", "915bdb26-45ca-41a4-a5c4-026de9664a4d": "What is the role of human-AI configuration in managing GAI risks?", "3f1a9bb4-4f35-4ca2-8c26-6d0fe7f70c0c": "How does confabulation impact GAI systems?", "562ffbd0-42ee-4f9d-a966-3fb61c224bdf": "What tasks are AI actors responsible for in the context of GAI risks?", "6ca39f09-5de1-4418-b8c2-8326d872cf08": "How are feedback processes for end users integrated into AI system evaluation metrics?", "47ffecb1-e88b-42b1-aff7-8feb6a9ed56a": "What are the potential impacts of AI-generated content on different social, 
economic, and cultural groups?", "7d104609-63ee-48d6-a2e0-efeef372ac3e": "How can harmful bias and homogenization be mitigated in AI systems?", "5d2f156d-e171-4aef-b290-48f2732097ef": "What methods can be used to study end users' perceptions of GAI content?", "4fed3fad-982b-4a59-8f81-d2bad9cb3670": "How can the integrity of information in GAI content be assessed?", "a063b509-7e7c-4889-9a12-8706969e2004": "What are the best practices for AI testing and incident identification in an organization?", "1c222789-364a-4695-8083-7136f31694e0": "How can organizations measure the effectiveness of content provenance methodologies like cryptography and watermarking?", "350eaf07-f263-4761-aa56-cb44ce3ea3d1": "What criteria should be included in an AI system incident report?", "aa8cb59a-2ee8-4000-a39f-520d7f5ab6b3": "How can AI deployment and design be integrated into the value chain and component integration process?", "55a7d693-1922-4dd6-8840-e8017fd5d458": "What organizational practices are essential for effective AI operation and monitoring?", "088e1b4b-78b3-4dca-a354-7414b72febcc": "How can information integrity be maintained through content provenance methodologies?", "4979e972-3cfb-4a75-b2e2-92fc4640b928": "What are the key components of an effective AI incident reporting system?", "826ee64d-fab2-40f1-a6c0-911a7a68785a": "How can organizations ensure information security in AI systems?", "4216a185-7dcb-478c-b3cb-1cf32485b29f": "What tasks are involved in AI development and how do they impact the overall AI governance?", "1f50cb43-e63f-46c6-8a05-c03b8d9f604c": "How can organizations establish effective policies for AI system incident reporting?"}, "relevant_contexts": {"1d8a0cfc-df53-467f-88e8-cd378990da4b": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "9ec9d981-9115-4672-bd74-23035ecc2e7f": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "0605df28-3443-4e71-b065-c5e957b1a3be": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "bf3d5106-092b-4dcd-84c8-a6f876550060": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "84fa4ea9-be18-4dd5-9e29-a6a5c02feb54": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "bb5dcf48-cb19-47b4-b313-265f9f7cb3c8": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "bc5f183f-8819-4d54-9853-b8f799e02906": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "8d9f5ceb-fce8-4d14-9811-6140c0e69900": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "2f174ff7-4261-4c60-a49e-51bc536fd900": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "1cef392e-71ff-400b-a4bb-39f4c8d78dab": ["70527d5b-6c55-4982-ae73-eb6cbd605bc1"], "b2fdeb09-6112-4cbd-a860-6b770d65c9ef": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "50a10f19-e2b0-4f53-bf39-c09e6b2adedb": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "41d1c796-9d40-450d-bb90-58541f4dc294": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "8ea4ce00-241b-4a28-9f10-127da59b19a3": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "ee13b775-a27a-4b83-b019-8da1bfe33cd4": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "0835eb5b-ff9b-4c95-98a1-c1aa96b695d1": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "a25fa3f1-5fad-4e5f-8635-3c5d7564bc9d": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "cad3f6f1-e1cf-428f-b6c0-986a72e27923": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "7795698f-b343-44b2-90b7-333d3e874947": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "0563fa4d-b516-4410-ba89-1f0315067735": ["e1c576fd-2981-4519-abe0-a6954ac7bfc7"], "32e11143-ad57-48c7-b0ce-e4922c8b1cc7": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "94e3e1d2-3500-48e6-97a6-8c83517b41e0": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "abb3f98f-480d-4f2d-bf70-0bb22379dbae": 
["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "2a61da93-9e17-4be7-862f-ff9f941284a1": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "6b6dc282-ed05-4387-ae3c-28fea9f6aa32": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "a435043d-540e-4134-9d59-e921db6866dd": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "c64396a8-0017-4d46-aea9-58f81191f6b9": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "90341a67-3948-47fe-acaa-32e9896acdd5": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "a93e9f47-5572-4f65-810c-e65f0c0cbc35": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "6b14b109-2e7e-4d40-b149-1eb05bc882a0": ["b0c72830-d6f3-465d-9db2-3a0fe4eca18d"], "cde52e0d-45eb-41b7-89ac-0e8e28ac0457": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "0d6f47cc-2f11-4d1e-9d54-90b10f6bf2b6": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "ed8c6ae4-9a32-44a5-b820-0681f03fd325": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "cf07e760-9b9f-4be0-aaf2-9ce158ee1c98": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "7a9b270d-4022-4dee-8044-c221948c7d25": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "93f3824c-e2a6-424d-a45b-f7a6e593c5d7": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "c0441d12-0139-47e0-a780-13868a3b0469": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "7a65af06-b620-44fe-90b4-53bdcb7c8559": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "3cc7ff9c-22a7-4d2c-a02f-353ba7ef9fe1": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "daf97136-c510-4453-a3f3-dfdd41c8a7eb": ["c8f55f69-05d8-4ee1-a313-d97e683e995c"], "4f8ef4f7-9616-44aa-abfe-8cdaf1756b90": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "6f46f00e-c286-4eb0-815e-32faf4967e59": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "bf6d72d3-a20e-4589-8cbd-9e4fcf8ff4f9": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "1c142ec8-b4a3-456e-ba1c-a485944b6ca4": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "b39288aa-0a9d-4d94-82e2-35409659c7d1": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "21eaa61b-6208-40e7-b9a3-5a37038f9d48": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "1233bc76-0ce0-4740-bad7-5de774ca0fd7": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "988d7c42-6b16-42d7-8c3b-eca27423ec98": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "d0ee89a9-21ce-4521-a946-de628b905143": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "23394fb9-945c-4c8e-a361-a1cfd375abe0": ["0c405046-7c29-4c97-9cd3-b0fa85f4e26e"], "37aabec8-0539-4438-8489-8a52afbff96e": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "a8ce087b-ada7-4cca-8488-e90b57a28b9f": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "607fa183-8c10-4e68-860f-41798c764cc8": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "f9c3b55d-7f98-4fca-a8b3-2276e0cf58d9": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "f8fcd7dd-4077-4dd8-8a19-cea03ba0bda5": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "21f56f77-f14f-4eb0-ae84-a15ddf44c478": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "c1c10c23-aba8-48a1-a47f-0ea7b9ee4058": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "bb1d1592-7123-4163-a963-913da8006be9": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "07ea05d4-6b90-4936-9dae-b67d0e70640d": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "2f84ffbd-1a21-475b-a55e-ed7b70ad40b5": ["f477adb6-0df5-47c6-abe8-a23fe8b6feec"], "9e4c34f4-3d3e-45f8-b44c-86725048d92b": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "64f82a67-4768-4e6b-a43d-f04086818e85": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "7f45b7a7-9fcf-4391-89e9-8cc77c095f2a": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "78c93a0c-adfa-4c7e-a126-d47d3e13352b": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "f9d25a5e-23c8-45ee-aef2-d7db0c12b767": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "86604d73-f71a-46b4-b444-0f78f434d3a8": 
["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "511742cc-dc64-43a3-a033-bd545ce33e57": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "04a0b08c-92b8-44bb-886c-252f86746d2c": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "3e71278b-e893-4a19-955e-1505554c8686": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "04516eef-32ce-4fcc-8b77-263ca9de6e10": ["77cdea0d-2124-4f93-8aa1-8bca37d473cf"], "a0c0459c-054c-42ad-bffd-3e69e7a03bbc": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "b159acfb-35e3-4cbb-8158-d4d89ccc5870": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "391ca451-b0e7-43cc-afcc-32b7d1fa8da7": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "b15c0ca5-79fe-4cb6-971d-891ab63966bb": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "f8ed278b-212e-45c9-ae9f-5fe98bbd00fb": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "c034102c-8cc6-4f9f-aa97-54c4123b05b8": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "19a80a1d-4c42-4b4a-a9f1-8c58f96c948b": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "84b94c19-44d8-4b0e-a9ea-21d1b4e745a9": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "08b5ee06-991e-4f06-9629-71d3500295ab": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "4328f04f-39f0-472e-a479-26bcd1915133": ["ac99c8aa-92a2-4280-93c2-b20c7b858026"], "03a9fe91-d78e-473a-a2f8-3106648d7314": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "c5f06a48-24b4-4bf0-85f8-836e9d96099d": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "2335610a-fe1a-4ccd-b194-0d8b2734a4d1": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "fe0c5f11-3d4c-44d7-8f27-6a2da2f8536e": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "5342b516-b61d-4ea4-a2da-9a05638d3918": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "f43fcf7f-345c-4942-b43d-e75917a9d53a": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "4a15e20a-7144-40e1-93e3-114864d97085": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "32cfcd73-0b8c-4469-91c1-5f5209f15d84": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "85538c5c-826d-45bc-8de8-297408b1897a": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "08be8570-a2e0-4e56-af80-157c2427baf9": ["4b7be742-d2a1-4cc5-9bae-6c4cdda913c5"], "24c87258-bf5b-4144-b1cf-149daa5a5c29": ["2bbd9e32-096d-4731-9cca-baad231face1"], "1f1e3f39-8772-4eb1-b7f0-12ff89060503": ["2bbd9e32-096d-4731-9cca-baad231face1"], "69e2d6b3-12b3-47d1-a249-65657ba34d64": ["2bbd9e32-096d-4731-9cca-baad231face1"], "f139859c-a3ce-4700-a971-5548dc026253": ["2bbd9e32-096d-4731-9cca-baad231face1"], "04bebb3a-8ecf-4543-8736-671e1a4585be": ["2bbd9e32-096d-4731-9cca-baad231face1"], "df7b1cdc-bb0a-43b5-a10c-7189eb63373e": ["2bbd9e32-096d-4731-9cca-baad231face1"], "7cb4296e-4abe-4bd0-a8ff-edaf4a243ffd": ["2bbd9e32-096d-4731-9cca-baad231face1"], "12b8555f-380e-4b4a-adc7-418d59a61297": ["2bbd9e32-096d-4731-9cca-baad231face1"], "3aef7413-5f60-4e9b-a6ac-191b6972651c": ["2bbd9e32-096d-4731-9cca-baad231face1"], "65303757-3ce8-4416-96eb-99d843d040d1": ["2bbd9e32-096d-4731-9cca-baad231face1"], "a7a1d3dc-f420-48c3-a3cb-1e0a676e9e7b": ["170bd80f-9f9f-4392-a542-6c6048951770"], "24566fed-3d75-47f8-80b2-03f3d0e9e7d3": ["170bd80f-9f9f-4392-a542-6c6048951770"], "1cc3e51d-3cba-4b58-b525-f86babb21bfb": ["170bd80f-9f9f-4392-a542-6c6048951770"], "e0f9ad8a-c33a-4717-88ac-d8559f6c974a": ["170bd80f-9f9f-4392-a542-6c6048951770"], "d4342127-5df0-405f-bbe8-dc24711944be": ["170bd80f-9f9f-4392-a542-6c6048951770"], "65bf43cf-81c8-4872-ae3b-81bf21751ba3": ["170bd80f-9f9f-4392-a542-6c6048951770"], "2f3abec1-74b5-405f-b734-843ecc211a3a": ["170bd80f-9f9f-4392-a542-6c6048951770"], "a4768a2a-62c0-471c-ab81-e9e0d81d83c0": ["170bd80f-9f9f-4392-a542-6c6048951770"], "d68eeb41-7bcf-4744-80e9-7614c92438a8": 
["170bd80f-9f9f-4392-a542-6c6048951770"], "5d143734-c64d-4ee5-9600-544de3dc380b": ["170bd80f-9f9f-4392-a542-6c6048951770"], "2aac2f6a-920b-4393-b61e-98aa36d35f27": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "6de62d16-19cb-45d2-949b-92609186983f": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "8d9912fd-4615-4118-8134-a893b80920c1": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "8e48c274-2976-4927-b9ab-6bef0546c6e1": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "7c6ecd5b-5475-43a4-8259-af3a5f014d39": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "cfab0915-6e9f-412a-a432-bc0a06148923": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "eaa0dca7-f7a9-4bac-a8f3-dc0e410b9a94": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "67312adb-b7a3-4c16-84b9-4e4697e002e8": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "d1c669d5-4d91-4300-8e70-5a49b9b7b517": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "a44278d2-1d80-4211-93b9-785f091ee871": ["b3c303db-afa6-45c9-b4c4-9081fde8c735"], "670b6be9-bff2-4e3f-9ddb-48503a59925b": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "966409cb-250a-4100-9cb5-4cc606c16452": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "42ce5b4d-fe04-4d95-87e8-bd0a8f2a1cf3": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "5a9cd608-2ac4-4bfa-8cfa-ca357e9841ac": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "a18b2932-8fa5-4635-9302-7ee0cbaf194a": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "379549a5-d7e5-47a9-bafb-a7408746aca0": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "03540efe-c7b8-4bd6-8835-c6f7bb4e30b5": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "09f74024-8869-4f32-b626-2138e3564a74": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "d8dc0a3b-18dc-42ae-93f3-7614e9b55cfa": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "ecdf8fe5-f149-44ac-b465-3fbacb40865e": ["4fe6d91a-9662-4530-83d4-30121fbaa3e1"], "f5606d22-27dc-4b36-9ace-72d6cc48660e": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "ae491447-4cf4-4991-b43e-08454411a5a6": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "7ee78a3e-1ea7-4da5-875a-f65aa6f20054": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "2078e8f7-9bea-4153-bc28-2944699c7a6e": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "7b087c9c-e491-4d32-b692-81313a1e6afe": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "208c1297-47eb-4aca-9561-f53fdb900d7b": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "44b1a320-c340-4a37-bcf5-c93a6963e2e0": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "c223bce2-8a81-489c-9230-31400072a3fc": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "8b96696a-d368-4159-b644-3e82a3269da4": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "e0ca9dfa-fbe1-41a0-b064-9e4655a76477": ["65f61b92-532d-4937-a252-a07f66dc8b6d"], "ee3ed9e9-e13a-43ae-8468-ceb8ffcf49b6": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "56c869aa-9f69-476f-bb2c-ff1cb0080445": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "ecc2c5eb-78a6-4d19-b33b-19b08c0fdbfe": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "db262b36-e3f4-48a0-bddc-dc36a289a41e": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "6d61f78a-7ebf-45f4-a5e6-4881a583f4c8": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "d1b8703a-3114-4088-9309-0c02821f5a37": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "80cdf547-f019-49d5-b954-fc44fb501eef": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "074222ac-fda2-47de-b57c-5ddc1063b7ba": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "55189c9a-9917-4b08-9b39-7f0a6c48256e": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "50e8150e-4538-45b0-a17d-be191d1fa248": ["4bcaf9fc-e04a-4901-a926-656d64bc2300"], "eb5cb844-864a-468b-8c58-527d3531343d": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "62d2b5a7-b986-4048-bb97-766d7b98ce5e": 
["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "f72b3652-6c80-4c83-8aeb-9eee8ef96c3e": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "afb866f7-aa0b-4116-9378-f258a4bf12ad": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "a1ddee9f-3320-4745-8c67-624c95b262a7": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "099e3b3a-218d-4c72-8460-0c899484ecd1": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "b923df73-4a58-4d22-8b55-1a01145c5ac7": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "1464f6bf-4914-46ea-a181-13cbb9e7058b": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "4e333af2-62cb-4a18-9737-5e42d27891af": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "44cf4f5a-ee5c-4173-8c24-e6a9084636d9": ["be74e45d-d511-47b2-96c9-b56ff1e3ef92"], "732e3e58-5725-48ab-a40c-961aea398141": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "c0e18063-83e5-462a-b99d-ca04e73f72f1": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "ab81895e-d428-4fd5-bed3-030440a61e22": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "33ec5775-e26d-4365-a8e5-69cfded28cf2": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "595f4527-0e21-40a7-b9ec-6a757d80f511": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "1bc870b7-1589-44a0-ae78-e41f637ce93b": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "25ab951c-e59e-43dc-9315-79f04297d81b": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "c8004ebf-4187-4dab-b1fe-1c725fa9d4f4": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "99062576-8040-45bb-ba14-33e7d06843c4": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "f631fe4b-8511-4605-b562-6b32a5fc4ce3": ["56e2a4fd-2a2a-444b-83a2-afa40b190add"], "e909abea-82ea-425e-9eb4-e7fc5cfd1afa": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "8f2b141e-7e25-48c4-8d6e-c7649407098e": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "2504edde-ca0f-4ab0-b316-072c94ed4fd7": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "ee944244-c6e4-4f82-81c9-f965ee4f8f7c": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "fddee110-d5c1-4e71-a392-a18885c4d603": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "0a8ddcee-d0d1-41e1-a6c9-246be5b5ae5f": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "c51a2c1a-9f87-488a-9d42-41968bf4cab1": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "95c6ba82-ae2a-411b-8572-31a3d48cc6a7": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "0c97280b-063a-46a8-bbd5-f47e888354e5": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "fa8d6dc4-7eba-45d5-a66a-eb6b8ab2ecad": ["cb3de03f-35e4-4032-8357-256a59e540bf"], "21ceca0c-ce44-43e3-8f77-a9dc1f5fbcc7": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "d3a9c980-e575-44e6-bab5-07aaca0e2886": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "7f116d0d-2c08-40d1-817d-6f4c1826f047": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "380fee90-ebb8-4757-8fa4-7bb4a3b50dfd": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "f0b6fb64-5d64-4512-97bb-0a674ad3b42f": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "e2b27dbd-6b32-4fba-8260-c9b54e2ac8de": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "dbe0bf87-a716-4127-9f80-f9e325332d95": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "b8b6fbea-dea8-487e-bf33-6235d3a607e2": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "a0a96c91-6c5a-4fd0-8127-b7c974303838": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "dbd41bb0-0c09-4495-b286-dc9622de9e86": ["3c605102-fdfc-4821-b1f4-70662c729e24"], "0e1301c3-0b83-4603-9d39-8c374b37340d": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "c00fe054-52c2-4561-80e1-4b27a535ac03": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "fd45462e-12d0-40d0-a428-90041a7dff99": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "a531e2af-3989-43fb-8d5a-5808c6ec17bc": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "69db45ba-aef9-4c24-b15a-824ead34c0cf": 
["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "c8828f01-879b-4d90-bc2c-f97dde5bf765": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "ce0d270a-d64e-4526-97cf-96ef519cf24a": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "b8747453-8022-467b-b1b5-5145d44ff203": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "ddd8c6c0-1296-4762-b557-374a51ff3add": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "14fc80ec-eef8-48bb-9d01-33069ee9b8fe": ["79c6fb87-7d88-4ad2-81ff-29bd976fa9d3"], "df8094ac-8e28-4a78-838d-606526447758": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "4b8eb074-1b7f-448a-ac6d-be4d1ddaed05": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "3710b3fa-3a13-4381-b514-9fc7f8dea69b": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "8aa6ab37-2653-45b2-a725-a7735747f754": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "4c0876f0-776a-4fac-a0e2-a3148db5b6f9": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "288d8ca3-7d48-4d33-8b24-d9e7bc4d94a8": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "fe80034d-0f1d-4bd0-8551-8f0d79001f23": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "23dc8c4c-f982-4955-b9bd-af9b54a6b57e": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "90900ded-0b53-44a3-9a22-13f8a5988204": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "0ca2f759-ebbe-4a9b-817b-1c16c7d1e376": ["6f9495a3-d6c2-4cc8-9349-19f6faddcec6"], "40801caf-7abe-479e-9818-e654ea44c389": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "0943156a-41b4-4d6c-8add-0bb5010e1936": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "10a26c62-5c72-487a-b041-044cc2f6e7e7": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "66155b2d-caf6-421b-86c6-ca18b2579570": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "120752de-fe52-4f54-9e4a-49a9641d77ff": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "28130de3-a5d1-4a75-aef6-60681684793e": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "a74132c1-3fa1-462f-88d0-d418f51aaebf": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "66be8191-b960-4fed-b0ba-c92018e56838": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "edbb2f46-b7fe-4483-98f0-ebc5eef57da8": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "cb87b719-4b5f-41e2-af94-c5ee3c301e18": ["e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409"], "02e280c0-6258-47f4-bb62-1324b012cd32": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "57a1c358-afc7-4547-8390-0545a91d0b6f": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "9ec63ba3-4a78-4919-9641-06406a26d3df": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "30f03404-8efd-4a10-8b15-4563ba490e24": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "5911ed64-f3fb-4bbc-a354-710e9eb4a6c8": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "0a2e4e30-4e96-4313-8701-66fb74fbaa6e": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "754bdb70-fd0d-42db-b514-ed3e792c9c66": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "2b123ed9-5e26-4f5b-8bac-71dde9f51ca1": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "a72510fb-3627-482b-bc59-e8a9d2d9c7fe": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "77f146ae-3822-4a25-b535-3110c58c0759": ["2a987bbc-787b-4a9c-b59c-595f5b9c2fc8"], "b2d56c7c-fa94-47b2-9b32-0c4729fb2f12": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "31cf8128-b127-4673-88f6-875bd9d671be": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "4c7d4d1b-256a-408a-8d84-d8a691961a20": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "b5d4dcd0-59de-49a8-97e5-1bc4c4de0c99": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "43f7aac1-4168-4700-b4c2-ba575419b046": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "b5e47640-3a12-4f25-80db-01036d1872b5": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "032a7d8e-cbd4-4005-96ab-461828a13663": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "7ab5047e-51ca-40c0-8cc4-48fd68fcb7df": 
["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "0338aa3a-47c1-4a51-acce-90bfd9a2dcf2": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "5001a4f7-f1ac-4bf1-9152-b893c1f45412": ["1bf50b19-5134-4a8f-be9a-e74affa8cdd0"], "06e02bbe-ec13-46b6-a82a-c3eedbe9df94": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "c5af22e4-d127-4a04-a563-47eaad15d6e0": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "d073d233-f4cd-4e5a-a3af-b8fa829b6a27": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "f810f097-05b7-43e9-b7e2-fa7d21b5c53b": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "b56876d6-1fb9-40ab-86b9-a78a889bbbec": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "73e2f0c1-f0cf-49da-906d-fc8510921e3f": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "d8c94105-67dc-479c-a469-65e8104efca1": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "1a7e6b55-7f76-4439-9567-12cc8d835c96": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "c0aa67d6-de29-4bf6-8b18-04bbaf49240a": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "88df5351-74b0-465c-9de9-b5807548c0dd": ["3203966f-15b4-425b-b621-fd964e3bd46f"], "168b2e48-4e05-44a6-b944-aa2feb95f9e4": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "b72701b8-aef7-496e-8747-79854e65fcda": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "68af651a-6b8b-41bb-8f3e-14604272284d": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "7faa544a-97f1-4d9c-804c-36a978652159": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "3d27b603-05bb-4a64-bc55-e9add2c9f314": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "63d1952d-d789-4fd2-b18a-c7ddf8ce8cc1": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "44065a66-2c8a-4d0f-9ebb-8dcfbdefe195": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "fa9aff28-f81d-4034-acb6-19b295cae5fa": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "7e79602f-7d17-47d4-8874-50337e61fdc9": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "eba5ba0f-95a3-4429-9437-7d0be9b5e0b8": ["4bcc930c-273e-4cbd-bba5-a297e9f9e390"], "b3486f90-735d-49df-86bc-739ba6403a31": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "d5195aba-7870-4c55-961d-ed0ffe276376": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "f0365668-2d3e-4a22-aac1-d25a64683ef8": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "ce4365ac-9fdf-43ce-b139-503ec9ad37a4": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "6737307a-5bbc-4ab4-b524-9437eee812f1": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "e209d21e-f9c1-41cd-bbf5-14d112a1db70": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "139ad9ad-384f-497d-af84-dc9b24d2dade": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "a24c2df7-395d-4019-8e4c-feb9d16b19d6": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "93b1cd97-2fe1-4152-ab7c-a0efbf49a6b5": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "9c028b3c-c92c-4ca5-b7c4-e8d1c358b6c6": ["e4abdb97-5b2c-4dc9-974e-3357719369a9"], "0bfaf653-4226-41e5-865c-a5d63578d4e7": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "0fd4687b-e9e7-498f-acd7-9eb020850852": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "ebdadd5c-1215-40f5-8b2b-f242fbd35451": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "fba46c57-4104-40f5-bc91-fb9b706c2ef2": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "dfef4df6-fa61-419f-82ae-6a148536db56": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "0fa04860-d710-40b5-856d-1dbc9d3bc759": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "6275567f-707b-48df-93ce-787e58e407de": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "5c0dd9dd-bd3d-4f2f-b1d3-e8d142665e33": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "be446845-3854-499c-886a-d08121ae6700": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "21abc9b0-3037-4ca1-bebe-3ebbb20cbd20": ["e1c3047a-75b7-40a8-b937-22cfe0b3f2b1"], "735c4032-1519-4fd5-8cdb-0e9a0ce6da5c": 
["48516a8d-e470-431b-ab6f-2b090a40107b"], "9daf1259-d81a-4cc8-8d81-e60f2962b064": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "d927d226-242f-4436-8243-c62ee8339ab8": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "920eaf59-a989-498c-8964-7ac0a342d7b2": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "1ddaf3a2-3fbb-4e1f-8bea-3a5bd70e5f8d": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "aa9fb7f7-6205-45a6-8e2c-e7713a8420fc": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "f738f637-ecf5-43fc-b89f-65431bac4cb2": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "9db348ec-94bf-435c-aec7-b1da0e3acad9": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "2af37117-da7d-4d16-afb2-b96ea0513e67": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "2939615e-23fc-4214-966c-6ba4fe1b7335": ["48516a8d-e470-431b-ab6f-2b090a40107b"], "a7c4e78f-9e12-4e77-80a9-1b6d0e378dc7": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "f9d93ab9-108e-49db-901e-3dc647847f97": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "73c8f947-0282-4541-8b91-1746ecf82b5a": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "f28b1c3a-c692-41ea-8144-dd7b5230aab8": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "14f8c882-d08f-4bd0-923e-5d8b8841fd45": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "17eb019d-fa03-4024-90b1-435b97d565de": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "e0f19f85-0585-4ce1-b033-c658c72c6731": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "ff276cd8-c1b4-4ac4-ad1a-fe49517a74c0": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "afaa6994-4806-44cf-958a-057084b24a97": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "b7b673c0-0b37-42fe-9b9a-93758490f9ef": ["7b016e8e-1c9f-40b9-b7f5-246b6e7584d5"], "98816e76-3c36-44a7-9f7b-e1c94ca23586": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "8fe3db0a-9fc1-49f4-b08a-ed387dd67784": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "62865470-d8f3-4ebf-bf2b-050f5fcbce3b": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "fe41122b-c0e0-4a47-9424-b21efd0d8405": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "c5fabcc7-22e9-49c6-b965-c80316d5b4d8": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "8086f26a-3e53-45ca-8e09-adcaeb35b100": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "61e1e6a5-ba18-4763-9a90-5f6b278dcda0": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "45090c7b-a6aa-4517-8b79-30964e872d62": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "3b005aea-9582-4e7e-b2ed-86f7c16dffa6": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "2bb3e489-4c81-4e1b-aa8a-149336e61c0f": ["498bff21-8221-4f12-b4c1-93f47337fba6"], "77aa993b-372c-4534-8252-ca9c4010c1cf": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "6a1dc5f7-53a2-431d-80bd-8ddfbb546084": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "0b5870d7-28c3-4da6-82be-4f2dcc63e1bf": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "673c25a5-a7a8-4bfc-82aa-81b7f1bda04f": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "ea441231-122d-47f5-8f5c-11085f9fd1f4": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "0ed1645c-4661-46cb-8443-35a4c8e8ce60": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "11076db2-fd60-471a-aba9-62b355aed19e": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "3ae32342-738f-4cb9-b5dc-16fcf2130ea2": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "ff50e166-1288-43c1-a067-65b0cf902d06": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "f12d83b4-be05-4565-86df-f93a0c64b324": ["ef7137f6-e670-41e6-8905-2d7c0a133bea"], "37dd7447-0e75-4e14-8e17-865d6555c74a": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "1687fec4-dc70-4040-b728-4b5e44edc435": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "10d62236-489c-42fd-81dd-7885112ad9b2": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "e9cd49c5-55d2-4b58-b05a-d0534264115e": 
["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "e659b628-dfa2-49f4-a768-e8761ae1c74e": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "7ea78477-98bc-4ff3-8d95-b316f391a035": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "74061af4-43ea-4d23-9765-abe2b5fbb9e1": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "dfdb3835-219b-4d3a-9673-d2923df2fb4b": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "18a0946d-d4d3-4c73-b1bc-ef5d2d6cca86": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "3f73048f-6c23-48db-be72-402295190c87": ["d3ad3601-20e3-41bb-bc05-092a46287a0d"], "f9d78f26-8f38-45a5-a05d-126e99e0b295": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "caec4118-4465-454f-8361-aebd8d21a912": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "318fb3a4-b202-4e61-870b-b9a18c7c8f54": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "3681a289-6754-4f14-9c68-bba82f3179af": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "424ac5ef-46e1-4f01-bee3-d71a0e907e5d": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "032fd89f-0b92-4533-a520-39fe68cc1744": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "a788ae08-165b-4797-b530-9054fffc79b9": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "c823ae3a-5c23-421a-a316-3fb0a90fdff6": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "4e06d594-f59f-43ed-9c42-766b3d71377a": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "f0a8cfb7-ca5a-41af-874f-24ca1c10564a": ["d1e994ed-591e-4c95-a8d4-cf84a7926602"], "ceea82a8-3f36-499c-8999-f705089fd5c4": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "4da1b1fe-5a0f-4873-9a51-592e7031b5f0": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "2d5f0e7a-37f4-41f3-a270-083718406673": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "b597f3f4-73ca-41d6-805f-ea8566ee4335": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "51a36031-cbe6-4896-a26f-0d1231ac9ac5": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "28e9d3c8-9aad-4ac2-8b76-8112ba317fc7": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "3cd1b051-c5ec-4ba6-85a3-6c8ecc3d382e": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "93157218-65e8-4b27-bdc0-d3b5c75016f2": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "6a607a7d-6808-444f-a772-aa55eccd8e61": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "522e086d-9b12-48c9-b070-b9f0c6b871e8": ["aa34d6fa-7664-4579-90fa-4f3a7ef8610c"], "ce02496c-71b3-4ab7-98bb-eed1cc3edc11": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "55c73a61-2c93-43db-b06b-142fea09d1a3": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "8dae2406-bb95-4836-9224-224fa830dd81": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "1ddfec1f-63dc-4114-8d2c-6c6b080df23d": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "ca588f53-c672-4ec4-8c2f-4edb75780625": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "f01609ce-a9ca-4166-bc21-4837e9f8dbe6": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "bf05189a-5783-4529-9a04-33ecb2b039f6": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "69cd9e44-ebad-4444-bd6a-0ffe7dd0fea1": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "5592a53b-bc51-4eb2-ad09-d91eab84cbca": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "da87855a-e8d3-464f-b44a-2f1c0f36efd9": ["8e92bbe0-47e0-42c1-acae-c650f210bf9f"], "537d6d33-2d70-4cda-bfd2-a7e7d58dca1c": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "f9c45eea-e5e9-4963-b36b-eac4e1041f10": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "95df7bca-5759-48de-aded-0bdcfd73b47d": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "08b6077f-18cf-437e-93e3-b2272cafdf09": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "32c66693-7c38-4482-b27e-1226a0d82810": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "b0213953-d766-4b10-aa1b-c7170ea2c1ab": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "a7153e52-4939-4c06-bfa6-43f880a88d70": 
["64a6a839-474e-4542-96a8-cb04179fdd27"], "c1a6fa3b-e163-482c-8ca2-b05ca142d9f1": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "1b8e4a92-c4c9-4200-9e7f-a441aa82cd9b": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "b3ed9e28-9cca-467c-9c04-1b533b1ff751": ["64a6a839-474e-4542-96a8-cb04179fdd27"], "b814a05e-73e6-4ac4-8098-646df1806acd": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "e2b9260e-db2c-4449-9f56-d5cbf8e617c2": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "9a8125b8-6346-4652-9584-b5e5399fa7bd": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "2c59dede-dec0-4ef4-9a1c-f4e8e47a6727": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "3c7f39ce-861e-4abc-8b91-941233a6ff50": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "0e532265-3e9e-4043-ac99-d073b610bb72": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "3479b71e-e61b-42b5-9866-827680891f1c": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "6334ec05-5071-42d8-98b8-fc5306d236b2": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "c5293c46-c7bd-4332-b088-0f07e7fca312": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "5386554b-00b3-4e7e-b130-eafd4d8eb057": ["cdc23111-1d88-49c3-80f3-9ebd70b44b12"], "22b750f1-231b-489a-bebd-3b193bea2a49": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "5de799b0-d874-492e-aa45-4fd7647abab2": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "81900311-01e7-4092-a602-1d8187f760e4": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "65bccbee-7732-4dd2-be47-39627cdfeab0": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "db31ef6b-186d-4690-9967-65f38c663c5b": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "540f0169-64c9-4548-888a-05f286d81496": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "415e4fbc-b69e-428a-9f94-7312d737e467": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "1bfaa0c5-41a4-4311-a8ca-15d61ff4d3cf": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "768e8af3-91d4-480a-8e3c-f3fb6fa362a0": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "85eec5c3-d2d6-458e-ada1-b311bf4a72f9": ["5741a856-e0d3-4918-ad9e-a70c052fa59f"], "3d150e71-c79a-4bd8-8084-2ae5bafda260": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "1a24bdd1-8636-4d56-8125-f82bbece4d8e": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "84f606b0-d5e2-46fe-b0fe-9f8bd603926f": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "b0068345-aa37-45f2-a66d-ab765c64e4c3": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "322cd2e4-d7f9-48b7-8de7-f1bb820e45a4": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "555f7d72-2732-4953-b60e-5aa9bb3b5329": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "0a9a8d6d-03b6-4a09-a022-86b5bcbb4f9d": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "28ffa310-5101-4271-a96f-ae81552638be": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "e59e161d-73b9-44a7-ad00-86e936500c7d": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "7712df59-b5e6-4300-81f2-1d0e16da04e2": ["416b3a7a-8d1a-4742-ab01-b6663ae8e0fa"], "ecfdcb90-ee70-422b-9850-10c9ebbf34c6": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "6dd78dc1-02ed-4f5b-abe6-398caa87184d": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "43f126a5-f5cf-47b2-a267-d84e162dc483": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "2d69fe62-48f6-443a-af60-47dd8aa8e223": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "b15d2c3c-6578-4ac7-bf29-1bb9ffae84d3": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "c4aaa7b2-60f1-480b-9a24-5f8d421c6136": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "98490660-182e-4994-a0e9-79f5063132a2": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "db8d310e-c511-4d66-9486-f8723f206d2f": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "4dce7da8-27f9-40a3-917b-17d3726fd5ee": ["f6481b29-2176-4fe7-9fbc-c0b492535374"], "9f0a6fe1-594c-447e-8837-2a78a94f7b03": 
["f6481b29-2176-4fe7-9fbc-c0b492535374"], "60709a95-43fe-4194-a602-00bee13a364a": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "d64d1e05-da97-4caa-a99f-c615a4ec29f3": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "ec9e492e-7c12-49e0-b47d-e552f7d1f972": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "336ad8fa-faaf-4447-bfc6-ef161da017ce": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "3b634a49-58a5-4fcf-a112-9a86f5ac0893": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "4b58cb88-0109-4889-95e2-055c73c49939": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "21b86043-f03c-4c93-a916-be8fd8b2ad61": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "63133742-a77f-4db6-846d-4d1f5059f01a": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "661fa6f5-10ba-4b3b-a3aa-67fcb6b7c0ec": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "d181a784-4cc1-4749-98e2-0f0ce4f2e3be": ["8cddb4be-1854-4ac9-96d8-27f9a2fb057c"], "ebf17b14-0e3f-4c3d-904d-649dfc42fa95": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "a6fabc41-7dfb-4c46-aa88-b4a3530b1d84": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "24c03da2-ba86-485a-86f8-59894c03d3e1": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "5e4464c8-91c2-44f1-9ab2-914ebaa083c0": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "6784a5e2-6056-4306-93f8-80a495e09e01": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "79f1c48b-968c-4851-928d-ae6f42c61343": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "925e7b0d-8fe7-40c2-a359-c8ee0f5ed344": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "3397b416-793c-48c5-be7e-199cd416c910": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "fc6683f3-3edb-4168-be40-07bfdd0d288b": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "f7d73d0c-df20-4ca2-91d2-a8c5afdc14c4": ["6dbc971e-9d84-465d-bdc4-ea5d2474f896"], "36690cae-c8ac-4416-86a2-94636c14e662": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "ed1dbeba-1948-4424-9f4e-49940e6ced35": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "6144bb8c-dc20-482c-b508-3a6746a31aad": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "10bc0fe7-54f9-4f07-8e70-4dc100547fa8": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "a2df3dd2-4f82-4dc5-a09b-87f4bb447213": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "1ceaf948-0544-42aa-998b-834363202b17": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "7ecb6173-5692-4d0c-9c5e-8b0b13bffa23": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "1e214f08-1e4a-4588-b61a-55376c21b541": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "68fab0c7-c29b-44ec-af9d-05ad2474f8b0": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "7bd24a17-0f1e-4165-ad6c-a177ddda289f": ["dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc"], "c8b856a5-bdd4-468f-967f-2a2d971d1c03": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "403aa84b-752d-4588-b4e3-fa5a8c170913": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "d6cc3ca1-888e-40c0-be4f-a80b26fe264f": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "33590f14-1e42-4a8a-b98e-4b50ed420647": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "a2a6d551-b7e7-4648-9043-311d908a8546": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "8da5e9a2-f967-4417-a687-2d49feb921bc": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "2826ff48-bba2-4f87-9f2e-e425dd1999ff": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "ff195633-1d86-4b29-8add-e361af17a5fd": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "d1ee3ce0-b52c-4ea9-a326-9e3a36f91eab": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "655de1e5-ce3a-4bce-95ec-ebe789004f66": ["cf94ff83-f7a3-4d72-9761-ee38111d903e"], "ec6b21b6-677f-430b-b3f1-682615b0c999": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "16885872-4b72-4e64-b7db-18121bb64984": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "f9d9b18c-d71e-4abd-b3ca-0223dec3ad95": 
["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "2bb558fb-06ba-4259-b5c2-1069543d1700": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "9082084b-5bcf-44c9-acbf-4b018579d045": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "5fc54a90-cce1-45f8-8502-abf134313535": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "98454bb2-eaf7-45a3-8e6d-9781a4a9d584": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "e9753e53-f009-433f-83b1-86b01c4fdf46": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "7c787e2d-b2de-4444-a4e9-08b349e388a4": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "6c304545-a0cf-4473-aec5-25670f40a825": ["e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa"], "34a2354d-7bf6-4914-8c4a-cf196db36040": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "0aad6105-223c-4dab-912f-526f486cdc6c": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "f59c944c-112c-40a8-a47f-ac2b0347e2c4": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "c8df4ea5-17bd-4cca-a295-e7ee59f9e7d0": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "98b08556-abfd-4833-ae35-a8535571fea9": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "cba6f835-caec-48d5-9b82-232ba8416963": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "d5c902d5-6803-43ce-9cc7-e41da57f53fe": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "b810fe11-8639-4aa1-9680-e9aed0a7f23f": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "cf8d9d77-d12d-4feb-affb-6668a8f6b2ce": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "d47a2d26-34fd-45b5-b3ae-e8dfe20c77ac": ["31953e75-65ce-4852-9a87-bdbefefcead4"], "345ce069-495e-49b5-af9c-c3149193f72c": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "74332ba4-7dba-484a-b7f3-db3c9b6c6955": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "8480a6c9-125b-435b-87a1-ffad65340e1f": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "d4afcb88-ecf5-4717-9b29-d9b4cd68121b": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "468af870-6b02-4fa5-9205-37a33e43b63c": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "12891465-101e-4897-a86f-046d85706186": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "14c6eb6a-036f-4f26-bb80-a075fd2979a1": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "861a1a80-5a7a-4b03-b86b-e6d7b357b14c": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "beb717de-9182-47e4-82bf-c27fde4a43fa": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "840b62a5-c2ca-45e6-9793-40fed59c838d": ["59cc349a-6628-4b71-8f4f-56af9bbddfdc"], "bd65a985-6adf-4ca0-800a-2f5e4eaf9313": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "d1bdd54c-5d9d-4677-b5ac-065fd4b4e8e8": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "e937f160-66b0-4289-83b1-2a5616b701ae": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "a9f3fd07-2b06-4abc-9bae-9cb5c884420a": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "00c694b6-964a-4ff8-9c00-082579e3705e": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "0d2d3927-99e8-44c3-aec5-08fdc4fc2fd1": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "227007e5-7052-4b42-b7b0-b914bcbe185f": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "262df558-56e7-445f-9a93-ad716c929f95": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "b6f4c293-115e-4811-a3aa-9a9b8ddf6f11": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "46fd166c-6f0e-4c3f-85c3-b30f0a1d53a0": ["ae1b2df2-0052-44c0-9465-17e88b67c991"], "560ee295-11c4-41df-a82c-d7e7f0f44386": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "66e50da4-fea8-4795-8a27-663c0cde6f02": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "4d449176-2653-4092-8b2f-4f047ac02fb5": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "b8a080cb-dbf5-4ca7-b5a4-5c84f821b1ef": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "cde58352-24e9-4fc4-aae0-9d41f4d197d6": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "3426f11e-e5f4-418e-b97a-01f241a68b12": 
["69b93abd-4e87-4056-87c5-e5819c310f8e"], "f3e038cf-7997-4724-8e6b-99c701363442": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "d730bbe4-b1d4-48dc-9ae6-737485ab0e96": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "e4aae9a3-d377-43f0-b0b7-61cca8c61714": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "4237e65b-dc5a-4002-b88b-cd45f37bc5f7": ["69b93abd-4e87-4056-87c5-e5819c310f8e"], "a824bf8c-fd69-4e1b-a7fa-d8a0e946b3e3": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "305a21f4-80f2-4d2b-9d75-6745caca88fb": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "d4751075-70d8-4ec0-ae48-12518f492e05": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "9a023a00-7a74-40a4-9616-0444aeadb0bb": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "01f67521-6c20-4fd8-8434-616c85133c1e": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "2fd099a7-bb3b-4ae7-97cf-d4a44e7b14fd": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "c09872bc-2fe0-44c2-92ff-4737772a9980": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "01e6920d-c467-4660-aeea-b58e1ad39cbf": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "1ef08275-0345-43e2-87d8-2c0014a954c9": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "5d1ea4e8-03c0-4918-a4b0-ee3fb7de061a": ["757f2dc8-3aa9-4e16-9c81-12c6b390f008"], "f15a27f7-7498-4ada-be1c-3cac898dc341": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "b3491e6c-e129-4aa9-81b0-cb27a5390728": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "02f41084-6947-4e5d-a014-2701aa7a8524": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "37901011-f77e-472d-bc43-758d35691417": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "d152bc28-9605-4041-84f0-75278caf5c62": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "29842944-f55f-426a-b9fe-f93af91ad99a": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "8eb02857-2af2-4998-a936-ddb76db5f513": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "7bebfe19-175e-4123-abe1-e08b0c2d683d": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "98de5d6b-fc75-4dd6-90ed-3be730aeab0c": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "45c83246-49f0-4517-8449-22f61e1ac6a2": ["fc97d203-7885-4077-a742-eef7cbd16ef1"], "d0176225-348b-4c83-ab7d-82277925e6e9": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "f75dba8f-4254-4fad-b3ba-5ce51b3065e9": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "d6be5ece-a538-4e8a-a374-7789a34d34c8": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "a1d04dee-74ab-4d43-8717-6e2684c5670b": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "48ff1eaf-6834-4862-8e80-624bb71169e7": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "7b9de209-f9fa-4ef0-8ced-8969ee2ae73c": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "a2d29100-ebeb-4076-9ddf-1f64b365290a": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "fd57bf95-f994-4cf5-b914-af75dc140b11": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "4eae01bd-26ed-4b64-9606-abf42d1039e7": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "c93c5ccd-cd7b-4a31-b8fe-2730399bf76e": ["b10395f5-dd7d-4bdd-8b79-b1049adaf1cc"], "8e93918d-d152-4bda-a527-51eb1e4a563a": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "8bd0ab3a-519e-4f8b-bd09-52e086f06ab1": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "9ea19168-923a-4776-b043-9cc4c8d33b84": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "48f1823c-987c-43b8-ba09-2f7f30c39478": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "69904a33-e3bc-4725-a589-ae8903083507": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "1ff81582-703f-4f1c-9591-b190d995ac44": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "70b48d78-cf62-458d-9cc8-cdee5b6b30ce": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "3cbba8c5-608e-41ba-acc1-e266269a8d04": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "c0dbcb52-583f-455e-ae5b-a4baefe89ffb": 
["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "022fb6ca-e847-4f32-a2d8-a01f772f1c21": ["7b72f3dd-06a6-4760-ade3-b68fa700d859"], "776a11f6-1df2-463f-8f5e-9ab8c838c2ed": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "b183d8c6-087c-4de9-9b29-7b89974ffa14": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "611c347f-fd3b-498b-87ff-095aa469d501": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "ccd936e2-e89b-41c3-9b5d-4c171d9f1eb6": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "de9aab5b-822b-43a9-97f2-e9d6959e33ba": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "daa39ee8-a5c1-4bef-bb7a-eadbcc70f33b": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "cc6d42a1-429f-4ea8-a0c8-481571c586ea": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "1fa9c10e-8e74-472e-98c1-62ee3c81305f": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "3d8c9a00-451d-4097-95da-7db467588dc5": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "d5fd9d65-12bb-4f39-a0f1-3cb54d012fe7": ["fbc5530f-3dc1-4c5a-9119-a4b13018ba7d"], "cb0bf5f3-38b6-4eec-81ff-530a778e0328": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "207c0258-6572-48ec-ab8d-8f6558681013": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "eba2a62c-de0b-4649-8f1e-438799536f1a": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "015148ae-410b-409f-a89d-73d1a425a37d": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "b4463b2e-409a-48a2-9a74-455373d37285": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "6ae4fc70-185d-4aae-bc07-faedd4dbae8e": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "f67fd894-f032-407f-8771-8f564f4f9ca2": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "ae278041-1973-4238-aa7f-a6cac1551a8c": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "f6cbd86e-2f15-4311-9c83-e407554ff032": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "38192107-9ce8-4cda-b34a-cc111e33be8a": ["403a6510-6af3-4ee5-bbbe-b97a65a94130"], "af1bb742-8ec7-4495-9141-a44df3bda1a0": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "379fa7b3-02e9-4abb-a67d-f5611f67453f": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "938ae17a-7800-4fa0-9d13-64d885add576": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "c1a34901-9205-4c40-81b9-92d9bccfe577": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "fcd299fd-aad2-49b8-9fd4-d6d51e7d6d06": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "8fe9af88-6425-4dce-ba89-5f8170af3a50": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "dbf83cba-a1ec-4513-a5ee-dbb91d87f238": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "07560e9e-9c12-44bd-8b3e-a2aa039ef9bc": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "773283eb-6b5a-4256-8ddf-8f255df3f593": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "d57e3ef2-9ba0-4083-91e5-bbbc46ea63c6": ["fc60770b-eee5-46f9-aa49-803aa5ea6dfd"], "ae1eb349-2033-4722-ae50-edd77d230dee": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "f123f8ad-0f9d-473a-a63f-f10e7686745d": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "61295097-606f-4249-968f-47372578c40c": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "663dd7a9-c1b9-432e-8fbf-ea8ec15a775b": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "a0a14c6f-86f9-4164-99b9-4db3716b659e": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "5b5f3867-1803-4238-af24-bd788829ee4c": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "5e60b645-855e-47df-8ee9-48b1a1a83efd": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "7e2af9b3-68e7-46c0-9213-acdc636f4273": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "3176e372-80fb-44f7-ba7b-210022be925b": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "dc35ee01-7ac3-4ccf-8b45-14b5aa5a6c9e": ["dba635cf-41a7-47e0-820b-1d2232b26d3e"], "d599c961-25c4-4fee-8ed1-d5c62a73647f": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "73118592-ad30-419d-b6b2-3dd0d9384da9": 
["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "e5807faa-9a9f-4510-807d-c4553fcd9780": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "d70f0a08-4229-4e3c-96e1-75b97e2bd955": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "e56fdc33-7ce6-4293-9d14-efd06aa134c7": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "e3954bfb-ba3d-4cae-bbcf-993ccb500b97": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "1bbd547f-2198-4ec1-ae87-5a5ac9ebc2d1": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "54655f06-a110-44d5-84da-2b618c017d76": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "775f37c2-5451-467c-8c55-81cbdf42dc44": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "6092bd99-29ad-4e06-bb27-ad1ae9006a58": ["aa38da46-37c1-4f6e-b21d-0f01b6463f05"], "bc600deb-fbcd-400f-9140-0d915fcdaa91": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "2aef734a-ac35-429c-a7ed-45f4bc41a30b": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "14c90494-2295-4e90-ae42-73e7ae4a413c": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "cb928351-26a1-491c-9262-10297fb46ede": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "90e2f5ef-ab38-4486-ac93-f172d2500a85": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "d860db03-3ac9-4ee9-8e9d-68730a2dbd6e": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "7629f220-d4f3-493d-8160-89a311a495a7": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "e54dbc2a-f5cc-47d4-b7f5-2cdad36a161d": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "864610a5-0a37-4ef4-98d4-1b6d7d32f886": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "8fc6576e-192f-4f3b-af15-d1c21b908da9": ["cf169e60-9a05-44b1-8364-9e7cefe6a856"], "9bec12eb-4fa7-477e-a667-fd165c16f38d": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "ce2ec10e-da61-4462-b06b-0aa052282a80": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "782d7b57-c109-49a4-aee8-fce11007be0d": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "04329baa-7e91-421f-b129-dcab9672ade8": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "f84ca6ea-58d1-48f6-a5a1-cd8ef9f4658f": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "f2ce84d8-0c07-4e0e-9bcf-3aaecc8dc8a2": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "366462e4-8d9e-40ee-b30d-f4817377ba05": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "e8433dc8-3bc8-4eee-9a40-71e4005f3386": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "ead8c5d0-e517-4da2-b4b7-885f6c6031b4": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "7f75740d-9d77-4338-9f3a-78ab85ae1906": ["e188de33-eaa4-4379-ae37-a335ce7e69b7"], "caeaec67-6507-428f-8b4b-41751a7af496": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "2987bea3-66c4-4ccc-92fb-5c073f60fd97": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "48581f7f-03f6-4ee0-abe0-09bdb7f54800": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "66f09013-2126-4c82-bc9a-c3a11320b3d7": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "a7a8bc83-2e72-4a24-a793-f1b816f0c283": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "cfb4d915-c088-4618-925e-02e974b919bd": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "ae31cf33-efff-4297-a26b-ea5db0b92c1c": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "c8138f61-21b5-4802-ac6d-f18233410054": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "0f1e8318-43b1-4523-8576-17ceb1fc8061": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "25ff123c-f46a-4669-9903-16149a4ec2d2": ["032e77bb-7ee8-43a8-b13d-4f1aea5a1084"], "1e6c80b3-721c-45ae-88a6-cf0f8ced7156": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "df6054be-5a75-416a-9eaf-0be1a05ee1d6": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "288339c7-c00a-4dd4-aa6b-e2002ba889ce": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "deaad193-bda5-46ff-afa9-72206425c873": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "2d47d0ae-c76d-4ae5-a638-a6517c6220ba": 
["8a259709-51c6-4a1a-bd12-461abc901ca1"], "76fab1b0-0e24-4c35-a7ed-8d49cfe2720b": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "495b8ac6-a421-46e9-a6c3-985e29b71272": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "9326e08e-8852-4d72-a983-7e09ff35af6e": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "a4b17ec5-fc35-40d7-9721-d18501274964": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "9bc8756b-e0e6-4b2c-8ff9-a26618ca918d": ["8a259709-51c6-4a1a-bd12-461abc901ca1"], "fc3b2287-133b-4a7d-beb3-436fe108fc3f": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "b35cb75c-71fc-4af1-9c57-c5ae6ccd8cb3": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "381e4eab-6b5b-4ca7-8571-31aba3dfe943": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "08bba123-7cf2-4473-830e-9c28dc6dbce4": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "ff25d5f3-7350-498b-8341-45dae4e79977": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "8ae5b2b8-aeff-4c5e-aa72-6924b2fc4dd3": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "c1f383d3-687b-4a57-a98a-c2f2212368bf": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "a8540ee7-471f-42c8-8ff5-4d74af3e9e84": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "abee8f0e-95b6-4a66-9584-2fa2c87344cd": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "86a7086e-9edd-4247-96a7-f4bd37a9d20e": ["18bfbbbc-c65e-4bb3-a596-44dd8f4584fd"], "582fa583-c146-4227-a529-e7428ecadbd5": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "f5ab7c6f-0d8c-4379-a524-ea0abad40378": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "62dbacb4-0a6c-437d-b130-68d38e4fb182": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "a8ba6c23-7fb6-47bc-8dcd-4242dca46afe": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "495f990c-cfe1-4bd2-99e7-728aaca889b1": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "dbf021da-491b-4d03-b6c9-7641362406aa": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "f2e9d547-10c8-470f-be55-7919d3b9fc76": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "333ba50b-1816-4a01-9ec5-0dc8942bd96e": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "f743f398-d354-4757-a791-669763758105": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "493410a8-8da9-4c05-af25-02344ce5fb36": ["b8198357-3185-41a6-a9b3-5f2d5839c91c"], "d43b5165-b424-4efb-8ac5-c70839068a9b": ["424352b3-190e-433d-89f6-beb5f2519530"], "e745cebf-169d-42f5-a769-451d4e947df1": ["424352b3-190e-433d-89f6-beb5f2519530"], "48cc0ef1-0635-4131-ad9d-a75b6f87ba6c": ["424352b3-190e-433d-89f6-beb5f2519530"], "77a87b11-faf6-450d-bc8c-8fc3bb450471": ["424352b3-190e-433d-89f6-beb5f2519530"], "32dbfbc4-6fc4-4307-879f-1a14277108ac": ["424352b3-190e-433d-89f6-beb5f2519530"], "d129f587-db8c-42b2-b15b-f48854e829b5": ["424352b3-190e-433d-89f6-beb5f2519530"], "595e4aba-2967-46e1-9276-4619eaf23ee3": ["424352b3-190e-433d-89f6-beb5f2519530"], "70ad54f1-132d-4b05-b0d5-ec776fc6a092": ["424352b3-190e-433d-89f6-beb5f2519530"], "e782da3f-efb4-4a2b-952d-815b7704c1c2": ["424352b3-190e-433d-89f6-beb5f2519530"], "d7780233-e970-44f8-941c-d04776c461f0": ["424352b3-190e-433d-89f6-beb5f2519530"], "411f2cb4-761f-4481-acc9-3379b98be822": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "aff655e4-7805-4005-96dd-97e57a4ed1f6": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "b2cca45d-920c-4cfe-81d7-5dfa18ea4ac3": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "c834f9d2-569f-4a49-aecb-ae1382201dda": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "15467914-2915-4097-a975-a2b7a2bf6e0d": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "7233a36e-44dd-4945-bdca-92ff30a47933": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "22334989-4055-455c-a95e-7e517df2a381": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "d818454e-443f-4926-8398-9edbf7ce1b23": 
["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "895f4711-def6-4b59-833c-f71a070facef": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "c4d00e1c-836a-4ceb-973d-e0df3779b23f": ["6b457a05-02a8-43a9-b7bd-39e2c396f577"], "1057d1c5-3ac4-4d8c-9193-566dac0f5640": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "8c3f757b-977d-4050-b35d-aea49c010cf6": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "1468fc8a-bccd-4e2e-a99c-fd18b4bd12c2": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "ff6dc571-3da6-4cb6-ab29-a41494392ed0": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "5e523726-a9d1-4807-9295-3881cd46f457": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "83c676ae-600e-4c27-98f4-24e784b1d7e9": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "c2b9fec3-5182-456a-8d05-894e91037f8d": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "0da8dd83-5ddb-4544-a4ca-0fe64ff97e5d": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "3d03cf3e-8e29-4424-b5ee-db3ce2aef10c": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "9466ba73-2db9-4dbb-a45c-9cf5fc4b9233": ["c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7"], "c8753999-1abf-4fe0-b602-96c7badbe1b8": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "b146064b-3808-4dc7-ae9c-1fd32c51462d": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "26296ada-303b-4757-99c1-d0b666b5ebe6": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "22a9a1b9-5625-453a-8f38-7e6e1faece16": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "456281a3-2fc9-42d0-aaee-b9e644b01898": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "d0fe0b08-146c-467e-8042-9160ccec66de": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "1c65bc00-7698-4aac-ac75-f4cf0a56d7d7": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "3b23f18c-6948-482c-ad4b-4094da577801": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "4474dccd-e072-4606-bf97-01a59797d81c": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "bc98f064-db21-492e-8cbe-6bb73bfc8422": ["4b37fe12-0be1-495d-bba0-f49ca7f5df94"], "660c7ba7-9614-4425-afbe-8e9230f9e5a3": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "3d79fef1-7ec1-4629-8dbb-c4c88e55354f": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "19a6f91d-d99b-479f-974f-60046204684a": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "926d89a3-3c21-4df7-9dfd-ced249915ecb": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "8aee8607-430b-469e-86d5-4232adc7e194": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "f7611088-a63f-4ae9-a027-929e84a6f966": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "93b98b07-e59b-4886-bc4c-6b487d0c9dc7": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "47a623c2-1493-45ff-82a5-c288ad0a01c7": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "b42c870c-4507-4740-8c36-662932a8b9fc": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "38e32513-a3a7-4720-96e6-35e11ae830b5": ["6745762d-e5d8-458d-be17-641b6b4d516e"], "1c8ce2b7-1996-402e-b151-319c407a4805": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "0ac24fc9-606c-4eaa-8576-5bafe235903d": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "66d10b11-4e1c-4323-90e6-162723085f38": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "be510be5-869b-457f-8999-878fdb0b09ba": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "e5d619fa-49b5-435a-adc3-565acb71c415": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "c5c5e007-9f35-46e7-aa48-ce8e0f608d5b": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "5437738f-c791-44c0-bb4c-acc4b6d2ff09": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "cfe9c2ed-5bf9-478c-89c7-794e93148715": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "48425d12-7cb9-4383-b947-e6e9566b9e7e": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "692bc9da-55ae-42fc-b205-f3bafca5a068": ["56a48ef8-56cc-4d56-82a4-b0c6d34bab3d"], "d7c50441-1bb2-4a13-a9b7-608f14fd60fe": 
["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "614fef06-0fe5-4830-8050-73ba9a266f5a": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "1a1246aa-f540-4d9b-970a-69324e141925": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "c5c03047-1888-498b-8f37-9d697a2d08d0": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "0b6f15fa-f4eb-4918-81c3-03c0a661c2c6": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "69197eb3-9af4-4e2f-a207-c4e672d92d0c": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "018142f3-7555-4ee1-9679-91e57ae8774f": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "fb522bed-6381-4de1-8e9f-51b4ea153f04": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "1cb575b3-9f7b-4da7-8af1-f027dd6c2a12": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "433a4b30-f091-441a-af1e-ce2dcc4d8afb": ["677e974b-bbd2-4dc0-be4a-146bbf69bcb2"], "1437e5d7-0d57-47c3-a4a7-051a8d7ff1b3": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "ad5327c9-e60c-470b-8f4b-f68f13ef62c3": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "e37a9ed4-73f7-42c6-8260-88f6a0063f99": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "f2c16031-33cc-4d57-989d-4964ca15cdae": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "118530e2-7547-4cc3-ad5b-6568ecae31c4": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "c573b1a0-67f0-492f-bcc7-67d803c58308": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "e343ee0e-f5b4-42b5-bc79-b18001db5dc5": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "55a01f18-ee4f-4970-ac12-de3cd610e0b0": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "0bd8ff53-8c08-4507-a5f3-550b439e5efc": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "bb56381e-85cc-43fb-8e88-7117f7aacd52": ["ad920bc1-0052-4c4f-bf89-3468cb95f316"], "a975c7d1-2ee6-4b57-849b-847b4a68ac46": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "0611d75e-5339-4d6b-8f5d-9768777ec2f5": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "afb164b5-c73e-4af5-984b-7ac0babf5ec6": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "290e48c4-183f-4050-a61b-d3d835bca368": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "6bb17fa3-0268-4f77-a5f6-77274be300b9": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "741f4783-a448-4014-a7f1-a230bb81f3da": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "46545ce3-f86d-4a59-89f3-76f23173b07e": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "9cc2b4c1-cc01-463e-bc84-67dd2629f727": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "818ef24e-1ab9-460f-ab5c-12753804fd50": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "5741db49-72b8-4e42-bb0c-8b664a4b6a54": ["50e8d128-6b17-41b4-9e92-5de8e14f82c7"], "243360b2-c711-4a99-94ee-2d017ae0b586": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "a6579796-6adc-43f8-b162-a104e18bfd58": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "4e1b2a11-72d4-409a-bff8-53dbfd748eef": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "867fc430-fe4c-420b-a096-016280fb19a5": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "1c1161cf-f257-43b3-820c-89d59cf30c25": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "b06a86d1-7e66-456c-ac18-f879ce265907": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "78140874-f80e-4e3b-929f-f5a10c950800": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "cffecf56-bb7c-43ce-ac11-2bc5285c2d7a": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "782f973d-10c8-4637-b58d-78028023f937": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "c1507631-a4af-4438-86d8-01fe4c764de7": ["22b8897a-4de2-4c91-bd41-9e32a6c4fdfe"], "77d8a4e8-7880-4a14-9f78-6951755d3877": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "31445acf-bbb3-4ba2-815f-8f4fc6c17277": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "45302ae8-2e25-49cc-bf44-08b27c61b02b": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "3bbac743-bb7b-4abb-901a-036782abe40d": 
["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "3c04f443-114b-4094-b7ea-0850a1205413": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "5f301a21-1a6e-4fe4-bbc4-3ed0207e093c": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "117fc83e-30f4-4fe8-b500-30f64f143ed2": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "0cb33d22-ac9a-4005-af6e-aa9926c5be8b": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "bb631381-802a-4eaf-ad0f-69f0760683bd": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "f58c6ed9-bdbc-40f5-a23e-b7a8a3b74b39": ["b3f535d6-c485-498c-8c1f-b421042bb5b4"], "c4d1371c-ac60-4fbb-9bbf-22fd9a717c1f": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "fd3f2838-886a-4dc2-b79c-47708237c0d6": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "1c90fdb5-fe0e-4f62-81ef-68f4f27e5611": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "7d7082bc-3629-4c33-bf06-8d1de10409f5": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "b9df2f99-799c-46bd-a20d-5f8d2914b226": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "a6631d31-0e87-473e-a319-90fcda9c1c3d": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "d7d6e108-130a-4ab9-915e-d9ee5a20c635": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "b9ccc9b0-0028-4ba4-bbb6-5233720451b2": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "609aa6d8-e41e-4fc7-83a5-2cf32901c497": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "2e407155-794d-4127-a539-3071466d8964": ["d53154bd-1a82-4a47-9c10-600a528e532e"], "b3a14e62-cbd5-446d-bce9-c73acd2baea8": ["6048d14c-7855-4647-b087-57ae0d713497"], "236f0306-90cd-49d3-8ec0-6a0cba6252a5": ["6048d14c-7855-4647-b087-57ae0d713497"], "e08d701d-e244-404e-a35d-5e7a74b18eb5": ["6048d14c-7855-4647-b087-57ae0d713497"], "a0516a4f-2bd5-4c53-9ddc-49b6d709e2ef": ["6048d14c-7855-4647-b087-57ae0d713497"], "18f7d41a-eacc-4b77-bff7-dd63adf0b0e5": ["6048d14c-7855-4647-b087-57ae0d713497"], "b1ff8e57-f9c8-4c9c-b80d-0b8cab8414d0": ["6048d14c-7855-4647-b087-57ae0d713497"], "99396743-8ac7-4e6a-8174-ca002a8bc22c": ["6048d14c-7855-4647-b087-57ae0d713497"], "e1119908-110c-41ef-a480-2c1db95e1206": ["6048d14c-7855-4647-b087-57ae0d713497"], "59a7e5e5-28fd-40e7-9eef-7a8c47988333": ["6048d14c-7855-4647-b087-57ae0d713497"], "f6fccc6d-ae1d-4832-a6a1-6a02d216d0ef": ["6048d14c-7855-4647-b087-57ae0d713497"], "2ad0d5f2-d9ad-4172-9854-643c6a273f4a": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "d378ac98-249d-4ce1-9350-5282c48770f8": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "7dee64d8-bf12-4366-bc43-ff71821a2047": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "79cbd8a2-e147-44bc-8a55-26401ebfb951": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "5841722f-bf15-4fbd-a76a-b321b179aec4": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "6b8892ae-acb4-4abd-9d0e-befbaa39382f": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "d7804551-0ff7-4e12-b264-7be2a008bed1": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "f7d942c1-1e8f-484b-8edf-c8d0a64caf8b": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "42a9d92b-e27b-472c-ad56-07cb1140ffd9": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "480733dd-d93f-4c4b-9b40-56e5910e831f": ["bbba260e-ff51-4950-bfd6-f97904f8b366"], "7798df83-4529-4317-b89d-746485eb64a4": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "f695f50d-54d0-49c4-be89-203b99be0e61": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "0daa74c8-7975-4a0e-ad33-5c760795c2d8": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "28c42deb-d5d5-4709-b15d-3cea81950b3f": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "ae24e00b-3f03-4260-b4ed-5e6848c7864b": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "8b794925-1544-465c-82ec-bc7f37298137": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "605617f7-69f2-410c-b50a-c0974457693e": 
["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "47c40993-468f-4792-b50a-d2624c30ed04": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "8f2a4098-0fb5-4199-b811-3a63eacf1485": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "ebef13ba-6679-4da6-92e5-0cf132673dba": ["9e642b01-e82f-4f7e-ab17-f89df81f45ed"], "bc196094-f147-4147-8ab1-25cf7cad041b": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "aa3c17a6-1a44-48e0-9bac-245822879aee": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "d4eb8817-0f37-4caa-ab0c-ed5583f18564": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "74a83c47-6ea7-4e21-9325-9cd0a9b29316": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "40cace51-c6f7-4263-909a-934539ab4313": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "104aa783-0d34-44c5-bf81-967d2c43daf8": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "e1f816cd-f3d5-4510-b7af-29babd74b1d9": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "561ce82a-df07-4979-adaa-2450c3b8fcda": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "8868798f-7d64-4e60-bc77-56725a2a1c37": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "8474f531-52b9-49b1-96e3-7e64a855cafe": ["4d8ac59c-02fb-403b-bfba-6d1c62c8513b"], "2f0ad5b7-bd5d-4662-a86c-d0ad96f67935": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "6a01a7c9-235a-4a09-9bd5-e38c1dbc4b94": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "b8db359d-0588-4011-aae6-c6838c31805d": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "754f8572-6872-48c8-a9c5-c998b8091f62": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "eeb0bfdc-2e3d-4d88-b02e-9c4f33d462a8": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "5412af4a-850c-44fb-9945-95686cb9ad6a": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "6351f3cd-667a-4aad-857f-81df7b601da0": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "8dbba1cb-269d-4957-b4fd-308f00b91882": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "fcbbdd00-85b9-4396-9b7a-5de417c1179f": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "4d05c32c-5866-4426-a745-2d2c554a5691": ["8ad112fa-a36b-4a31-aa34-22d98017a0bd"], "7f590c18-47a2-438e-977a-cdd098e7fa86": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "edde2af3-d243-4ef8-869b-bbc0626bc609": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "541450fd-838a-4b58-94cc-20e3944691ea": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "b0e29ede-db9c-4382-943f-c21b4946bdb7": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "e6832258-3b72-4cb6-a1d3-f906eb781da4": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "b6d68612-46b8-42cc-ab3e-31be5702199b": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "f46074c9-19da-4ec7-90c0-400d08cb34d4": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "a8106443-688c-4550-a5fe-1c5ab81fcab9": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "4b8e23f4-abf1-4143-93a4-47f53784794f": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "fb69db56-a88a-4b72-818e-8881601a2bc5": ["491f5c27-32de-47cc-97aa-a62540e2c12a"], "b78a1120-785c-4e9b-adaa-d3f2d69f0561": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "18726698-c2d1-4390-aa93-ba12c019bdec": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "05e386f4-de4f-4b24-911b-1719161ea196": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "3cd1a4cc-ca2b-4d1e-8ada-446a7c42cb20": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "182886ff-5412-4948-8295-701e0d7fbfe1": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "37631ff1-592b-4472-94a4-2ce2caf8eafc": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "75e3d9c3-d911-48df-9ddb-8914d28e9008": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "421b5545-cea3-4524-bdc8-d3a22c30dd6c": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "83379563-52c0-4b59-b40b-f4418017f011": ["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "d76b947b-8f88-4163-9e86-05a0d2e86981": 
["ef089202-5f7b-40dc-a4d3-19c9a7065d3d"], "9bd4e2e2-cd2d-4535-9dce-679178b1e43c": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "d3ba2ee8-7ef9-41ad-92b7-b27c7adbe4b0": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "787e31c9-98ed-4d18-a73c-6e891f3e92a6": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "0649769f-3bca-4cfa-8515-d227e06e7fe9": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "83843f77-efa5-4c8e-b8ea-95ff293e2900": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "94bc029d-f508-4768-a427-507ea45dfe8e": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "679f0e14-0fda-4866-baa6-90cbd9892af0": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "93f909c5-8ab0-445e-893d-16ef7d118607": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "571ae548-9f61-4e1e-95a1-4357dbb1c8b8": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "30e3bc0e-5f22-4216-a2c0-f27f861b5634": ["18a7df82-ff90-4408-a0ee-a2d91effd2a2"], "1b6a2ed0-d374-4c90-9b2f-26802ca2cc85": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "6c6d9102-09bf-4244-b8ba-21686bf078c4": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "397d1460-bd65-4980-8b60-2ff58fcfc82e": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "bc39145c-d1dd-4e69-a545-04944b0249af": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "c567b788-ba2a-471d-8a61-f8acc8154f67": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "29011969-35fb-4185-9b93-65a5945bf067": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "5f790da9-449c-41e8-865a-7f51afe460fc": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "cd10682a-f510-462e-b688-e6038e4d2166": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "20c5ca9a-d0a2-4fa0-b02f-0f7351890b70": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "7d213d18-b9dc-4003-bf54-21ec8dfb8397": ["81cbeaae-c1fe-4914-aebd-a0670f500364"], "575386df-5852-4b96-98b2-146127539141": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "f0b324e0-dd60-4f57-b879-e56ece38360e": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "5d3ecf42-2ce1-468b-9dbf-e4bbc5707389": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "e4d68479-8a79-4216-bc1a-4c8b156f3b37": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "fc9490b3-396e-4ac3-95ef-80d13f6cf544": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "1cd90900-c572-4975-b7b8-fc93f4189d8e": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "2e2511f7-776d-400f-99e8-22c70ecc04d5": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "e3b81075-8ba7-49f1-a7d2-87168a99617e": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "e5339170-f4f7-4d0f-851e-a4b684f4de56": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "db8ab762-0912-47d7-9d30-648164bab39b": ["10009fcf-4d1c-4343-948a-2ed6fd9bcc09"], "59c7ac21-d712-480d-b1f2-638dff0439e1": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "00e72162-956b-4ed9-94b5-b0ce264a828b": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "0dcfc118-6d07-45b7-9542-8c8fa5036354": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "60fe80fd-12ab-4a19-acb4-511d6cade220": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "6152c7f7-4267-40c0-a522-620e1574f605": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "bc2942af-52bd-4fbb-816d-9db9e02d78c1": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "ba3309a2-8a2a-4ac9-a89a-5df5b997a37c": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "8f11cb0a-3059-4f8b-991a-c534983d2ab8": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "18631367-2871-45d9-8b6f-049eee6e413e": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "da3446ed-9014-4635-b576-0dc495e48111": ["6d448984-e848-470c-8ed7-a1a740d3544d"], "bf7e5553-71b4-4912-8f36-d5596c343802": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "99d4fab5-34b6-4be5-ae63-ed96a8f6f688": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "cee03924-6fe3-43d2-8378-250f3240ea48": 
["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "1ccab29f-bc4b-428c-98d7-f783de1b512b": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "e3ea9516-f177-4b9a-b7a2-f1e46892ec09": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "bc2b4942-0fd8-48fa-989a-4c49c46c7b96": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "6c84c34b-2170-4b7d-b82c-331dd7539cbd": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "278b5143-0278-4559-bf04-512ef1f0c323": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "0059e842-db08-4e0a-a061-7d5f37511184": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "ae4d086c-aee0-4107-8d11-6d98cd132c52": ["2ff90b7e-f382-4179-9da8-628b5a1a1b92"], "0f1efd9c-7fbd-4b7b-ab5b-c3e90282a9ce": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "067133f9-c096-431e-b340-aaafffcd7f18": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "f50ad4e5-8546-4e08-aa4a-2f9430c6ef48": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "74155138-199c-4f03-8c74-36f550d07afc": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "bc6e0508-7335-48f1-89e2-85a0b185e8d7": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "8c0cd3e7-b416-4eef-9685-68b4a810756b": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "90cd38de-1740-48c6-b5cd-21899eb63f8b": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "f5303ea3-19f1-4a4f-952a-803e65a5a0fa": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "4d896f9d-2d20-47a9-8054-3f5b528ef495": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "18fd587a-4be2-4840-9c67-9ea224d4de50": ["9f8f273e-5d23-4eb9-89b0-255c934e24af"], "dfe37234-9ca3-43f7-a3e8-b86a80b04f3c": ["77cc9870-55ed-4598-9480-e11a183531a6"], "f23d3c7e-3ec7-4ff7-bdab-5cdac88803fa": ["77cc9870-55ed-4598-9480-e11a183531a6"], "702c8d56-ad6f-4de1-9926-285169f470d4": ["77cc9870-55ed-4598-9480-e11a183531a6"], "be6e8191-35d3-4f3f-94a2-c509ba2c3e49": ["77cc9870-55ed-4598-9480-e11a183531a6"], "4f8e4872-d2dc-4de1-aea1-da76b4471020": ["77cc9870-55ed-4598-9480-e11a183531a6"], "e614182a-3697-4fd6-9166-b61c28d345dc": ["77cc9870-55ed-4598-9480-e11a183531a6"], "d01414e7-9f4e-4a8f-b650-88cbc28f03f1": ["77cc9870-55ed-4598-9480-e11a183531a6"], "d6520c9d-85d3-4d22-81a2-22649cf66c30": ["77cc9870-55ed-4598-9480-e11a183531a6"], "ec9b0344-0489-4878-b603-f0e89be5311b": ["77cc9870-55ed-4598-9480-e11a183531a6"], "85e83a32-66ea-4124-934e-cc197072ddea": ["77cc9870-55ed-4598-9480-e11a183531a6"], "00bb38cc-b363-4224-9cad-242db0bd67bb": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "a221d1ab-5fd6-45a4-b6d8-8b93d9ab4bdd": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "12a024fb-ee6f-4e56-a00e-3f762d802ec5": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "857a98f9-c84c-4e60-a015-c7857dfc889e": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "77ca506b-f725-4298-bb8b-1192ff1727c0": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "6ad116cd-414d-4fbe-9971-f3c0165700e9": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "06c33b54-b7f9-4684-ae49-4ef790e2a2ee": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "3af5e01d-6e1d-41c7-a864-f35d9f4c081a": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "1e4703d7-50c1-48e9-9662-fd835aca738f": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "d84d75d8-f21b-48d5-87ba-40e65b40b8e5": ["34b4a549-0384-4808-966e-f713a6ea12ad"], "dff03de3-cf16-47fe-8c62-7acc7396c8e5": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "f5dd8a98-1cb1-46cc-9437-9e36b92e9027": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "4c8db1bc-307b-4041-a352-9f9af6cbb286": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "8ec278ab-326a-4b26-81cf-07a2748a5f1c": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "8e131803-f72b-4edd-a60c-cdf1ae47f020": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "c4536521-f7e6-42da-876e-138c80d5af1d": 
["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "28061b71-fa18-4a73-973b-595f5e9f23b5": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "6a32e6c5-5b4d-425c-9310-ac9e34058595": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "8b41d54a-0e4f-4063-9bfb-e5633ef01f3d": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "620c90fd-87a8-4508-aef6-49ce040c25b3": ["d2512e2e-5c60-4d43-af2b-449ec03a2865"], "b2771425-f693-4b11-8a2c-cd5ece2a9f39": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "d5648e81-154d-4e7a-9f98-89e26af03143": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "8c9a7763-5902-48de-9624-1fe4f06180aa": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "71023f86-db69-4922-aa49-0c7884cda337": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "95e33fee-d6c2-4676-8f91-f97b88f4e897": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "c5122f36-0c06-4f80-9d69-d7b5b7ba4c0c": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "6eda9f1e-d41d-4951-aeeb-6e3163a690d8": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "1ee096ea-f4e1-48d5-8859-1d486e08313e": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "3908a945-dd75-4d68-b75a-c42acfd1c173": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "e20ddb14-9cac-4fc8-8013-cc39c4a8924f": ["83aa9af9-6569-437e-af51-675af1d1e46e"], "52b9675c-9516-452a-94a4-69f03f5c10d5": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "f678bc09-26c6-49a9-838d-b7ed1c773949": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "56255088-9e23-47d6-b73f-3890da92fc7a": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "046fd2c8-285c-4ea1-b2dc-0acdba7f5f1b": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "7f889e3b-de8e-4bc4-895f-af886478d21f": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "c66a77c5-9ed9-48c7-b414-54d06b37b5c4": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "14df2313-e73b-4475-9175-b15e327a4691": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "7442ff35-391c-4c27-821d-8246d671bdd3": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "6da33505-e013-4410-87b2-c06a8f9a986c": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "0c6a74d4-9976-4dac-82a0-85562c58e9fa": ["c51b3f98-fc6a-430b-8e01-4248eccba989"], "90cdaeaa-2d23-47cc-9c62-a678caaff1fe": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "c94bb207-795e-4949-afb5-51060a4c898a": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "aed91732-c782-4c41-b30d-74b4bb0bccdb": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "9df72cbc-5cee-4e8b-a0f6-a71f2453c828": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "5fb78f30-b8ee-4a7c-9d4b-49a3d7203935": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "76bff3c9-e07b-42a2-a053-f2b1aaa03c51": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "aeb7622a-78ad-4d23-add9-376a60612c11": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "11b4686b-199b-42c0-87ea-4cfdf61fc1a0": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "b80438a1-1067-4a5a-820c-4133f009d448": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "50466b56-2814-489e-a27c-db75ad0859de": ["1b44f9d8-f8e4-40fb-9dce-83baf7655bb3"], "720b9aed-8f99-4d96-9c31-c592612cad56": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "52379352-3058-4c02-8a14-bf94e6ff2793": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "a6730cb7-c308-49ef-976d-2cd8dae8fea7": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "eb718d42-c1f0-4bc4-b821-2f51d3d1c876": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "9d567b63-4cdc-4526-bc7c-9227afac30df": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "c673db12-f99b-450f-b738-61159c6ee5c3": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "7d8746aa-a1a7-4dbc-bf5e-40f65e995b9f": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "449278d1-b673-4878-8c11-d1d5ab0d6db5": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "5969e8c6-02b2-4c2a-b739-bdc23cad38f9": 
["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "205446ad-e139-481b-81fb-2548db84a0e0": ["427b5af4-e471-4ebd-8a89-da73e750b6d4"], "928e6923-bcf0-4580-ad02-7769964afe49": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "a1b627aa-a1ff-4a68-a3ac-2a726f36747d": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "70fa19b9-7985-4eb6-9cc2-7bf86776c53a": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "16562b7c-6b93-483f-a689-d28eab8e4aea": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "b86ef95b-cdcf-455f-bdc0-474aac5b35eb": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "9aa09b13-c9c6-4729-8029-8b893a1772a7": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "c971ec0d-ff6d-4adc-8ce4-f31428e4b284": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "85942b84-5c76-435a-b971-83296f24e8fc": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "7a683ec1-c238-48c5-bf76-00845333806c": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "aa558c0c-2ab3-40c8-bdd9-3daf1f4f43a0": ["fbb4ec61-5d7a-469c-8266-667075da02ba"], "19703e97-9d93-49e6-864a-23847656346f": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "52e88558-d8cc-413e-80e0-9f094bf4270b": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "6ad81596-3e02-481b-b7ac-73940d631df3": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "dd2e0e56-2211-4a81-9712-aecf2fda1a6b": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "bcd77494-a31b-4fe9-b2c7-3876f1f5306e": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "83631b6a-fd3c-4582-973c-c108cbc54c76": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "fa5423e6-cb1e-4043-af5e-e939741381e2": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "45fc2c2b-6bc0-4a8d-8731-dfa83c3ba355": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "94cf27b3-d287-4e48-b4f0-1acdf891f42f": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "47124446-7ae2-4444-8f9e-e86adddd76bc": ["4a1f6a6c-b79b-4a2f-b298-a7261f617663"], "af389dfd-cca2-4851-9450-166e9fbd38d2": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "9bc603e1-c26c-4926-b91a-c8d4e1a57fbf": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "be75402c-f043-4191-9c8b-7d5af25bef31": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "0dc744a1-6026-46d6-aeb0-8af84f722ae7": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "66f03050-aedc-47b1-aa15-51450e4329a2": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "9c5cf2e4-1467-4a23-8857-28214677eca8": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "710fdf52-6834-4b9d-9ce6-a653d5cece1b": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "c505559b-8a65-4f47-80ef-b510d054137a": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "b0c53b5c-f74e-4529-a8d6-a123ed691081": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "86fad521-a1f8-4c03-9313-ef0a9cee5091": ["9eef7ac9-2709-4532-bbf9-e0d3adbf40c5"], "55bec949-76bc-49c1-a3ae-5f874eedfddc": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "9cd11b1f-87d1-484b-aba4-b76a5cb30945": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "9ee178ca-364a-4792-8ee5-68de0f17f7cc": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "616332c3-0795-4741-9e22-e94ffa2fcb13": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "d00c3c3b-c827-4205-bc58-f5520ba6e148": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "bf6e3576-bc85-4045-a9ec-7701f2ba852c": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "3a2a0084-eefa-406c-b6a6-ae09a47d2c23": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "3139865a-00dd-4da3-9038-84cb5a1b8b12": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "ef18f0e4-616a-4bde-9199-8407daad074d": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "65c26694-76cc-42b2-9982-8baf476bbaed": ["90eb2dd9-3186-4c57-bcae-574485a1be52"], "ca38f0f2-707f-4776-8565-5a88ac901073": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "dc4c667a-b953-456e-a31d-8b4221f41335": 
["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "c90a6481-11b9-4db8-b24d-cbb9f232a4a5": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "f989e408-3f17-4129-8bd2-87ae2ceecc72": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "dac82b9b-8e84-414d-aafc-7f3764ca2b23": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "df4dfdd7-6c4c-4dde-b6dd-5123571b1254": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "5a88e70b-4815-451a-bc8f-e1606c9f23e7": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "9882e344-3c67-494a-b083-c0b2ae59a9df": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "26c3c033-6002-4d01-8f46-96bad11d3572": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "6536996f-b790-440e-afc7-66774db4a2e3": ["f46c1f82-6ce7-493e-b3e8-3d02f09a2bda"], "f9e8bebc-884c-4775-95e1-78d70dc642b2": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "6a36fd5d-98db-42d5-b0d4-493641747d7b": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "d4f86701-19bc-43eb-9713-57eb57e073f6": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "ff6fe932-7482-44fc-bd67-812520e17bcf": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "830c3554-63ea-4a61-a4f9-95e5c8297675": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "621c0613-588a-4422-951a-45a7ed78fca5": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "df860556-2e58-4780-972c-1293c0231372": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "9c78e9c5-3af3-4520-a602-f9a45cebb1a3": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "359d8ba9-c8b2-4d18-b5c2-9c0369390b86": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "e1de377f-ddbf-4599-99e2-116dc45fac8e": ["dabf101d-f998-43b9-ad15-0a6933bde316"], "e5df3265-2f97-4196-b75a-ab39e66e7b97": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "ae852f4b-1551-4734-b962-337bb0060682": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "5ce6cbf8-e3c8-414e-8e92-59e44f212a1f": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "9a8e63e5-0caa-4533-b223-17d6c3c56133": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "fee74500-d0b9-468f-a6c0-cbb66d324f73": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "7e4ebd6f-c29f-4c5b-a9e6-c00489ace11f": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "cf2fab83-75d6-4fea-a72d-449833ab78e8": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "bcb8ba55-d026-4851-a522-40e9b1e175af": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "f89691af-5a2e-4c6b-8894-fd6d9cd424b7": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "6c7e2aa2-558c-4280-93b7-bf73de34e57b": ["74f049f0-a803-4a0c-80cf-58f96cc16b98"], "9f5142e7-fda2-481b-a08d-fddf4197454e": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "0cdcb840-24f9-49b7-b4b6-bd409bb4d50a": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "4dfa9162-0bd0-4926-a27a-2fe9231160ed": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "49ca8dea-a08f-410a-a643-8e37fc896455": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "4636292e-8f3e-4711-a175-b28428d0eaa9": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "69456d83-d8c3-4d1c-bd31-704d2c934902": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "d62ca080-832e-4f4d-960d-bf8bb0d1b97e": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "fb2ddd6a-239b-4b2f-a336-f921a876390c": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "9208081d-3da9-46db-b27d-f25ed1c64ad2": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "edb639b7-e492-4442-a339-62b5e921ceb2": ["02282833-7fe4-4b1a-aebf-3604b78c9e23"], "572c1b8f-43fc-46ab-ac02-ef9756b67234": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "5e70d1eb-366b-403f-b833-9bb92e6a6cc5": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "f9e5f377-fe4c-43e8-8cc5-f004e49964e9": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "be867fe3-bca3-4b2b-9a5e-471f230b333d": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "4b9a0a17-252c-4972-9be3-1b4156f5b5ce": 
["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "0f22a8c0-7a87-4416-a640-3c2187b8aa3b": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "338a84b7-78f1-442a-8e90-093c313eceb1": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "bd811679-d8f6-4568-8799-aa826cd0ca8d": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "6015d9b2-454a-4af5-9a8e-ac87189a84e1": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "dc4d23d4-5c52-47f4-b5b8-01c23fc3bff9": ["aac514f9-9cfd-4d14-8171-8d4c909cede1"], "a6399ec7-f330-4390-8da1-35f4821069f7": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "ec418640-346c-4981-a095-cbed724bdd60": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "7feb0289-a3ca-4a4b-9809-7ff2b8a6518f": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "c7d1871b-b9ea-4a5f-9ab2-e5b06fe26a8c": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "1f235bf9-1d62-49c3-9f64-b74ff53e1c0c": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "f370e6a8-32c6-4260-8fa8-013612614ab1": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "ee9515a6-b640-4034-98fb-4a5837748cf7": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "745e45f4-bba9-45ec-a7de-e6ad0e0e47e1": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "a02fa5da-7bd9-48d0-ade1-35bcb7529e09": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "4a54ea01-870a-4fe8-aa51-bbdd95af1a67": ["aebd3e65-e2da-4b5f-ad11-9d4a31881517"], "d9589a42-1bfd-4e7f-b849-f21901e4405c": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "a2cf57d9-906a-4fa1-83ca-9bcc8e64edbd": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "6bb0aeee-f203-4ae1-bdb1-ef25d1fcd53a": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "168e18c4-5764-40f7-83a7-4648c98b69c9": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "251640f5-543a-41f8-9cf2-21a40ea67aa2": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "c1e447ca-42dc-4355-a5b2-c55e9ace2bbf": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "2fd28c08-3fd3-41b6-927b-527d88dc0694": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "756d6e43-205c-48de-84fb-df3fe1ff1fd8": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "fb40f206-d170-40e2-8c0d-6fed13736a04": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "ffc49df6-1959-4e1b-983e-d5d6e8ee184b": ["d372d7d5-e676-451d-9c07-75dbecc3beb4"], "4570505c-0e36-451c-a22f-787893a90d09": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "6218a6ac-9d82-4ee1-92b9-bcd6637a0981": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "5a330f85-885f-4b40-85e8-c3a6dca75ffc": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "129f0f97-98ba-42e8-9d21-f3d2c6917470": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "0b4f9884-918b-4031-a687-d99338d8456c": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "6e60a744-5a8f-4a68-97f1-ddbd13855330": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "d4c7738c-fc98-4fc6-b879-f6dc0a273084": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "3ed724ad-1946-407a-990c-be9a281312fc": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "76c3b003-88f0-4704-84f6-839b2a2119c4": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "587362e8-2dd6-4f47-98cb-003f14dac0ab": ["771dac26-6584-4f0b-bc14-a12c56cd48ea"], "b3192f95-ad30-4882-b2d8-fa1e9be2394a": ["998558ef-dea7-4966-b3be-986e930153d1"], "22645ff1-03b4-424e-a146-2bbf328e966b": ["998558ef-dea7-4966-b3be-986e930153d1"], "e87363bb-640a-4feb-9d9f-9d2e0f866665": ["998558ef-dea7-4966-b3be-986e930153d1"], "149c1365-2c5a-4e3c-9c87-1105b2746f03": ["998558ef-dea7-4966-b3be-986e930153d1"], "13982a84-3a75-4f82-a35a-e52df15dc29c": ["998558ef-dea7-4966-b3be-986e930153d1"], "e7d71aac-05de-4e78-b73e-39d585314e96": ["998558ef-dea7-4966-b3be-986e930153d1"], "77de8575-cb53-451b-8b8f-7da0b09fd6ec": ["998558ef-dea7-4966-b3be-986e930153d1"], "b624cd05-c166-4243-91a7-4a3f693f185f": 
["998558ef-dea7-4966-b3be-986e930153d1"], "68a8f208-c093-4f42-afcc-c1e71dc344bf": ["998558ef-dea7-4966-b3be-986e930153d1"], "a001c1a3-207f-458e-8638-0dbbd1a23b20": ["998558ef-dea7-4966-b3be-986e930153d1"], "be2fad5e-2033-4a8a-9741-2b83774c1ac9": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "93c0ee03-f986-4d51-b9b3-8367e10d9256": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "9e6f708d-f474-42bc-8052-4762c8d80510": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "ba8b12ee-5d56-421f-ace2-208edb8cb51b": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "3f587118-82b2-499c-ae3a-e1ec3142c71a": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "b29a4d65-c495-480a-89f0-fc4e91aa0c70": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "4e5b85e2-a644-40e2-8abe-4c376884d3eb": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "27cee14e-d18d-48ea-a59d-8e81fb9daa11": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "b3dd37e2-835e-4821-94fb-6bf7178aa938": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "1d89baef-c455-48c0-8ef3-307b96c366e3": ["6d5fe8b5-2369-47c1-9bd6-5aa5d716523a"], "aa7473ee-3668-4669-ae62-9f1a1516a53d": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "fac36b7d-de59-49ac-bb84-23feb28373e8": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "4e0354ea-fffe-4a01-a4f1-d51ae008059a": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "0fc325e8-7e09-4ea1-9db6-258a0ae665f4": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "7d295303-fcad-4c6b-8c7b-f3d3081439ac": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "96eff3ab-89f0-4b9d-a269-f0357278d6bd": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "2d48e1ac-0686-4e2e-bd95-e2db7f8c5339": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "0b0edbb6-e73f-4cef-a8d2-76c2d7e8176c": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "d3db021a-8f8a-4113-b5cb-0ef5de176095": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "6c3c2809-c142-4336-94b6-0cf80dba8956": ["3c0b8e22-5102-47e7-b4a0-8252e2df10e3"], "ed5fe850-efcd-40d3-a5d3-0cdc40a8a5d6": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "1aa205a1-c8ec-44b5-99e2-66b9acede8d5": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "107537bf-1e76-4d1e-ba37-84a3eb8872c7": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "ed6516af-d770-452e-88cb-cb49fe224a96": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "aaec1c72-545e-420c-83dd-44ee03eb5ac9": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "4b3d37df-b2be-4662-b131-e87a2cfbd191": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "404568f3-40d8-42a1-a031-1c920e94bdb4": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "b41f2aca-4395-4b37-bfec-fdbe04578292": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "14ac205f-14d8-45f2-b537-5aafa71096fe": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "86ffb580-7422-420e-8dcc-d891924d573c": ["da2d77db-b7d9-40d4-b923-2bb8dba6d444"], "7d0b283d-f144-4a82-b19e-c2112a913ed5": ["868df082-6495-43dd-a3a9-a3989e022a35"], "0fe16b30-1cf2-4712-a01d-4f6ebe321037": ["868df082-6495-43dd-a3a9-a3989e022a35"], "30bc10d0-919a-4251-8d8d-b540b171b181": ["868df082-6495-43dd-a3a9-a3989e022a35"], "86589cc8-2f3e-4292-a99e-3a491d7a3790": ["868df082-6495-43dd-a3a9-a3989e022a35"], "7bf74331-98cb-4cc6-a850-92ab97ea0e1c": ["868df082-6495-43dd-a3a9-a3989e022a35"], "6a18c1c1-6ac2-497c-a1fe-11422ba30b35": ["868df082-6495-43dd-a3a9-a3989e022a35"], "0af459fc-739a-4dd4-8b24-8e56ff57fd4f": ["868df082-6495-43dd-a3a9-a3989e022a35"], "8d37a2d1-6bd5-4294-8e51-2cadcc1361ac": ["868df082-6495-43dd-a3a9-a3989e022a35"], "d141a6e7-cd0a-4576-a702-58134cba4442": ["868df082-6495-43dd-a3a9-a3989e022a35"], "940504a7-7a40-4be4-be3c-175d09bb3ccb": ["868df082-6495-43dd-a3a9-a3989e022a35"], "2c185373-e28c-400c-a25c-f72dfe3eaa57": 
["d40255dc-30f7-4d12-a137-356c9331d036"], "35d3df9b-ca04-45d6-ba61-910781efea5a": ["d40255dc-30f7-4d12-a137-356c9331d036"], "9dc2b214-8912-4afb-9003-08f83e541c16": ["d40255dc-30f7-4d12-a137-356c9331d036"], "a5eacbd0-3001-4167-8925-1156e278be54": ["d40255dc-30f7-4d12-a137-356c9331d036"], "fbb7532d-4e3e-41f8-8fa9-f7b5a02d5cf0": ["d40255dc-30f7-4d12-a137-356c9331d036"], "a9a83f45-168b-4db5-ace9-45e7fc8c91b9": ["d40255dc-30f7-4d12-a137-356c9331d036"], "3d684fd5-d832-42dc-b175-14f788a2e83f": ["d40255dc-30f7-4d12-a137-356c9331d036"], "fd5250fe-6121-425e-b4d1-c8e0bb203d19": ["d40255dc-30f7-4d12-a137-356c9331d036"], "08d9cafd-06aa-44de-ba02-3e37b37a6b17": ["d40255dc-30f7-4d12-a137-356c9331d036"], "98218ea7-de37-4779-8228-a2bbdb9ff990": ["d40255dc-30f7-4d12-a137-356c9331d036"], "3a930566-71ec-465c-b726-c04b4278a904": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "f5324885-70fb-44aa-abcc-bacc277ed26a": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "48616ea8-c86b-4f86-aa5b-e94d1bf34068": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "db5d6aab-79c3-433c-9530-afa6fd56678c": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "adb92956-0666-4e9c-83d6-139c568fedf1": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "00d0dc92-3f12-4a6f-97a5-9bfb57bdd801": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "1391f61d-1d2c-486f-88ab-108403d31a1c": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "1b6fc2b6-eb9c-4f80-9435-aed7e4c8c00b": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "b79e8099-38c5-4a4f-b636-f1054ae0ca4f": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "3fdc8d07-1b55-425f-93f6-e66f1429df36": ["dd4ef4a9-d623-4695-8648-6dc788ce570b"], "a9dc1f08-5014-4341-8489-8745d5cc3daa": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "1b659988-353f-4357-8611-7249beebc92b": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "ddbf64e7-c5b6-434d-8d6e-31d1e5d33d28": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "4a4cbdcc-49b2-4fb2-8c24-812bca763c37": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "1c1c0ea6-3ed1-421c-a55f-e58d82db9618": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "62ddbc5e-ccb4-4713-b294-20cbbd2eb010": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "ffd63cbd-ea55-4aad-aeaa-0dbe6fa096c0": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "483b6c63-303b-4f23-8f34-5b22a4ec2765": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "ac189b5e-6349-44af-bf0f-702798289347": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "8a3d3981-24a8-498a-b509-25543fd6afeb": ["6c472ef1-8115-4732-a5dc-a39c15d5eb94"], "638b3c99-e6cb-49bb-95c2-f267b2d1ec00": ["f469924d-ea0b-4792-976b-4b057974feca"], "f0977b5a-0de8-435b-bfee-e6a457db0442": ["f469924d-ea0b-4792-976b-4b057974feca"], "d2628c21-64b0-4b61-a595-7097a4f233c5": ["f469924d-ea0b-4792-976b-4b057974feca"], "17c050f3-312d-46b8-b066-2bf66ab0142f": ["f469924d-ea0b-4792-976b-4b057974feca"], "1116721c-2698-4938-a6bb-f96c2698380d": ["f469924d-ea0b-4792-976b-4b057974feca"], "68367977-e97f-4f35-a78a-63620dbd4fb8": ["f469924d-ea0b-4792-976b-4b057974feca"], "4770227b-6fd3-4a08-93e2-eac625aa3e2f": ["f469924d-ea0b-4792-976b-4b057974feca"], "a50d0a5a-b180-4209-b057-8a3ce7ca5c8a": ["f469924d-ea0b-4792-976b-4b057974feca"], "e538cb20-92e5-427f-b9ac-cbe7f61c3654": ["f469924d-ea0b-4792-976b-4b057974feca"], "108fa021-85ba-4c80-b094-bc316a5210da": ["f469924d-ea0b-4792-976b-4b057974feca"], "25bfeba9-9dba-40e4-ad97-8c839ca8c51a": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "67f472ad-fa27-4999-b17b-c75cccb0ef3b": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "f40897d1-e74b-4ae9-bed5-132e9360c38f": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "07c1cc8e-f86c-4899-82ed-bf9b470ddf20": 
["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "e43daa73-3e5d-4c9e-a3ca-075dec799ed3": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "ac6364d5-5041-446d-b5d7-e6d6fc9fa97b": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "eae909c9-9200-428d-9d2d-0cf8ae39ea48": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "e04d6574-6d9f-42a2-bf82-086109b4bfec": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "43545e6a-0aa6-4c7c-89f0-464c8c0cb1f9": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "9ad47d0f-1d88-4ffb-b427-4f8ecaeaee01": ["e072ffd2-a697-41f5-ae4b-0ee596f98d0a"], "5612aea9-1a55-450a-830f-c05c88a34e6e": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "69417cf7-74ea-4fe6-bb4e-c4424621c4e4": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "85b1f1c5-fb0a-43ae-ab3f-703b8aa0663b": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "1d2c36f2-6923-4498-8504-c5c93c6eec30": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "4d3e7bb9-45c2-4918-ad20-9225ef78885c": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "886bdf88-e7ed-48b2-8616-3b50743a28b8": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "f6f13394-12e4-41df-8d73-8f8a59b625c8": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "7b479921-3a77-4f29-8406-2f7b03ab8878": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "c60ab7a5-60a1-49f6-b20a-5707d699cb8e": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "32fe7223-3f84-4ef2-a178-0fba0a0e178f": ["c02ee20e-7730-4a17-ae99-3ef97db1ac6d"], "19b3ef87-4631-4218-ba95-80b2c105535a": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "005c7c98-75a6-4242-878e-530c529e641f": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "1ba49c65-ac05-43c5-806b-21171820c4da": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "deebad53-75a3-40d9-83f0-b09ff89b0b35": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "04ad04d2-396e-4701-a6e9-2422c69accc7": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "d70da12d-584e-4e52-9eae-1f9e2f62e745": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "56387c3a-2259-48da-88f8-d5890f89f39d": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "857b6f51-d048-46ae-85a7-5495bb30553f": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "8ae7d54f-19dc-4a4f-9065-c5993f18db14": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "fe8e6119-c018-4ce9-9b76-272740890ecc": ["4c667dca-e6ab-4029-9e45-9e9f70dee4c1"], "b1a35db2-e2c3-411a-9942-84f2ce3db112": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "985b4d2d-1090-4510-b623-7cd70b25fb56": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "a0eb8337-f777-4726-a13b-d7ec625951a6": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "6c03ed54-6ab0-4045-a251-0dce83ca395f": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "402b5f7a-b09f-487b-b5a9-72eced264252": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "18dc0b62-513e-4205-8429-d0f22c15a613": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "8ce49ea6-adba-418b-a59c-9007b6ae81da": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "f8ab2a45-69cd-4dc5-a52f-d340a7e463c6": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "5e9092a5-7669-45cc-af06-639af0388ab2": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "d829fb79-fb10-4f92-b38b-e155bb3fcee3": ["b8c15525-e535-4798-9d4a-7c596b7a91d3"], "a023b48b-2c54-40ef-8767-5d3f03fe2914": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "33fa9547-975a-4352-8f53-81d66f44c49f": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "a6fa8d04-3c9b-457d-88fd-dfa23ce40ff3": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "8f6d5636-40eb-47a8-ad69-b87dfebea48d": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "a71268a9-4141-4a0e-b1fa-0076d05cd42e": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "691fbe00-89ad-498d-8d41-323cf1e97661": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "e6e14601-db3d-4f7d-8676-d4eab8ecb25a": 
["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "7068ff06-d301-4011-b487-7cfba94a4861": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "e8dab0e4-d104-4378-a736-39d0227294b1": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "aca5d4a8-7b84-4056-b0e6-e04d36ece68b": ["ddd2f334-6ef8-42e7-9807-2ca32d1cfa58"], "6897739c-d427-406f-ad1d-d7c4c77294d3": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "e0dacf35-3db5-49c4-9766-2c3dad84baab": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "226859d7-3332-4379-8494-f26272702d7d": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "b791d661-0b16-4057-b391-7d89660c8a6d": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "18041e17-7026-4c7d-9458-03b95bed2260": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "efafd5fa-5e44-473a-89e5-972ae57c28f2": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "f6f5b288-0e56-4759-a22e-70b60f145993": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "06185446-c806-4fde-a2bc-435e67f172e3": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "e23e338c-f249-4c47-9177-bde54d129a27": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "20366931-36fc-4932-be26-4a2236da5849": ["2df14ea9-2d95-4d2a-a898-51924df1d539"], "c02e5bf5-5f98-4e9a-9951-b476de779a36": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "67fb8dc5-8f12-4f4c-b3dc-891c8219e312": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "6df4dda4-63e2-45ab-82e3-2ad196f97ad3": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "1626e1c6-eaaf-458a-9d62-6539ba4af917": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "ade6ed4e-ae4a-4354-9696-9be41db683dc": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "c37cf334-7c3f-4151-88b8-baf4569164a2": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "8ccf5159-8cbc-4786-9c65-58c3df77213f": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "91601426-b8bf-4362-b7fe-360c419ef972": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "822d55ee-6b63-4714-a957-a43dc8d843c9": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "284bee81-fb81-49a5-bbe0-49ef74483c20": ["9e6e5d45-3bc3-48da-b43d-f8038b9cde7b"], "bfa377f7-0ab2-4134-b646-98d6866d752d": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "bd0492b6-4b03-4c9c-af31-313a235c4c93": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "8eebfbca-8240-4861-bd05-6a53ad7ccd8f": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "3ceb3870-78e8-4ec0-9491-241fccb93ec5": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "7d63dee3-6d48-4393-8be6-8480ce6ced6d": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "e027c23e-9be3-40d9-a43f-99b9cc85df42": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "eaf96d9f-b112-43f1-ac2a-15c56c9e2ed8": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "4752b74b-3b45-4293-84a6-9129151862be": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "c98a9050-6d52-4601-8554-821c0659bcb8": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "015b7ed6-dfc9-419c-b332-8d22138cadfb": ["baca4be8-4a8b-4a79-9934-69c4047b87fb"], "b4881d3d-9b06-4fa0-94ca-6df2782ac2fd": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "c187cc72-ec40-4262-a0a8-dd0a7d8236a5": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "ad51c1b1-347d-4f10-b264-363ddcdfa938": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "a7fc40f7-d119-4980-b911-ad106e77b51e": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "102600d4-12bd-4196-b867-7b5b9f746130": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "307c1ef5-e97b-48be-a89d-57c5db8c3e6b": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "761bbb97-b978-43a2-8f4c-50adc790fd0b": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "9e565cf7-9f25-43f9-9c04-55f723fcfde1": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "b14dae2d-32f4-43d6-80c9-6f7752b5ddba": ["39c0388e-6071-4e28-a76a-89954c7a76cd"], "b976da61-57a7-4e5f-a8f0-1328f4cd25f4": 
["39c0388e-6071-4e28-a76a-89954c7a76cd"], "a21ab356-0089-4dbf-b788-2b90e85ee801": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "b444448c-39c6-4825-a428-c35b6541fd7a": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "af11b4d9-49c2-4c1e-a752-6ee9983d2405": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "667c8aaf-0338-4557-83a6-11d3be7ef5b8": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "51f1057f-cd68-4ea1-bf25-3d2e6c150aad": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "efd5daa3-ad2f-403d-9faa-cfc0f273c6bc": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "cb42a96f-6b88-4a1d-933c-4b66bf56803e": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "54e478ee-901e-4bfd-b77b-ae6dbf060187": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "0c29d394-6d79-42ea-ac56-98ef3e0c1ddd": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "334d816c-6309-49be-8b84-552c577f1ecc": ["6616086f-0d37-4a95-8d4e-56b9a0c20ae7"], "ef292182-a80e-4c2e-8112-92ec138b322a": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "9a613a68-8a96-44f3-924a-fd86cb2bd642": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "21861de8-59d7-4dbe-a656-594afc0c20c0": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "1744f37c-51db-4c5a-b1a0-80edb5bd1d47": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "4bc70be6-98e2-4044-81b3-a337497b444d": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "73b8b746-f1dc-41ac-8aec-5465b68d6593": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "55b0124a-dcc6-42f6-b033-337a237adf54": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "96eddcb3-606d-4fcf-8ecb-1aabb6410965": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "7c0b2c60-6f69-4f22-bb40-b69c5ea6020d": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "fc8a6a87-f230-4bb1-bcbc-b84d5e624924": ["c7489ca0-62a7-4494-9c38-dc8c208a5226"], "586aeb54-40fd-4a75-a7c0-246e11eb3ae8": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "aa626c2d-93f9-4ce6-a685-92c1a419dc4e": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "3e4a7bfc-a423-403b-aecb-603a13cbe16b": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "dccc7390-abc6-409c-835a-9ebe4f7d0b1d": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "731906bd-9376-465c-885b-47262c0ac060": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "a869d0fb-d027-4edc-be16-ca89dad5107d": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "05fba6f9-2740-4b27-beb4-8b707e3fa913": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "14a3553d-b4f0-4a82-82fd-f4ded8de7679": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "53598710-70e3-4180-bfa6-20045b17b328": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "d156ee61-a192-4285-85eb-3e0f4cd4eb13": ["80737253-4302-451d-a2f5-0b3f5dd957e8"], "6aceb768-3368-49cb-90f3-790b8d4131a3": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "13caf8a9-5882-4525-8431-f068b49ab01c": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "0a4c99ed-0f3b-4ec0-b9c8-91d7a35b593c": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "7a7a8144-8fa6-4ca8-a505-9a5781f7850e": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "cb3616bd-f4d8-4795-866d-04c8fe1a8b49": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "77e9d110-209d-445e-8b11-16e0f2209022": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "ad18f30e-051e-4a17-aa6a-1192c3cac7a8": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "4d0345e3-6423-4930-96b7-564bf85f7af2": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "af6f7cb4-a265-408b-a84b-4b3fee1fde06": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "0718d3de-69b5-45f0-942f-79426b4d5ad4": ["0dbf8ebc-7f28-4e8b-b047-5c5399e261da"], "3e3a23a7-0b1f-4b74-85bd-a73370b108e6": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "c4cf1592-0435-4ae4-8a4c-c5ecb9257eae": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "d6c51c91-05a7-402e-893e-5fc29584968b": 
["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "e0587b4e-ddbf-4754-8557-bd12c8e644da": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "2544321b-4f79-413c-9a51-2b0b5346a1b3": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "9011cc3e-7c14-4371-99d2-06a06ec529e7": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "0048257b-c29b-4594-829c-28fc43d6a999": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "99ba8f01-a9f5-4945-bc6b-2c290ee16f8f": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "78f31d73-92bf-4caf-ade9-8bed953bdfa5": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "33b17f33-82da-409e-8c46-2ba705142a99": ["73ffeb66-1a26-4495-82ee-3a48226ba7f9"], "8d45f0a9-2470-406e-845a-42ae6faefaaf": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "e1b36541-9713-4537-8ae0-68c508ee31cb": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "9761ef9d-05ad-4eb6-84fe-a3e40e15721f": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "6f912bc7-a00d-4201-a5ed-67ad675fb196": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "d653daf1-bc71-4825-b8c2-d7a2bbbad861": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "c65c29ab-b699-4074-98c3-1d194961096b": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "2a5dd66d-4d91-48e5-b4d1-0e97ead849bc": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "7ce28472-4d89-4da8-9e89-4fc46c2a13c5": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "afa13ecd-3449-42b8-90ba-ea5b76fb4649": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "83bfcfa0-0f58-42c0-b0fe-8d24efe51b48": ["b85154a6-7baf-4d71-a85b-7d5874470889"], "efd3986d-bd53-4b00-8759-234ee59b5935": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "b9552078-c814-4194-ad10-5170b3d76f76": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "24d775f9-adea-4036-a91b-2b8dbdf09f8d": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "dbef7a7c-d59a-44a9-85d0-0922c437187c": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "06d81d53-f1b8-45b1-9d99-aad6159cf2f1": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "3e1a2639-ad92-41c2-b64f-74852b3978d5": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "137541ed-d9cd-4d7e-b4eb-09e87ee778a6": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "f54b5a57-e54a-4eb4-b5ea-3861f01c7a4a": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "dc89246f-d70d-485b-a723-08770bd02f19": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "9a719c6a-76c4-4af6-ae8c-1028bbe8568e": ["7ff9ca89-d042-4d93-86ca-c819ce96f571"], "2f7c682f-8e96-43da-8397-67ce42d16885": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "fdb90e56-c634-47ef-8f82-cf244064da4f": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "03bc26cf-d584-4f6c-952f-6da051e2ec27": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "b36444b8-7e2e-4cc0-923b-d70acd084d36": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "a8d11191-6908-46c0-9ccd-5526ac9ac777": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "4e4d21bb-0a0d-49fd-948f-d1510ecdda24": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "4ba397c0-1b13-4613-9e30-01238dcdb761": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "c38be416-9682-4d97-8adc-600dfa5d5218": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "d8c53a02-255b-4261-813a-b50fa08cd969": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "7568d3e5-b407-4b82-a31a-2d7048871862": ["9ce6023d-f47a-4508-b851-7bc92ec92e5d"], "fe6be310-abb6-4d17-b332-42b573870741": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "4da20bc3-35b4-40d8-8699-3a01d413ac86": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "e3b5fbce-4994-4bee-bfde-419e661a8a25": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "e6f6d4ff-5bc4-4809-aa55-4639adf8fbe5": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "f4c81b62-7898-4380-bbe0-2e9ef2568424": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "1270cceb-5232-47d9-b785-280f19ce1fb4": 
["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "77d31311-acc9-4a0a-9ff7-8cff6cda43c8": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "6409d026-b4de-4585-b70c-a6aa6203ecdb": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "e6682ed3-e17b-47ae-9d07-f50e8f6c2df3": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "13d62abf-9a46-4079-9a23-4ed18c3d22a9": ["09e1411f-8588-40fa-9a33-66cb9e9a8933"], "e4777246-3922-49ed-a2cb-5205495a2534": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "0f7bdfbc-7ab6-4861-8efd-a3729e4128ad": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "3fbd4df7-eb9c-4760-afb0-2f341544e453": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "9861991b-cae5-4ecc-952d-0c2a6be10285": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "4219fb8a-0ac8-4ecb-9d80-7ff321f4cd59": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "74b3d860-da7f-439b-8e61-5c69ac15c28b": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "bf920e1a-ff07-4d59-b892-8609aa91fdce": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "19f1343a-292b-4bb5-8567-c39e272ec0ae": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "8e6fc90c-607d-4e5f-8946-0f2b984a4975": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "e2f0625f-8931-45c4-b593-bb284f09ddd2": ["a1d58349-50d4-45c7-922b-3fb722db0852"], "7a5e1a08-397a-440e-8f6c-1af66ee702d0": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "75e53249-b463-44d5-83be-bdd902fc92ce": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "aa76d60e-073a-408e-bf7f-44ee6a8d0a9b": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "66ee0108-3649-4abc-95cd-c102ba9c3451": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "67c464c5-77f9-4875-8e31-8402a617f457": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "2b18ac77-f66e-449f-bb8c-641d6d6e522c": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "a8055010-820e-4e2a-acbd-4363d93ffeef": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "b1c0e6a1-f07d-4e7e-a97c-2264522b8bcc": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "421c1f70-89d3-45a7-92c0-6b6fd7393f78": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "41710f51-0c31-4835-a239-bba06e0b5003": ["9414a260-83d0-4bdc-8083-68512cb56f75"], "e9ef61d9-acfb-4295-8166-81d2d8e29cd1": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "63251280-da3b-4d49-9e3b-8b79726b8e55": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "c739a52b-1410-4168-82eb-52163e1c9cc2": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "7da3dff9-04d3-4cea-814c-cd1c71e8f2fa": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "70a71142-ccc1-46c3-85cb-f7e5573d1ea0": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "b1801b81-b269-4946-9f2f-f3bd71107763": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "22b9c165-4460-449c-a296-6369c392e632": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "f40a3247-588b-4f4a-8c27-0234b1af2a95": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "55315bb2-f065-44ae-bc39-ffa0fe34eb65": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "f2e63c22-7347-47c9-b0a7-ff7f363ad3c1": ["e2a000b0-29a7-49a1-9c87-de674486a78a"], "9128749a-0d1d-42e5-94c3-9b820c40125b": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "aab3924b-582c-4dc6-9118-49c1c8b51b5a": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "aa7f061b-c839-40ea-ba44-f24f4ab55467": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "bd8aaa1f-1382-47e3-9ce1-09f9e7164ebc": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "048678e4-9c45-4540-a0c6-72c404e2106b": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "51ba72f1-b32c-479c-820e-2d02252ecc21": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "4ec83564-814d-4175-8f0f-704d3e58d2f7": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "3569ca2c-24e6-4785-ba6e-317a3d302202": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "2869107a-ce0c-41b3-8508-c60d12c31fe7": 
["5f733903-c4f4-41c6-8410-033c4e6b3390"], "cc253ba2-15d7-47dc-a233-8bc4a26cdedc": ["5f733903-c4f4-41c6-8410-033c4e6b3390"], "153cebaa-730a-43c5-99b7-10c18bec1538": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "dba1f4e2-98a6-4d42-b384-1a54e1d6ad1c": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "becc940f-e73e-4aa7-8581-f1ba6def41f0": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "7a90fb5d-f437-4b59-9931-c0af0ec35339": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "43e9e772-adf1-40b7-9b1a-9fc44772e900": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "c2a5af10-b86f-4924-8010-b80c2782b006": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "6e36882c-0b7c-4f22-a9d0-560ddc1859f5": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "7e0b3a69-208b-4140-8f56-b1041396d409": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "7968a9ac-bfaf-4a59-bb35-1125136d6a96": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "c942dcb0-ebf6-4dc8-aef1-d3b7642ef3c5": ["9278423d-7d2c-4d91-8e74-2ff10c9952b1"], "aab11cf2-1200-420c-a291-040945c00aad": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "96ade2c4-dc96-4d8d-ab36-a0860916d265": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "1b1d4db6-e59f-4967-9a3b-17ad799ac4b4": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "ca0cdce9-6039-460a-8ebc-0d2da442b1a8": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "0df51587-f84b-43eb-9c03-341d92892edd": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "add464c4-0e7d-4348-91dc-15a25c5a309d": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "36e32c02-9ee0-485c-8d4c-110273810e28": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "877d24bb-418d-4075-b1e9-a626881ec675": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "3da8d02e-d37f-4c4d-a06c-1f61edd78d4b": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "841e4207-02f3-4f15-b3cc-0574eccf296b": ["c8290368-e6cc-4c11-86e8-211b0f6ff429"], "b4cfa32e-3647-4808-9718-9b77c8956ff5": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "34a1099f-5eb1-4cdb-bd14-0904a0d6883c": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "baf6bb09-9e41-48d9-b4c6-1d06d90b9bb2": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "fd7da839-a0e8-4f92-a7fd-2e894b6e5c04": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "a81b08ef-f745-43dc-8405-02f77e2ebee0": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "a3e89095-24b0-4e5b-957d-ac24b193a418": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "bf278d53-8d4e-44d1-a215-6464076e0e89": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "52b91d4f-d324-4333-8d00-e513e1af51b5": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "e8955df1-dc4d-47af-b782-993b5b795595": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "5cbad06d-9274-443a-8465-7a7347fd1bb8": ["92689676-f99d-4f3a-aedb-82a851140cf7"], "098c6a18-502f-48ec-999e-13d995b28661": ["1ef77bcd-1235-4349-a57f-76304c020694"], "699f9227-0a04-4b34-9fbb-bcdabe65806c": ["1ef77bcd-1235-4349-a57f-76304c020694"], "da24d51e-024a-422a-976c-cbe53754d0ff": ["1ef77bcd-1235-4349-a57f-76304c020694"], "d984a686-20d3-4998-bddb-d15ef2a55d67": ["1ef77bcd-1235-4349-a57f-76304c020694"], "7769d886-01e5-43d6-8808-a34ed6787c48": ["1ef77bcd-1235-4349-a57f-76304c020694"], "3f2e20af-2d2f-40e2-9c9d-5f447f5fd470": ["1ef77bcd-1235-4349-a57f-76304c020694"], "8cc369e6-5dbf-4195-bdc7-59d477935e01": ["1ef77bcd-1235-4349-a57f-76304c020694"], "2a846355-ce0a-4a42-bf39-c58e28255baf": ["1ef77bcd-1235-4349-a57f-76304c020694"], "97341ca2-a8ed-46fd-8b73-27cf49a776db": ["1ef77bcd-1235-4349-a57f-76304c020694"], "ed70d285-bd85-4086-a63a-22c25a560082": ["1ef77bcd-1235-4349-a57f-76304c020694"], "8c7fb32c-c935-4651-8283-e8c472a0b365": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "78c6ff01-8630-4cd4-a808-4a934eacb25f": 
["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "cd574726-80a1-47a9-b260-413ed3f52311": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "85722140-1b7b-46da-9ec6-432923b12175": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "eded288b-f225-4e3b-be1d-6732eeceebb3": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "d91e1537-de8d-491c-b125-765de2f2afe8": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "c91b6fc5-1d16-48e6-b88c-8dd1e3af6559": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "3cd4fd6f-9208-4ceb-a69d-dc920993a98d": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "e138b6b8-2968-4bab-8d4a-edca29460bcd": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "1c59e241-7137-41cc-946d-c1cb0d30a4b1": ["2d23ffb7-65db-44d5-bb6b-31fffc00e7bc"], "a4b6ef42-b7bd-444a-970f-c4708415a0ea": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "6b536f29-aa04-4434-b133-bb4e20ecd6f1": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "bde91ad8-b00a-4c6b-ab35-cbc33641a8a4": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "6fdb5ca1-7096-4613-ad70-ef413dcf51b2": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "5d29075f-46e0-4828-b712-657535007fca": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "3b68fd1d-60d7-4479-bc67-e72dc34b2baf": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "b8bcfc18-dfb7-45f2-94fb-686ffb671400": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "448bc715-abc3-4d6f-a386-a2e6c48b434f": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "35077bce-fc70-4f73-9e85-bd50ea0b4421": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "b299ae9b-dd09-4132-a8d7-685ac966b912": ["47d45fac-167f-4abe-85ba-c592dfc54e76"], "26969f38-1ae0-4596-8d89-e2796ab42cc5": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "4fcf7347-858d-4761-95f8-19fb1af11339": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "589055f4-8d10-4b06-b68f-9cfd4048cefe": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "40defdf1-6880-44b7-a1ac-b88f466b074a": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "8f3e8e77-90af-4d0d-8dd3-5d5f2e614d74": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "ca4886da-af0a-420f-b881-1207c2a9181b": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "c13ec0d2-6f92-4b7d-8743-62c761213c6e": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "b1ec710f-242e-4b53-a427-76970b24a311": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "edc582fd-2c53-4a88-ac44-3c229de7085a": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "f9e0f746-0407-4024-882b-ca132aac8938": ["ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5"], "547e953c-3c15-4ee0-b243-3c2761a19028": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "8787fe19-eb36-4f63-8355-085d09e9c966": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "4558a005-edd2-4a8a-a3a1-96e8a91bdc99": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "905133de-f72a-42a8-9a2f-e194bee80d01": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "0093d7ea-7e1a-4cad-a33f-26434d5e9417": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "d802150d-0458-44ad-94e6-d48a2bafddc7": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "3dec342d-89f1-48ea-b474-124dabac2778": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "26e9fdf4-b265-44ac-aab5-a4811434f00b": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "e9a896b0-946f-4635-90a0-2abae8017e85": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "2a1a746c-d31f-4062-9339-f652ee1cd0fc": ["f5b7c80b-9987-4c1a-90b7-40725e445d68"], "4adc70dc-e01b-4a8d-bd8c-adef4690717c": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "81c46268-ad7f-45a4-ab44-b39eec645df2": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "6c21060a-4d83-46b3-a846-697b9a09c91b": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "7753fc6c-a2dd-4e2f-9694-1ad7f0d6c64f": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "ed7e713d-1382-464c-aaa9-eb64bdc88fbc": 
["1ad6e133-af0d-462b-8fcf-c037919602b6"], "26951caa-8dbb-4149-81de-40d042c190ba": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "19fd9fc0-5705-4c8f-9471-301741097255": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "1d8d1efe-bee3-41c4-a9a1-549978df9821": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "c886eb12-d921-44b8-8452-f1f5bc1225f4": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "0a84141f-d8b5-468a-a16b-93a9a0a47587": ["1ad6e133-af0d-462b-8fcf-c037919602b6"], "42804d52-5b54-4e4a-aa0b-0f295b8b73bb": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "beed6021-c77f-4dd5-b636-dddc90934522": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "7f734e3f-9eb5-4d60-99e6-c12b8c79fe06": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "b5c02363-613b-4083-a3a5-ae724917ecbb": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "84e252fa-dec3-4ec1-9b3d-a0e88f6a8b55": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "fd9556ae-448f-4d3d-af10-8089d6b043b8": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "e580a445-024f-4b81-a591-c79fa6f7fb0a": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "ee7f5729-19f1-41d3-ba40-12390626207d": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "0b0819ad-858f-4cc2-9a1c-41f9c77d93d6": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "3850de5f-db60-43a0-94d4-63536424c707": ["3c014cdb-93a6-4ced-a0b2-4da9ff5e1441"], "dcec4fe7-764a-4c8a-b07f-90fcbb7708ba": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "e9fb9f53-24c3-4408-8e9d-508409b7f92e": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "1c6c655d-fa19-4fa2-8143-ea52a0a44611": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "97179ce5-9493-4f29-90be-01818f234c9c": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "10be3d49-59a6-4675-8216-5d2a7bb76336": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "1b29da2d-0fb6-41d6-8e7a-38493f5b169f": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "43d4dced-d40b-4a55-9481-6f383f7be02f": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "5f442e2b-07cc-469f-ac6d-9e468c48364d": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "ed225928-3594-4648-8270-ea357b99ac8f": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "2ac2ed73-c6e1-4af9-8a79-5c62a839798c": ["cf73a083-880a-4afc-86b6-5667ae73d419"], "fb32bc70-21c5-4841-beea-62de5c884172": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "a012d748-c92a-4b5c-ba2d-b54a3914756e": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "37d2418c-90ce-43c2-809f-d6c0c96ef7c3": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "8af40417-015a-48e0-965b-fc636209e614": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "805a7e12-3609-48b5-8320-157d4b583f8e": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "baa6b25d-2b92-4536-9b88-465f65d6d2fe": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "4e4d7968-ecd4-4428-9b66-54567b3811a3": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "7e5eef6e-1818-4a3c-941b-7cbfd29ab0a4": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "4ed36e1b-43e7-48b4-a4cc-ef3087d2246b": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "5d208543-8d35-4012-8c41-f13b2978eae6": ["cb37af2e-5517-4a81-8049-07d3d61ad295"], "a8dacfd0-ba17-40c9-b350-208d2e297cd2": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "32da3eaf-6181-4aab-a4f0-41e9f8e2c415": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "f713140e-cb77-498a-b734-2e3226c38cfa": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "5aa920d1-a55e-40d6-99a9-e31c968672cf": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "3ebe14d9-c0f7-44b2-afb8-cd0da676e8be": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "32b1ece3-14d1-4487-853e-c72b7d522734": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "5dd34d24-f3ea-4aa7-b42b-0f26e197f095": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "3f3496fe-0f67-405f-9bec-0181454b415d": 
["2183508f-9a9e-41b1-a517-f2fc17931b63"], "98b6ca14-c6f1-4733-90a9-e8fcaac3e450": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "dd5071c9-0180-4916-b959-c6a07c56db03": ["2183508f-9a9e-41b1-a517-f2fc17931b63"], "aa2070d5-b6bc-4073-a4cb-b2a83531da3c": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "ca96d7d3-1320-4416-8cdd-7963833fa563": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "ab6fdd15-c2c9-4c64-9836-5410506b43a7": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "cc9ce0e0-cd3b-452e-97cd-67a557ba37e0": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "31087be7-62d7-4c98-a630-dd1d81d1181b": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "8afd8443-7a29-410c-96b7-f41aea383947": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "4a65f47c-1889-45a4-9ff3-a8bf32191dba": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "8cda0b73-a4d4-4d29-9816-3f0cf0bbc8b4": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "b25eb9c4-f4bf-4134-b0d3-e134b79ace2d": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "27b484ed-f229-4b08-be96-75c3abc5f8a3": ["39d7af6a-7298-47bc-a608-55c4a9d09d31"], "b3f70511-7f40-4e59-81c3-ea17760970fe": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "6541a0ad-b207-4b2d-a359-3650fafa8943": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "669d0023-9a92-4a21-b449-8fa1a6af2b88": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "a31427f5-6815-40aa-9f6e-73e09d22a931": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "7084c993-fcea-49d4-b9d7-2a857b9f45df": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "e6cc306a-b275-43af-a0a7-799fe2f76eb9": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "f593dd60-a1e1-4ced-b812-c96907870b2a": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "c8be6a16-64b0-4b54-98c7-079cb7ec7717": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "b5e8f4bf-d285-47ed-945d-af92d42d30ff": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "8e4c0306-cc08-4518-83d4-6f690dd17d67": ["7ee6f299-9e32-45b6-8a95-f63af7ab302d"], "a380299e-40fa-430f-992f-328134b228ce": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "7cfa0b20-7c5f-4732-bbfb-bb227472a8c9": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "fd5786c9-4268-4527-8e19-6afd150f1710": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "2ce45131-027f-4fee-8260-1dc2b57ba04e": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "8c04225d-b7a3-4ebc-825e-8b08770983d5": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "e0ebe468-8026-4719-8e3c-8f244e4b01e9": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "75212bde-5e1c-41e8-98c8-52a538a82b6a": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "e1c0318c-d858-4aed-b2c0-ef0e1203fda2": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "a96ba846-68e3-499f-be1d-9a65c95f7103": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "0ad0778a-9494-4e3e-af88-c4694c4eddcc": ["56387bf7-3970-47f2-a78c-1d685ff6ff3c"], "f974b90c-f825-4ac8-b640-a6316a1d595e": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "1fbd890e-3037-4f08-8ca6-d2ccc5b997e6": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "77cb5cc4-479c-46ed-b80f-f461753002f9": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "6dba3c3f-8828-4ed8-a5aa-eacdf7a2c278": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "d9669bfc-ca24-4606-a9da-8cdef4498029": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "decf6dd1-9ab9-49ac-b195-232ac4b5b5fb": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "e7207f21-6721-4ef3-9f8b-fcd88334e364": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "8db3af9c-f69b-43a8-8614-e16adacd3ea1": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "30fbacfb-b9b9-4404-99ca-aa7dd91fb39f": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "491c9db9-eec0-45a7-8d35-3b865c6cf096": ["9d4dae2d-1304-42fd-853c-5e8a87dfd8ca"], "d5096416-f388-46a8-ba7c-efe00591a79f": 
["225dc155-5855-48c7-8617-a254366cfdbd"], "047f16f9-4bbb-46e2-bc04-02caae32fad3": ["225dc155-5855-48c7-8617-a254366cfdbd"], "cc1bd459-a505-4eb3-bb21-c7ab7b777125": ["225dc155-5855-48c7-8617-a254366cfdbd"], "6a4f1ce1-50e2-4a89-94f5-90f724c34e60": ["225dc155-5855-48c7-8617-a254366cfdbd"], "c758fef6-ab25-497e-8fcf-971551bff0b3": ["225dc155-5855-48c7-8617-a254366cfdbd"], "a39fde86-c926-4df1-9cf1-2c15908c1429": ["225dc155-5855-48c7-8617-a254366cfdbd"], "9c9c9015-c691-44e8-9284-f62ef5306243": ["225dc155-5855-48c7-8617-a254366cfdbd"], "417326fd-a953-4bf5-a161-0e5892bb974e": ["225dc155-5855-48c7-8617-a254366cfdbd"], "d89c4058-3ac5-421d-b581-879d16d5de44": ["225dc155-5855-48c7-8617-a254366cfdbd"], "8e1c792b-4ced-4778-8949-6daa22e033fc": ["225dc155-5855-48c7-8617-a254366cfdbd"], "2ea244fd-8aca-4018-b8b0-223714fba699": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "2fb245a9-6b49-454b-9ab8-d94912761aeb": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "dd48491b-a737-4616-be6a-745b00f5915b": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "f027880e-a213-48a4-9139-9393e699642e": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "c704d4cf-6b24-44ed-9113-aa42008d5f83": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "366bacdd-bb77-47f2-a39c-c7bf6169f8b7": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "25533810-cb28-4c5b-9792-7183df9cc659": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "26e99d87-0a41-4952-ae1b-35a9302a5920": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "907ab49a-3d4c-4679-8038-f781b13ee780": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "aaa30f81-e6c7-4103-8a63-152c74a2b091": ["4b9f6363-19fe-4e5d-b658-d8ffcacc03d5"], "b0182517-507f-402c-9d72-7b621f64e5b4": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "1c9c675b-2d74-4673-9aa9-cb84ac14fa32": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "6d42e83b-e788-4ab9-a002-cd545c1513d7": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "b6530c27-37b3-4728-bac0-f9d4473dbdbf": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "9179505b-25f1-43ea-8329-1f120ce64c61": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "d7d542fb-ce5a-4048-8f5f-b650b4c35bb2": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "e1052cc7-4c2d-41cb-92c3-998f3211584f": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "f0f3d3f7-c4a4-4fb7-acfc-b992f475242a": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "235fe3e8-a203-480d-b7b7-9f411ea2d613": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "46d01f66-a474-4e8e-ac51-c8d522ef006c": ["f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5"], "874107ea-6ed8-441f-968f-3704b981bf64": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "2cdd6080-bdc8-4e6c-b474-90b02b47b693": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "a0fb147c-38c0-4fef-9b75-18e76cf8a420": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "df1759f8-c9a0-4abf-9108-e0f81494dbaf": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "1cfd9941-237a-46a0-be9f-b60e2511126e": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "4d85eb98-cd48-4fdc-80d4-2923f78551af": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "3e96a9b6-2c8d-490f-906a-bf61de6087dd": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "55df2562-e98c-4baf-a911-341e5304f4a6": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "364d50e7-0d67-4aee-b2a4-4e278b9be1a9": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "759c5df7-8c61-4805-b255-184f63b68e04": ["1509d387-1fa9-4049-ade6-b412686b43fd"], "cf41653e-3c18-45ed-a00a-19afbd790804": ["66b30463-4011-43f5-8112-826208de6c25"], "8ae367c1-1ed9-4fe1-b0c5-25212e409675": ["66b30463-4011-43f5-8112-826208de6c25"], "624e6325-9549-4672-912d-2863775ecf64": ["66b30463-4011-43f5-8112-826208de6c25"], "c0e48c62-1775-4503-bc52-2b14d98033f4": 
["66b30463-4011-43f5-8112-826208de6c25"], "8c95bd57-9819-4866-946c-34d2b7751cb5": ["66b30463-4011-43f5-8112-826208de6c25"], "9134a063-6c7d-43f6-b9f4-d7e0712c9e59": ["66b30463-4011-43f5-8112-826208de6c25"], "afa24cd5-0d80-4e11-bb7f-28b1ef159c0d": ["66b30463-4011-43f5-8112-826208de6c25"], "07d3959c-fd15-441f-a064-d94b454936be": ["66b30463-4011-43f5-8112-826208de6c25"], "accc5413-f9c8-492e-954c-db2935a83a8d": ["66b30463-4011-43f5-8112-826208de6c25"], "154a4887-57d4-48b2-a0e2-f01ef4a05f25": ["66b30463-4011-43f5-8112-826208de6c25"], "51d26025-f397-4282-ab4c-e614b1909136": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "e8ca0409-014b-43ca-aa66-18baf30c5856": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "618930f7-ea49-4631-8ec5-748399ab123e": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "0e830dc5-978e-40ef-b148-0701cdbac3f2": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "82ab0ec3-0073-41f1-a863-66c4ff3d8b4b": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "e774a66c-375b-45ad-8d72-28706054193e": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "babb4c10-950c-47c0-8a65-383a64edc068": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "30032779-8c22-4bdf-9a98-a280bd457baa": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "490002ec-c8f2-49a0-aca4-451a75374463": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "b9f8fe8e-7ff3-4431-89f5-73948788cb99": ["182ea1c4-9823-4498-8afb-5fed66f8afa4"], "787f8222-d352-49ea-b919-7db70989aa72": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "4172ceb7-f347-40ad-8c68-0fe2cc0a23e7": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "f4bfb069-4ab5-4b30-81a1-2ec30f5ec8ac": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "b419532f-ec57-4460-bf35-fcdb757ee380": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "a3239f78-3895-4866-83da-fa3a4f26ff43": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "a349e851-d625-49e2-a919-8040f988ec05": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "055cba72-28db-40d2-99ce-d761f2ee85b1": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "2bc860b5-65ed-47ff-800d-bace4aa01b70": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "4b57ffb5-80b3-461f-a283-70e5cf3971bd": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "cbad517a-de7d-4da6-9786-47e6dd684d94": ["4fa78531-4c21-4163-9d75-1fbae52d1d1e"], "2d9f919a-f792-4b1b-a10e-a0f578dfc7cf": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "476325e4-24b4-429b-9788-e84684fab8a1": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "6d1dbac6-21e1-487b-b666-a788c3d485db": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "58543296-0157-4a5a-9ddc-522f60ee6867": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "e9b7254b-b94e-4e18-a453-c58d8436079a": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "6a69d169-7541-4d51-93a8-35fb4587ee8e": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "ccca527c-b69c-4c41-b2ea-9aff9b44255f": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "c66d08da-f096-41ae-8483-61507f48f6c0": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "a2e8cb0d-b35c-4690-bb95-38babd0775ce": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "087fbc2e-973a-4823-936b-0d0243f5c688": ["dae39aa6-9cbc-4da4-8222-ceafa8a0a43d"], "3bcb7050-a5e3-4025-86e9-b9c7a3eb2196": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "0bc4535d-2b67-4804-be77-c8734789e122": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "c73bd3d7-f752-4f5f-9676-e8acebd65d4c": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "fdd55fcd-d79b-4a87-ae51-09c717172f37": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "3102119a-0e38-44ee-bcfc-a58a411db0ef": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "7b74d871-b727-476b-99de-f23821860093": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "5ef4015c-16b3-4fc4-983b-5266130165bd": 
["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "e587f1fa-cbd2-454a-85bb-44b757a0046f": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "48910454-45b0-4445-9366-f056ee60b60b": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "7c2d9a6a-5497-4fdd-a880-598d0303db6e": ["33c2c28e-8966-4c61-8bd6-cc54ebe272fd"], "b252c530-ffef-4684-adb9-1fadc10df9a1": ["f926ae48-8dda-479c-9348-124b494c0446"], "0c235183-3b08-4ff7-9116-62f623af1175": ["f926ae48-8dda-479c-9348-124b494c0446"], "3d779012-1e50-47b9-a393-b2536d277ca6": ["f926ae48-8dda-479c-9348-124b494c0446"], "bb8eff08-42f2-4110-a840-121f230e4c81": ["f926ae48-8dda-479c-9348-124b494c0446"], "7dd59eab-597e-4402-bbde-5219259f1663": ["f926ae48-8dda-479c-9348-124b494c0446"], "8a1a0e9b-c1d6-43a4-b808-6de57e66e46e": ["f926ae48-8dda-479c-9348-124b494c0446"], "044efb34-2fe0-49fe-8893-7ccb84e34d86": ["f926ae48-8dda-479c-9348-124b494c0446"], "a3484e6b-9afe-4e3d-9b40-45c9b71a39f0": ["f926ae48-8dda-479c-9348-124b494c0446"], "8fdd2306-5671-47b8-a6a4-613dcd55f9fd": ["f926ae48-8dda-479c-9348-124b494c0446"], "319e4f98-b5a8-4b4e-968d-98c42d92e7b2": ["f926ae48-8dda-479c-9348-124b494c0446"], "76e21c77-5abd-4e59-8638-acbbb4e196f2": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "e92011ba-08e3-4f27-8a90-097f25c195ee": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "a127a266-08a2-4440-ad55-820b622a4116": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "036f4f23-f219-4b3c-a996-f38ce4a3735b": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "b59f0589-8212-4bea-bd66-233cd0e150df": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "4e38d0ae-2d12-48c0-b6ef-a1288551e7f3": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "b035b709-5421-4c16-9588-36f6df6dd376": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "3e8e3d35-e9b4-4d82-8549-e0076fb93944": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "ac6ecaeb-d774-42e9-a706-23571e9ca244": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "642dbeb3-b3f9-425b-adf3-f3fd7101c7f2": ["ad86da1d-b377-4e66-ae6e-bf0697f3c5ca"], "41b113fc-3f56-4b6a-b606-bacc03b2a8b9": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "95092336-d112-46e6-822a-88f81d722e72": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "4b019b57-09a1-4397-af47-7cf29ee23a69": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "1411f491-e75f-4111-a9e7-b927c26b8c3b": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "7caa53ec-adf7-4bd5-b0d6-16e1ec9de7af": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "724b5a82-32f3-49fd-8271-505c3b2a5866": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "1bba92c8-2cf6-481c-a916-e2b90863ada9": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "b57dc1d4-b29e-4d1f-81d7-93ab353d2990": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "241527a6-6383-42f6-80ad-d47448a4e5de": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "03c92229-db55-4afa-8ac5-34de6b0a3b79": ["b10e392b-f29b-4aa9-a4d6-339ee55d6518"], "abaa83aa-555a-4d21-acf1-7c73b0d057ff": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "073ea355-2455-462d-8d7b-c26913cb9ef8": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "17d41936-ead2-475b-b785-6dd826930ae7": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "f99de0df-3a30-4c03-be26-e66bce9f385a": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "7b39da4e-33ab-41c7-826b-5d251d01cc39": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "4c661a4d-46b0-4361-881d-a2e3bfffc226": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "f95da55e-afa4-49de-aabe-bcc697b0c48a": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "7654615f-4130-42ba-ab22-d08da8a0d3b5": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "154ad3bc-8ed0-45e4-9156-62d7e04da73f": ["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "c8c67888-5617-4df8-aa80-597113c355ee": 
["6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a"], "e593b5e5-80a5-4368-8044-4401253e2242": ["0d87b088-8909-47ef-9705-c44f991c695e"], "58567b4a-dad8-4564-bb4b-78ed4e066585": ["0d87b088-8909-47ef-9705-c44f991c695e"], "980286b9-57b1-41cb-be5b-1c92d2cade95": ["0d87b088-8909-47ef-9705-c44f991c695e"], "c4dd04c4-3824-49be-bbb2-28e519e680e2": ["0d87b088-8909-47ef-9705-c44f991c695e"], "710a2983-1bea-4763-b367-819eea4ef3bc": ["0d87b088-8909-47ef-9705-c44f991c695e"], "91a4617d-ae27-46d6-967d-0120e9f36b4d": ["0d87b088-8909-47ef-9705-c44f991c695e"], "33034f48-184c-4594-80d2-8996aa2b9d20": ["0d87b088-8909-47ef-9705-c44f991c695e"], "fa471afa-5001-4d53-b1fc-23f57e65bb99": ["0d87b088-8909-47ef-9705-c44f991c695e"], "71a166e6-1296-4706-9677-80a0a0a51341": ["0d87b088-8909-47ef-9705-c44f991c695e"], "70bebbf1-bde6-4150-b4ef-a9a63f9f8a4c": ["0d87b088-8909-47ef-9705-c44f991c695e"], "d01efaa2-b7e6-48c4-9117-00d808d80d03": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "c50f5c77-e898-4823-a5d9-6ead49445ebe": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "7216a1fd-492e-4455-9b1c-87127423efe4": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "c4b916ed-6f69-42b4-a36d-b4b84b300ab8": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "e7b46646-126f-461b-8c4a-db9cd8cff6c2": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "d67307ed-cd5b-4202-b68c-269b3cb7e195": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "8185582a-dc07-4b2e-b832-9e3abf2e271b": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "655c7548-ad98-400b-a65a-0d9b4aebe77d": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "8024511f-e7d3-4a95-83f6-e77cedb7f0f5": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "a6cf7580-b17b-496c-a7cf-7bdb4ad5cec8": ["948acbdd-82ea-43ec-b9f4-c20ea2ce234d"], "1b8b06c4-65a8-4291-8125-da58b1a2fcf2": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "86c6a713-18f3-4788-94d7-798e37572f76": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "3b3ac587-1908-4993-ad69-1d204707f0ba": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "161b4640-9926-4d54-b332-3ae24fe928a1": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "5ef48cd2-e223-4cc4-935f-7b59b03953aa": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "36ac78ac-c399-4cc2-bba8-b4c88049ba0b": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "5e6dc05e-c94a-440c-badd-059d28a98ecf": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "b801b3ed-2645-4ab7-8eab-5f44c53f7cbf": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "953c7a65-0054-4509-b437-2ee2f041e51e": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "48e496f1-8380-4006-9052-8e63ea4fdc38": ["07b0149e-d8de-4787-a581-784029c5ddd1"], "2bd65018-80f1-4a1e-8ff0-c03d11d2f9ac": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "4eafde78-6428-426b-85a5-fd83ddfc8a8e": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "123f10a6-afad-4ad0-990c-17f252217093": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "f3cfe9a7-ea97-4243-a8c0-5cb436fb9198": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "3b9d6877-d127-4076-bbd6-390afed34038": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "f239aa99-3443-48c5-8df4-e7d753f4d1f7": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "b7ddcdb9-2b6d-4546-a92a-711d76916a7f": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "7baa6242-7aa9-410d-998c-d8d113ee3ed9": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "5bcd97f0-208e-48bf-b9c3-85ecfdd844d9": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "ffaea519-7960-4f2b-ade8-290a2a8dfeee": ["d1f75864-ad74-4188-a2f0-625b3b34dd62"], "7f2aa8e1-d759-4d16-835a-7d25726466be": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "5f1f09e0-afde-4833-8819-12492a28f526": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "7e7ae934-b104-42e1-a4ec-88b2b1c55cd0": 
["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "4c2801a4-a960-4bb9-bb30-91106cebe0b1": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "2bc6c92b-c94f-406c-97c3-94ff5221a4a7": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "f15e8ee8-6df9-4c38-a7a7-724b35d7266e": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "1a097e10-917a-47d2-bbd0-f888b755d76e": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "83ce9f99-21b4-47f0-bf69-a1bdc6c6943f": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "c0058a46-7dfe-4578-a952-21be6f9e70dc": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "6d0329dc-668d-490c-8c24-42c4b1e1311b": ["d5430903-a32f-42c0-908e-1b1b0caa72eb"], "2f8589f2-71e9-45f2-ae32-f28ec7bf2c4b": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "8cbfa2ab-0832-4346-ac20-86d72deba93e": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "0424821d-48cd-42b3-a821-45f2d9a02ee4": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "fc1f5d65-4b48-44ba-bbc9-d567cad9bf4e": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "2bac89ba-56e9-4237-bbb9-2b5879e79287": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "dbd6b39e-eb46-4dcc-ae55-b3268513bb63": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "2a6d8358-e27e-4c93-986f-95adb301ee63": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "8cdabbb1-82d4-4201-914a-3a873fbcf68b": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "95cd7988-3d70-4b9a-9079-383a78c28a45": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "679bc28a-263c-454f-b2f4-3c5ccf1ae00f": ["8c7c6cd5-965a-408f-b1fc-cba59e93879d"], "e4229b12-3425-4bac-9532-22a0fd43652e": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "03a02bfa-6e55-4dd0-a51f-edce76b48834": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "825dd8bf-7c6c-4446-8011-c25d6f557389": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "e2909c32-1244-4a0a-a3fd-cf8797be04ac": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "af541196-0d8c-497e-a398-c3c9253cfec4": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "2855bf3c-9555-42a9-9e98-d7eda34003a7": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "5ac3f009-0665-4ce4-bcca-3c6faa028116": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "ba3a4a1d-711b-402c-bc07-9ec87c5389cb": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "a75d2170-8371-41de-9317-9b80e2823d28": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "eefc16a2-a6f8-499e-a2f7-12e4df65614d": ["e1311e28-6431-4f7d-b026-99aa986c05f9"], "f5398e05-e48c-4721-b625-1d00761faddf": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "7683e79a-d55d-446d-b33d-e51f78b017da": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "ed4ea430-7625-4ff5-96fb-d43600713d35": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "77d696c3-d291-4500-99d5-f940800f82f6": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "a3938f64-6ba3-4182-ad9a-f0a734f44349": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "65e0271e-8df4-4d79-b667-eb66dbe7884a": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "64ee0887-641a-423f-b769-1ceb31077cfc": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "8ca7525d-cf25-49cf-b7f2-3c8e0c68c8fd": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "fbce740c-e2f8-4a6a-b5cb-b8e609d9628b": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "a19e1eab-0f2e-4afa-8fe7-df69b9bee656": ["22760b2b-f237-4c9b-8e4b-6c86d4043388"], "95c335f7-1eae-4090-bbd3-d200657a199b": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "d4722ec4-cde6-4b3f-a742-a10c5e340440": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "f313296f-4197-4b95-a34f-da79b135a5ab": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "8a1416c4-b1bc-4eec-84af-787a472e11f5": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "c1dddd25-fdcc-449b-8e21-263bfadff302": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "0ddce7bb-a5c7-4fa6-bea6-da3737420a35": 
["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "7acd2768-ac2c-4dbe-9c92-a27dcf7be867": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "0c799549-781b-482b-9074-635d45590cea": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "24353569-a8b5-4fbb-b2d8-0e04fd061aa6": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "cbda8aeb-bccf-46df-ba9f-12a9fc01b0b8": ["714b25fc-f9c6-4d83-b067-6f0c22b4a6f0"], "1e6a21b9-e745-404b-be65-8c7b9e4b0bd7": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "a0c122d2-8188-4702-8b56-6d423c5f74d3": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "a2d4766b-5e37-4f46-adac-a62b1c427c78": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "1e2f5c68-7e0e-479e-a952-e3d1c6caf4f7": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "63d84cc6-9adb-40a1-896f-b2e6034c339b": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "b66070c1-0ec6-46ae-9fab-b4445a76fc1c": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "fbe83be2-10ab-40ca-806f-d957f742ed90": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "24efaac2-edc5-4a7e-a8a9-2786e4c966e4": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "e9460513-d9f4-4ee8-a89e-9acdf8fdf716": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "6fff7468-40e8-4590-9f72-615e77d43bc3": ["08545214-7d84-4218-a3c8-92b8a4f288d8"], "deabd138-adc2-44f5-98ee-322a39c17a98": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "ee75ad83-a0d7-4713-99c1-7720e8e912b6": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "39d5c426-28b0-44e6-9fff-8fb668122c5c": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "70c28c2d-9d3b-4641-974e-a1804938ca0c": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "adf119f1-b069-4a65-87cd-ad53e5c43a5a": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "dd282118-49ce-47dd-87c2-d4156e944523": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "d771683d-c686-4a52-81ee-292ea31f22d4": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "f49bb512-05ef-4e2c-b6a6-091cbb905329": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "f863d5ef-78f1-4960-a14d-66a66caa16f6": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "828a7539-87cf-49c4-a17e-7be3ed25bfea": ["4318fa7a-1dda-469f-9fb2-e36092d0bb7b"], "56a2f1d9-fc30-46e6-9705-569c76ebb6c7": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "ae85135f-051c-4806-95a0-2cfaeb1b3924": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "6ee2f972-193a-4bfb-bd19-604bde142dc1": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "941d29d9-1e5b-4e48-add3-1024903e747a": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "cd19f434-9802-45cd-9417-bf530750bf85": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "17234b23-05e3-4796-9357-c3255713355b": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "2d3d1d60-0258-4016-b2e7-21895ee3da0d": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "b95decfd-0e22-427d-9a93-294412eab257": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "858d0487-9ef0-41bd-beb3-28e4bf4dbd52": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "58dd02ef-48f3-4312-83e5-1fb54e25c2cc": ["d282c21a-510c-4f1c-b774-28375bfd6162"], "2d3012f3-7feb-48d4-b54c-5107353c03d0": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "93acef7c-f3cf-4bda-848d-c79f08fefdcb": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "efa2b789-a244-4680-af19-b891e4c72987": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "7173f456-fedc-440d-b22f-efa9cf4e85f1": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "5d764367-0f8d-4efd-976f-e89b2c8f684c": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "95643527-800b-404a-a62c-a3f7390fbdf9": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "694a90b5-31cd-4f46-b221-2bbccbf2ea0d": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "0697ed1f-3d26-439c-9818-2971e3df5158": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "171d3b23-4f45-49d6-a317-a5fe6cf4016a": 
["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "dc38fdd2-c4f6-490f-a7e5-5c3751e542f2": ["e24fe8bc-2648-4d4a-8e40-01161440f8fe"], "13d44da0-2fef-4c2f-b4ff-19599f9b2afb": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "fe155026-d872-40e9-8736-9e9a69d08e1f": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "d4c4faa8-502a-4bf2-b182-7aa689ad6ae4": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "46c0dbae-a959-405b-b3f9-8c2e83761656": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "116fa31d-f588-49de-8ed4-d15b3ba118e5": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "fc759a44-cbe0-404c-83b8-dcb5b364028c": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "4ee837f4-d5a5-4d90-a40a-019addf10b09": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "c824ae9c-741c-4730-a945-d591f13e0967": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "472d7e59-379f-4fa7-9356-fe2608a2ee59": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "079cadfe-b827-4d8b-8847-ce41fba6094c": ["a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c"], "01e765c9-5aef-4c7c-bca4-d9b4b94e54fd": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "33df19f9-df9a-4d75-b8cf-7b262bf4f8b3": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "c702da49-cb6a-498c-b6c2-a377e4973503": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "2f16d6ca-6f2d-4da2-8d77-9da96198f0aa": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "c5ff5f27-e556-4207-8151-5519b2fe84a6": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "9ec597d8-9559-44a5-b175-db525d6f9e63": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "f6dcae70-8e0a-4f47-9c30-589ffa82d968": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "d6cc4e55-78e6-4d3b-9425-9f0e1ffa648c": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "7ac7defe-9d7c-4996-bda6-56a4fe59a5c9": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "bb7c4021-0d4e-4b15-b28e-1d9d6664c577": ["c4246db7-6c73-48a5-aba8-2216ba9a71d5"], "e1aec18d-e71a-4d28-be59-e5be3881d70e": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "b2ab71bb-0a72-4068-b1c1-505a308496ea": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "eb859b33-b604-4d38-9b9c-36c1c453a82b": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "057efbeb-09ad-4580-a430-acb50a9681a6": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "94f7d7a1-6947-4bd2-b4fa-6d8db3896c41": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "c4bcd7fc-8a32-4295-a4f2-b6da221f7546": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "4d3140c8-cffd-4c71-ab31-dfbc34b26f50": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "7b4e0c55-fcda-4bcb-ab4a-e2333b46c568": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "fbd293d5-2f7d-46ba-8a9d-ba56d3e2690b": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "82a4bddf-bde1-4da4-8597-af2b5f75319f": ["d0d99450-83ba-48c6-8f9e-3935192d7a48"], "c8815a76-b3d3-4150-9fc2-ffeca9946282": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "e6706517-19c4-47c4-9d33-e88aaa79be85": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "c1f9af22-5965-4be1-86a3-e4a301b6af2f": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "551ffc98-e117-4c8c-aabd-84102ffaa8f7": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "5b9a4026-4170-4739-aa59-a82371bcde95": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "3a57b342-f32d-4571-bf11-0fd968d2f12b": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "bdc49d2f-f75f-409c-a019-6bd6e720673b": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "e354dc4e-3651-464a-85e7-a02d6e8c6fe5": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "e0faed04-d3ac-4ffd-847b-0eb7570099b0": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "25e2dfeb-bd2e-4183-bd34-d7a9fb889f4b": ["3836563b-34f7-43bb-be66-5bf8842759b9"], "9d781fd4-80b1-415f-bdc0-cb99a0ad57aa": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "5ab4424c-a307-47e8-9524-720e346f1103": 
["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "3749d629-316f-4418-9f81-884d7050c768": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "76664a25-080a-4041-947e-a87349ea3aa4": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "a3d40842-deb4-459e-b156-f9ef4c243572": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "9ec62f10-d33b-4bcd-a708-9a6b8f4f52b7": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "0e343257-054b-4912-83af-419e09f10542": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "8d5d823e-8af6-4d8b-b8f1-c68b7d439a22": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "2521694f-ca6a-4805-97d6-166beb459f2c": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "5985031f-49b1-44cb-9355-1cbdf90a9ec8": ["0c4eee0b-95f6-4546-9277-8d41b2128ead"], "c5db5bcc-a2bf-498d-92ab-5bb0db131203": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "46b110ea-13a4-41c5-be85-368d573b2111": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "634c0a75-d94b-4dc9-9e9c-ecb9cfb11231": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "08fdf655-f40d-4a7d-8eec-5c6ca9a4754b": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "c63e76ef-0b53-439a-871d-3b04064a5530": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "9bdf0a7c-cc20-4518-b935-616a80516ebd": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "3fd33ba3-95e9-4be2-87dd-1dbcf4afee88": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "04331b6c-1532-4bcc-9a9a-f12352764d7f": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "18215d4a-212f-4664-a6f7-f25e3fa46f80": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "c9685d0b-7fe3-47f6-a4b5-99d6cf0b3123": ["03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348"], "313a9213-5dd4-4fd0-b6cc-5649c4da50f7": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "36918793-5d0e-48bd-b045-f8ff83ea055d": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "664af71b-506c-4536-aa63-5a4c95942aa3": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "0f5c4986-db15-4307-9880-e51f3bd60751": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "5dd96edd-ff11-42e1-8ea9-f87e9ae4b2de": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "a14927ac-5614-423d-a8f9-5c0853bab534": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "67b571d8-1ebc-4a74-bc63-de217aebedd1": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "176842e4-195d-47ea-bb47-4747e05ce154": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "d7798ca6-fc5a-4819-813f-1d68a7715e00": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "12f7ff31-5e11-4d2c-bedc-26c5cc7322d9": ["913ec1ef-ecb8-499f-bbd0-c79b8355fd1c"], "c21ceaec-63ff-45ce-a1c8-4c954a4d0cba": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "c6cf6391-dc4d-4c46-b3b5-fb5a40f28726": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "01e4a6f5-a4f4-4957-9cdc-2aaba3491a36": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "241aa1b9-216b-43a4-9f90-6610fe5c28b2": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "2a49cfa9-6bbd-43ad-bea4-28427449269d": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "72445f3e-2877-4f4b-9192-ba572bcdb530": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "422aafdb-ec57-4340-bd89-2fc20e3b0da0": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "30ac0cae-4e18-4ad7-bb21-0e34d9b27082": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "d14ef51f-6a7d-4b9b-8c2c-7ba562b3d774": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "d716e7c9-5860-48e4-914a-8c72625fdbbf": ["81258dfa-876d-4eae-befc-9ecdeb596c38"], "407a7ba6-d65a-4a11-afa4-c6ea6e4c2736": ["db999b51-bd0e-46d4-9202-cff81afad964"], "436567b2-4a53-418b-8002-08cb30c28f31": ["db999b51-bd0e-46d4-9202-cff81afad964"], "d6bd80dd-1e24-488d-a6e5-eb5aa8895943": ["db999b51-bd0e-46d4-9202-cff81afad964"], "84eeb2b5-483b-4794-8d69-a1fb7ca5b57a": ["db999b51-bd0e-46d4-9202-cff81afad964"], "1149e1f0-19fe-4af1-b052-9c5b0bf7b453": 
["db999b51-bd0e-46d4-9202-cff81afad964"], "3e72a6bd-6a5a-4b12-8156-069a0f9382be": ["db999b51-bd0e-46d4-9202-cff81afad964"], "1e55727a-0e04-4def-9388-239c06d052dc": ["db999b51-bd0e-46d4-9202-cff81afad964"], "a76bd5b4-7983-42f7-98f1-6cf2180a7448": ["db999b51-bd0e-46d4-9202-cff81afad964"], "5bcfbe76-4c4e-4bf9-8a09-cd1dc9eb77c3": ["db999b51-bd0e-46d4-9202-cff81afad964"], "f8f78806-13b8-4acb-9826-c4408a3c523c": ["db999b51-bd0e-46d4-9202-cff81afad964"], "0e48d737-5d25-4c14-94d4-f0be919145c3": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "ee67a49d-fa40-4f72-bc61-a39c2144d958": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "963ca3e1-2fd0-4a20-85c7-d1bfe31c0bd9": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "133585cd-97fd-4343-9627-bfa3bdcaee9b": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "37f44acd-e83a-4327-9d26-ac7ce250879c": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "44946c07-c978-408b-9b84-a5fd16194ca2": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "20bc157a-c8bb-4ff2-9003-6f475b0d145a": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "bc817926-284f-402f-a8b4-8016544b7436": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "757d49ae-2c4c-4dda-b834-35d68feba859": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "ec4a9dd4-cf97-4b49-8736-19a432697eef": ["45268e73-21c7-4a9a-b026-a97f0f1e299b"], "5dc1404b-b9d5-4165-bfba-5ae18b4ef0a8": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "1b4835f2-59c4-4255-a964-61969ea8863e": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "8f94848b-91d1-4bd4-9095-8fab3fe85de3": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "be1037b8-1682-4b8b-ab5b-6bad9c9018d7": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "adb12b37-a721-476a-817d-6ae9d45f34bd": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "d9941951-34e0-4599-ba08-82c1823f7557": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "800426c1-0a93-4185-b638-fc754e6a3feb": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "6cf609a1-e1ab-4388-9eee-76603f895da4": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "03ffb16c-be24-46fd-a594-205d9e68e49f": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "6ff903dc-0e95-45b1-b7ba-5147dcdd7088": ["c8d3ea4b-63fa-4698-b7c2-63895c628511"], "90935253-ff29-43e9-9473-f71036e4e5f8": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "a87294ca-3f18-46f9-921e-10e6fa956d49": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "16605b19-6407-427d-8ad6-0bd123fef5ef": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "5cd233c2-fb54-486b-bec3-6c406a216562": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "799db38e-e7df-45c2-af2e-701b12bdbee5": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "7b243fdf-3ecd-4747-87fd-c6c9860cb3ae": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "d623ee79-0c8b-4ee4-a9ce-edbf633b20de": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "fe664b26-b505-4d49-b30f-fe252750d38f": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "d5d0438c-36f6-4328-90cb-4baa96b6a1bc": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "466b29fa-ac89-4e2a-8791-cfe170592641": ["51e515fd-fd17-447c-a07f-ff68b43cc751"], "39725f9a-68c2-4fe9-aa8d-9a4f08d023ae": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "ca7c2204-b6f5-4781-bf3b-8d9f02540ffd": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "f43c197c-7302-4f65-a850-8d1592d44eee": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "6d8e2c0e-e4cf-4ebb-a417-815a64668c79": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "14c91abd-8d01-4cf0-8523-0812a5c1c840": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "68d008be-5997-4d96-96e0-731c60a6c09e": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "cc4bb020-b92c-4f19-adc2-06b98ff1da33": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "e63cb0d8-46ce-4928-aef8-ccd1f1693044": 
["89295981-fe45-4788-b206-98c5e10f6bcf"], "6f3c8f9d-9bbc-44cd-92a3-95215c3cfee5": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "2f47fea5-c195-4bb6-889a-3d498268e761": ["89295981-fe45-4788-b206-98c5e10f6bcf"], "b23c2ac0-3c8d-418f-9d3a-4d355510c475": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "e341d43e-c5cf-470a-9937-1a0522709924": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "d32de1da-1496-480e-bfcb-a256b5aa2656": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "0fc878ed-8fa8-46f3-a457-943aac86deb4": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "62ea75e9-a1f9-482e-b748-93e00e24299b": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "d326718b-3c54-47fe-b90b-e026762b440d": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "8113c5c3-1121-4595-b984-3595f621efb3": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "1f685537-4d22-4888-90a8-cf32f7774031": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "32b71e7d-ef8b-49de-a3f8-f906cd313fe2": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "0ee66a25-f2c4-434c-b514-7affa3493429": ["d91a4739-1860-4e2f-a88d-921afc9e58ad"], "f150a1c8-dad3-432e-91e2-c40098a5ebd7": ["0c855fa7-f69e-4584-8664-653671650fc2"], "1d4e22f5-8de7-4f07-9526-61c7936f9459": ["0c855fa7-f69e-4584-8664-653671650fc2"], "7c6bd923-b691-426e-beb9-67b46c185a03": ["0c855fa7-f69e-4584-8664-653671650fc2"], "04d3f9c8-515a-4f01-8cae-62fb636fd1cc": ["0c855fa7-f69e-4584-8664-653671650fc2"], "b4f47764-9f91-49f1-88a2-221cc0e06715": ["0c855fa7-f69e-4584-8664-653671650fc2"], "3e394353-4b52-4a52-af48-562eea9f2152": ["0c855fa7-f69e-4584-8664-653671650fc2"], "36aee960-dcad-4e5c-97d0-05f4fc62b815": ["0c855fa7-f69e-4584-8664-653671650fc2"], "a4b69757-7dcd-4eff-90eb-a30ced99f719": ["0c855fa7-f69e-4584-8664-653671650fc2"], "72048f3a-c23d-42a8-8194-a14c109a5af2": ["0c855fa7-f69e-4584-8664-653671650fc2"], "2047cc7b-4522-4ef9-93de-8bc09531f843": ["0c855fa7-f69e-4584-8664-653671650fc2"], "ab3b72f8-672c-486e-bc6b-9461baa761da": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "8e05f5b4-738f-4015-9698-0d841d8727cf": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "e41871e4-fd71-4858-9f31-90e4bce717ba": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "0aee33e7-9f19-42ba-9f6f-020edc4551d5": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "780e0590-0606-443d-a7e8-2891d967ee16": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "85dfacbf-8094-47ad-80dc-7bbcc46712ca": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "6c98830e-c446-42a8-ac57-f8f46ee0a255": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "7abcd464-ee77-4ce1-8d41-57315a78fed9": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "cc012aac-5ce3-434c-8e51-35c1e8338ce4": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "51e0fc29-06be-4a85-a0ae-1fa144bff1cc": ["60b017ce-e6fe-4811-834f-795d4a12c364"], "b74eff6f-0348-4f4d-8455-ec94c409abc3": ["3d60073b-d973-4f20-8886-8710331b996e"], "0db718f0-8251-47d3-83c0-f9a203c06084": ["3d60073b-d973-4f20-8886-8710331b996e"], "84f786f2-b45d-4028-8864-a9c521374ba4": ["3d60073b-d973-4f20-8886-8710331b996e"], "39695c7a-9723-424f-a040-717c4cadc993": ["3d60073b-d973-4f20-8886-8710331b996e"], "48492acb-3e38-45a7-bcde-061e9fcbb695": ["3d60073b-d973-4f20-8886-8710331b996e"], "64e6cef4-2fdf-4121-b410-fc7168f68289": ["3d60073b-d973-4f20-8886-8710331b996e"], "71ec2ac9-cc52-4831-a314-2f92000494e4": ["3d60073b-d973-4f20-8886-8710331b996e"], "8dd38cfb-46a6-4d54-9560-fa7036a393c5": ["3d60073b-d973-4f20-8886-8710331b996e"], "40a38d86-7774-4a05-b3ec-46ee736efe3f": ["3d60073b-d973-4f20-8886-8710331b996e"], "5303236e-9b24-4300-a6eb-71fd2660ba8e": ["3d60073b-d973-4f20-8886-8710331b996e"], "68145309-ec97-431b-a2e4-ff7c84ae2b92": 
["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "bc2c2f00-164e-4fb2-b04f-0a8e11a127b2": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "99c78782-3873-4806-b67a-5057aee1fe94": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "f1218cc9-8cf6-492a-87b4-e7e2b34939a8": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "550182c7-921f-4b50-a93a-089c2d1e4f06": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "38276fcb-783d-4a66-b4ab-96d3654c9707": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "64350a23-d7dc-4a12-b082-a57617fd4d14": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "db717bad-ccdd-4097-a630-6459902c89c3": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "bb183df4-5c49-4847-b777-9b605daaec7d": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "126df14a-0e79-4f7b-9ba1-5cb3197cef81": ["fee5c143-dc56-4cc6-ba07-552465ae2b72"], "bb600a12-cf3e-430f-aeac-ffb55080bc75": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "ab0afd54-b8f3-4255-bcea-00d7f469fa4d": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "4b17bc0d-84c9-44ce-b287-5873628bcb88": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "d8711a1f-0375-405e-88fb-47084c2dd456": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "4e3d1faf-aaf7-4382-bfdb-f07986da3774": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "01016449-dd51-43e2-8064-6163d76960ca": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "96010c49-fead-4764-a1cf-52f08f825c94": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "1a7dc66f-7d3c-449a-91a8-730ae4fbf5e0": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "618088f5-7d87-423c-94f7-092dad08e9de": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "7a5bd3f1-0117-49f5-847a-c98c3e1ed923": ["e1dc1672-fbcf-4551-aa8d-4896cc302aef"], "396fdf65-f8c8-44af-b354-a6a3a8080cf4": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "53b2c06d-8db2-4193-bad2-2cabf8dd64af": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "33223652-33b9-477b-a3f2-76a8fd138783": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "669ee0bf-cd38-407d-a36e-b02d7efaeba6": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "ba748242-63e0-4e5f-9536-5e37095f34d7": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "369d0faf-1db8-4137-8253-078bff29bf59": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "177fe09a-b7d7-45cf-a228-70fe1d4708d3": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "74ec3300-8c0b-4be9-9382-88a845824eef": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "5cf0dfa4-8cc2-49d6-a074-c9d97d87b51e": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "77d5bb4a-5a5b-46c2-987e-911e1f0d76e7": ["eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb"], "269c41f4-2524-4d35-9147-09e322f2504d": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "53226a81-7a32-4f91-a589-46450a0b7283": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "0d9724ae-bfe2-47f4-a021-1296db5dd676": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "db9015f0-bda2-4f91-9808-19738ae88078": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "b87fc7dd-8992-4529-85cb-066cbde37dcc": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "eecaaa8a-3251-4874-9a51-cd62286c5095": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "bfb13ae7-473a-4caa-968a-c783c333b1e6": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "030514da-a5fd-4a1f-ac10-cf8922a6150f": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "43d3ae5b-1657-46fb-920d-87bbf081d086": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "40e129bc-068b-44de-942b-641c29a2983a": ["44763663-ed9a-43f0-84a5-ada3a7da3066"], "3eda549a-f514-4a5a-8cef-68367b00516e": ["dcfe160e-c7b2-4341-8d39-762060034476"], "4359e62b-013c-45a6-b540-f16600b3d7c0": ["dcfe160e-c7b2-4341-8d39-762060034476"], "f8696290-800f-4952-81bc-3020e6f11c09": ["dcfe160e-c7b2-4341-8d39-762060034476"], "38c06f0b-84d5-48fa-ab9f-4a53a84e8916": 
["dcfe160e-c7b2-4341-8d39-762060034476"], "b81f442e-1b5f-43da-8261-9b2c1363ea3d": ["dcfe160e-c7b2-4341-8d39-762060034476"], "dba6ad78-0e3e-4068-a3b5-38c5ab583a94": ["dcfe160e-c7b2-4341-8d39-762060034476"], "4b849fa7-2c5a-41a5-b197-62210fadaf66": ["dcfe160e-c7b2-4341-8d39-762060034476"], "6f3964c4-6694-4977-976d-770e0175af87": ["dcfe160e-c7b2-4341-8d39-762060034476"], "34e35d11-aa65-49a1-92b2-ab8eb6f362e6": ["dcfe160e-c7b2-4341-8d39-762060034476"], "77bca3a1-9687-4e84-8144-d0e76454c55c": ["dcfe160e-c7b2-4341-8d39-762060034476"], "adb044f3-b06c-4147-b270-388aa7ce28a1": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "2cc96dd4-379c-4ea0-8aab-bf6fcb0ee94c": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "6b0d4619-0b82-4e50-9d25-50215d1b4223": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "7f729371-7c4c-40c9-9651-c84ceaf4ab59": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "4bcda5c2-bc2c-4f9b-a31e-c90c1bffe24b": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "eab83c61-d25e-47e3-bf55-cec2cf22dd7d": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "80e574c2-a56c-4ade-b455-0eca52117b22": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "7af58ef3-a1c9-420e-9eec-b2eecfc7ba18": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "cb512b7c-e79e-479d-9557-8757c4fda4ed": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "16c95e95-db9f-48d8-9c3f-fbf32e49bf92": ["fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58"], "34bbf002-cd8d-4886-a398-74853f3697c0": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "332c9a40-a1fb-448d-9067-d81497b47a04": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "05415269-22c9-4836-aceb-97f1c11d2051": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "ac4195ce-e0d5-4720-a4ce-5bcc8078b64c": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "05f22b03-2ba9-4910-8467-4cb79c9a8206": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "d202eb6e-03df-49f4-88d4-b160a4626fcf": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "02abe32e-eeca-4b60-9b32-22ae7ce787a5": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "8afa840c-a2a0-42cb-95b1-8f1b404515e3": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "a0f7a50f-5987-4c5c-98a6-36316797bea8": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "817d708c-fe29-43ce-9478-5bc616838321": ["63006b69-c2c6-4bab-9d11-2b7976bea75b"], "9cb6e467-ad95-44c0-841b-777832cfa4aa": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "eb391602-3d68-4cb9-a04a-29b9595203c3": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "c3b9539e-277f-4737-9e5e-e386eeebd7a7": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "73302da7-3bef-402f-9bf6-f12df15c536e": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "58e6a606-3cd6-48fc-9dad-9c6fdc181f83": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "2d6ccf02-270d-43e3-9410-be3e588f9b6d": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "22b01cb1-5a1a-4789-8b56-4d7baa49fcc7": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "dbb74eb6-3fde-40d5-8df3-bcc934e6a158": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "7b7be751-057b-4a7f-88ad-cfe0a6255aca": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "0a96f62f-6fcc-42d1-a86d-2d07836fa4ad": ["0f4c5c5a-f8dc-43af-85f2-45a85fba36ca"], "c12924b2-38c5-459b-8743-4004840152f9": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "1ee45afd-2181-4a4b-8885-4e243fe3c9ba": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "064348ce-09cb-4cf5-9b13-efb66fb26273": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "07faf7be-a125-488a-8fc9-95d2a01f7983": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "0f2299ca-5f65-45fa-bec4-3d8a5dd6667d": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "082dc681-2582-47ff-9159-652af70f6d28": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "8476f9de-1c46-484a-9d42-63c47d4e3ddf": 
["76064db3-f7c4-4ded-bee2-739ed588ce64"], "1930f79d-9ceb-4935-a643-c1d80c961fee": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "d28999ed-71ad-4d55-afa6-e2d551dffcee": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "968af77b-e22e-4bd8-b951-9a663a94877d": ["76064db3-f7c4-4ded-bee2-739ed588ce64"], "9c573068-8e35-41b2-879e-161babef96ed": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "ead604ce-72c9-4dc9-a2ec-008dc710ab0e": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "e690c10f-4933-40e7-835f-6034cc551627": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "ffc79ee0-1f3c-4956-b746-ac94aae246e9": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "bf06a17c-7eb3-4ac4-9260-bea0f24eedfd": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "97d2ebc4-8668-4f50-9e9d-4dec70f48766": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "87372dbb-28be-4288-810b-cdec1b6c9afd": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "2423bd23-5f6b-4730-a392-af960a751c0b": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "fd2d6cb9-81af-4234-a95b-65534c17a395": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "845cfb86-2eb6-43f6-bbc3-bfbd925cd21a": ["cfd1017b-b416-4a90-a926-b0851ad24488"], "4bdb3f89-538a-40e3-9935-8e433ab9e6e2": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "6905b587-39e3-4b09-ac42-6cded7de2b9c": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "13355254-5606-49a9-a3eb-4769a610066b": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "e2b55aad-14c7-43c8-bdad-c0dcd2877575": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "ad739be8-7f90-4ee1-817e-e3f5a425b4f2": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "ea9cb785-05ef-4f79-8053-0816fff5eb8d": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "0e7c873d-f587-4cfe-bb7e-5b4877644666": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "329c3806-ac7f-48ea-8945-74a6f91b5d0f": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "5a1e1ec3-fda4-453e-ab8e-ce1d9faad797": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "5e8537e8-4e6c-41f2-9b53-2a9692305988": ["0a829510-5812-4f88-bd78-9bd5eb4f0632"], "5c7e3648-de65-4b1e-b755-1e099598c19f": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "d119441d-bce2-47b9-9924-dbf87e4cc817": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "22519aa4-4650-4092-b1b5-e3ac5d8f725f": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "48b1844d-583d-4e62-908d-889b415b5c31": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "cfb67ae3-43b1-4450-9bb3-d7f29aa0e86f": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "2c4bf45c-f7e6-46e1-9e67-30c91c0444c8": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "ab2fee39-2a46-49d1-8cb6-55b4167b1b63": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "dc7ea38d-faed-4f3d-a7b0-8c1ae79788dc": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "7ec7f6e0-8490-4dab-ad43-7031aaff916f": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "3835091e-5a77-48ed-b786-16833c6dad03": ["1233b7f3-d931-4995-86d7-fcdf13a0ea42"], "14ce14f2-5924-440f-9f82-1d9518db6f9e": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "c102bbce-23e6-4b6e-a359-2de7b683d7f0": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "1bd749b4-cb8c-4f68-951e-4dc05f918076": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "4ba5ee61-6497-4e28-86f2-603d556d088f": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "17f3b4d5-ed49-486d-af7c-5c8150c925ef": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "9bf8ec79-8c71-49e3-9cf1-bfcf6e900eac": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "d4c8b3c6-aaa0-44ad-89d8-826c0e200e82": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "5724c74c-76af-4944-89d9-d6e91234c261": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "a22ca24b-e8c8-49a3-9ddf-5f838e432323": ["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "b00e06f3-0822-47bc-8d94-e046c58311d6": 
["6adeab1c-726d-44b7-a091-4c8f57d78efe"], "2f3330e7-4aca-4708-bbc6-29c775344e94": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "24490295-2bcb-4b3a-b834-033104ba2420": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "b0e2eb81-c76f-407d-87a7-bf82bdc5c994": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "2dca1178-1147-434b-b575-69e710673993": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "93bf2a5e-1a9f-4aa4-a500-5dde06657e7c": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "f79d35e1-9c4c-471c-b976-8c3dae58e572": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "62517b56-3063-4614-8fd7-d987ee4bf4e2": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "c53f1315-615a-4dfa-a417-1ea3f2c92e3b": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "bd8dd202-37fc-4f0e-9788-e8deb33eabdb": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "fbf6c048-26ac-4ec7-84a1-ae7cc41ebfb7": ["d4dbc93f-1c23-4106-a8a8-aea5eab30df5"], "38534eb5-4881-4f58-be97-63238c6c479b": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "b27e8b4f-0fcc-41d9-a4e8-94ac31878ba1": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "991a0d40-0a1c-44ba-b955-3de8eb4bd067": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "e84751a4-90f8-413b-b3b6-e91f28305dcc": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "89c9205b-de28-4888-8c7f-79f9900b731b": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "8c67ad80-5cd3-4842-88e6-fc5e71086935": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "8b30e5d6-0420-4224-b85d-052d8d01a5b4": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "766d94d6-ff7e-4281-9d69-ba577c2b2228": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "0d7ff6a0-f0ab-42d2-bc69-3d81e2146c1a": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "03c6100c-5fe4-4dff-9b62-da870a412d36": ["37e1195e-1f2b-4a57-9c67-44f8fd137203"], "0f4996c3-02e1-4284-94a7-c3695bba5538": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "070d096a-c121-4062-9907-63cb49651bb5": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "5b9ce146-9406-4f12-8eae-194503dd5c93": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "77c334c1-312b-4323-9cf6-971bedb52b19": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "93b3a12a-1c0a-4a3d-8a80-a250055ab49f": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "0fcc07cd-f226-4132-b9bb-62b83ccc8c18": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "e6ab4ce6-4166-46a9-8f52-a88ce88d15f0": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "61dd4dc8-b98c-4c08-b83e-b7c8eb005bd6": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "28b5c99a-f4e0-41c7-b3c0-57d98c306d61": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "15bda80e-6fb3-4e9e-b3aa-343fbfc57016": ["8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe"], "b17b2a91-c2f2-421a-9161-f577f4f49902": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "1c591402-7c09-4342-9a4c-3e447029fa73": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "3f4d1548-89ee-4941-adf0-70889cee50a0": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "3451f2b4-c2cf-4c53-979c-50973acbb1bd": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "d0bc240e-3df8-4e3c-91e6-826ec2f75c9a": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "7edebb20-1da2-49a3-b7c1-035d6f51e95c": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "8aeea347-cc31-433e-b950-82f344182a53": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "e6649708-a5e7-4253-873b-745f594a79a0": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "550ed3d0-68d5-4c20-9af9-61b217f9c8ec": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "a1df848c-b03a-406d-a467-61ee5c89957f": ["4c2995c2-8cf4-4666-8cae-2cddab5c0aa1"], "50365036-5406-482d-a4f4-64f43b779b3a": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "fe6f7ccc-7cd6-4b50-8a46-8aba632ecc48": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "608884ac-731d-4bfa-b339-8c8d39749dd9": 
["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "2372552e-4f0d-4f62-8715-f7845287595c": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "b404ae73-34eb-4434-82b9-0e2c6ab3bf2e": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "fdc418c4-4bca-46d6-95b4-c7d346c634ca": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "7e203668-296d-4740-807b-996e28110ecf": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "fb5db5ae-01cb-4cf0-8c7b-9193091f22ff": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "4f49fc89-1623-43ba-9f22-18c09e38f9ef": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "5f86869b-e204-4551-8742-3ac0a5c93510": ["ed293c39-a2c7-4e96-bdb7-43ea102ba174"], "92689af0-19a9-4930-8cc2-d7dcf81cda7a": ["9feea339-e568-4440-96ce-ba269c40347f"], "44aae3aa-24ad-4059-9e44-4c37b8d088b4": ["9feea339-e568-4440-96ce-ba269c40347f"], "cef22ba7-26f8-4a69-8040-adb5bf18fa6f": ["9feea339-e568-4440-96ce-ba269c40347f"], "cd099f8e-41d5-45f2-9799-b0ffd6d8b77c": ["9feea339-e568-4440-96ce-ba269c40347f"], "63ef9f64-d9a2-4b88-9c2f-0c99870fc0e5": ["9feea339-e568-4440-96ce-ba269c40347f"], "ab431b98-c209-4dcd-a050-4a14e1bdb05b": ["9feea339-e568-4440-96ce-ba269c40347f"], "802de2ca-5fa0-4365-94c2-2f48d2551dc3": ["9feea339-e568-4440-96ce-ba269c40347f"], "a7780055-4593-4371-baf2-0854c10b4668": ["9feea339-e568-4440-96ce-ba269c40347f"], "2eb881cb-2d98-4856-b5d4-062d6a883283": ["9feea339-e568-4440-96ce-ba269c40347f"], "1835f2f8-1619-4637-a8a9-fb620ed7cb26": ["9feea339-e568-4440-96ce-ba269c40347f"], "a5a1e993-48c9-46ea-8808-a32d93da94aa": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "69dbc047-26cf-443a-83ec-63b327f9646c": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "1bc73bf1-7e48-46ac-a612-80196cb803e7": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "c084d257-5a73-46df-8538-5073ed7a01c4": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "91cd1699-977f-4d73-8327-5dc62cff847d": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "2f1e2c40-8abe-446e-9ab4-5eb6690a678e": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "6ed87438-4cb5-4a0e-8495-7378ba140033": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "7cf57cf0-ea86-4802-a27d-d8900db77529": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "80b69831-2305-4880-94da-f11163809d19": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "8d50eaaf-61f5-49ac-a319-1c3d968c61a0": ["0dae095f-3616-47ba-9b4c-f495fbe42c2f"], "623d8b3e-7499-4bf1-b87b-e7a81b50ee4a": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "b45f8960-4fbb-403a-8415-36afec2cd758": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "64b731e3-b3d6-480b-9718-5237e4c7595a": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "c2a47cbb-9233-4979-8506-436f11a090b3": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "6ce7fe74-d363-4677-b304-583120b4f062": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "c44d6e5d-92e9-47d4-a053-1feb8c4ea992": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "272d11b8-e3c8-4c56-8cd2-61cec900ff82": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "3cca6e56-8ef0-4ae7-9605-f6f461bb0b87": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "b49893d8-5d05-416a-962b-90597b72602d": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "6e23f927-8119-4bf8-823e-e3ef9d26c60a": ["3061ca42-f18e-4a76-9b9b-e378dc162984"], "723651fb-9c2d-409b-b320-816fae7dec56": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "75f53b59-e2b8-4540-adea-9e72c300aab8": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "660d930f-69d6-4768-b0a1-44bb673dfacf": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "ca06fff7-b717-465a-91c0-f20cc32b3902": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "0d33d01e-495f-4254-8b8e-d7393c8fa678": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "43a76af4-542c-499e-81e3-23edeecb1692": 
["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "f2963ebd-4268-48bc-99ef-b385c395b833": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "6007e7be-7afe-475c-8f2c-d189608a35f8": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "873995b6-97c4-4fbf-9d84-aafae525c908": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "6106aef5-150e-4d9f-b12d-839a1bc64d5e": ["c8465ca6-885a-48c9-9b62-5449f084b0ce"], "5d31f548-e180-4961-b02c-1026476307a5": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "41737fda-7f5e-4526-8ecb-d8a560682739": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "759a70d7-6d44-4ebd-b51f-264e7fd500c6": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "10bbc39b-cae2-4ff4-b61a-2c7fa87c5fd4": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "cf616e2b-26ef-43e4-9dc4-b7b9569b9f8a": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "9b2af444-94eb-4943-808d-8a01c74a7860": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "110eb68c-1767-4f4d-ba87-2028a2a1105c": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "1a06806d-fb35-4c87-8809-da5a08874714": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "5296dde2-bf5c-4f7d-b712-62c552801ef5": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "5b028681-df5b-4e75-a745-ef99535810a3": ["0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209"], "273f9105-5fd9-4850-ad10-dd9a9b8cc778": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "f2015027-8be4-41af-9324-6b28bc14ba10": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "1c39f437-06bb-4fe4-9973-5e2bdc4a8cb2": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "1e85d184-b2c1-4ff6-af08-8ea57ec19ad9": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "e0add861-37cb-4915-902c-04c52bc85356": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "b2c9c8be-32f2-487a-8dcc-b0edbbaec345": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "553df791-945d-4f86-8985-dd794f43f520": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "39d40845-848b-4f07-ac52-444d6f6dc6e4": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "d8eb671f-52dc-4612-9671-cf9c05b5beae": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "6d2fc2bb-39a1-4e50-b2f9-1579f87f3239": ["980b42a4-0ff2-437f-bc3f-e021380c4904"], "295453ca-009a-489b-9a3b-7f39d0aede8a": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "8a36fef4-811b-4fdd-80eb-a3d79e7b5155": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "417512e6-a1fb-48c7-b125-859ffc865ba6": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "39ea0fdf-a276-42b5-911b-4ec55c6e14c9": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "2d51294e-3f63-42ce-b8e7-80d8791bacfc": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "28211d1f-6063-4fe9-8a8f-56d2e0c9bd80": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "ffd0d540-b753-4764-918d-866af54215a0": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "a4e0cabf-38aa-4b49-920c-c6d52520a8fa": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "c63477fe-7ddc-4df4-8341-f70bcccc16f6": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "916eda70-3efa-4870-9261-7badb6422454": ["1f602c98-0f78-4112-bd8d-1948ae89882d"], "5928ad96-89e5-4214-a148-421437e5e16a": ["d0a26208-636c-4214-90a7-aa53131b8919"], "ccf1c7b4-aec1-4f63-b6b5-ee4c5681bfdd": ["d0a26208-636c-4214-90a7-aa53131b8919"], "3320cd92-d86b-448f-aca0-3a32e873afe7": ["d0a26208-636c-4214-90a7-aa53131b8919"], "eee8169c-4314-45e2-88bc-1d4fe146f56b": ["d0a26208-636c-4214-90a7-aa53131b8919"], "8a46d200-e514-4172-a7b4-199a50cfb153": ["d0a26208-636c-4214-90a7-aa53131b8919"], "4e95d3b7-08c9-4644-87d2-e0c03b0312ea": ["d0a26208-636c-4214-90a7-aa53131b8919"], "de79da97-0834-4ee6-b1f4-eb364ebbe1ac": ["d0a26208-636c-4214-90a7-aa53131b8919"], "ef810a8b-abbe-45ac-b82f-47d20dc71885": ["d0a26208-636c-4214-90a7-aa53131b8919"], "b0f0f94e-aa95-48c9-8e54-5d27b11c1ddd": 
["d0a26208-636c-4214-90a7-aa53131b8919"], "a5863708-81c6-4a01-a207-62001c6f482f": ["d0a26208-636c-4214-90a7-aa53131b8919"], "b613cee8-31a4-4979-8cd1-7547aef01a05": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "93e8f04f-3a3f-4daa-8c55-9a697403cf18": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "4d63f90f-ffcf-461e-b0b2-d7443e9e0e53": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "87ebbea1-b1c2-4928-8cc9-72bec3599f01": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "383af2c3-7350-4f63-b41b-480703a2f318": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "db698d0d-61cc-4055-82ba-9e9f070b90a0": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "864c0429-b586-4dfb-9fd9-1122c038f71f": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "512bfcf4-61ac-46a5-bc16-91f0073ecef3": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "cf6c1f99-aa2f-4487-bf62-4d84073869b1": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "3ab8040e-18b6-4400-94bc-7f7d0d4b1b2b": ["ee563e5c-3618-435e-8a28-e9c8655c8d6e"], "789a4e6b-1b5f-4a2e-be70-4c60b2e169ef": ["73123dea-1304-4967-8f32-c8c9484ff477"], "d904c6a8-b7ae-43d7-8287-35c070601ea2": ["73123dea-1304-4967-8f32-c8c9484ff477"], "b604912b-4408-4b2b-8eb6-c1c90a9637c8": ["73123dea-1304-4967-8f32-c8c9484ff477"], "17717bc2-52ea-4d65-b273-956e440e43ef": ["73123dea-1304-4967-8f32-c8c9484ff477"], "26da822d-31fe-48b3-9ca8-50155d6fa977": ["73123dea-1304-4967-8f32-c8c9484ff477"], "c5d39949-dbb6-4c5e-b00a-b6fd7788ae1d": ["73123dea-1304-4967-8f32-c8c9484ff477"], "cfea209a-9830-4e58-b251-1f70f42f93f1": ["73123dea-1304-4967-8f32-c8c9484ff477"], "7a516f8b-a3c1-4579-a094-1302e810e841": ["73123dea-1304-4967-8f32-c8c9484ff477"], "acc90406-7fa4-487f-a601-6e3dc23145a9": ["73123dea-1304-4967-8f32-c8c9484ff477"], "3a91cd21-aa78-4c15-a216-a24057643605": ["73123dea-1304-4967-8f32-c8c9484ff477"], "8363b99c-85ab-4773-86a2-f63cac3d051a": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "f87db50c-422f-4529-a21c-78abd2d10fab": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "34ed1fca-1d3e-4fdc-b3a4-9281c24dd99e": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "4d916dd9-762b-4031-b912-5fc4222557c2": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "cb1a51e1-502e-4c12-b98b-10353458c6e6": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "09aabd10-bf20-4e36-a1af-b2389c95fefe": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "16ef8d11-a13c-44d4-a88e-868f36377f22": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "abb1af0f-005f-408f-aa68-27d64a96a71e": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "681af4b6-c008-47ac-87ac-859d9849d77d": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "c32d437f-b5b4-4d28-b7f8-1a2de95eb23b": ["41c0457c-0232-44b5-852a-d481ca70ce21"], "2792f4ff-31f6-48c7-9b05-a903e1fa4fdf": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "b4374d8c-f461-4c57-8c4e-4d9975359384": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "cd79ee78-f93f-4750-875f-33a59251f574": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "0135028f-ffa7-4189-92d1-0888059657c4": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "7c54c8a4-972c-48f7-9388-9640bbc920ce": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "7ab73fee-d39b-41c6-8cd8-222b44049d82": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "579d7a98-c43c-4d11-b24e-6d7275e820fa": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "427a0cc8-5c14-4b43-bcd0-372317b5a8e7": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "e7555614-9fb9-4861-b164-85033e7aeace": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "5c5b27b8-e80f-4e87-80a8-dda89bca7b12": ["96a6d0c5-2e52-417f-a2db-56555912e00c"], "4436c96e-b176-4846-b3be-120e574bbaae": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "53134aa2-f21c-4ea7-be6f-6b32937c51f0": 
["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "89297186-beb5-4d42-b727-46821b71a4e0": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "ae608ad7-1d56-4091-a89f-36c7c99c6c23": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "666e61ac-70db-4dc3-8657-e4c29d144123": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "98137aae-d585-4b93-b931-6c0f0a022772": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "18d231da-3925-4aa0-8358-ad67f4ce8c1e": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "9f500df2-ad6b-42ed-8645-8a9e917bdf80": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "8d4c69ae-abc4-4f4b-961e-5d0f8194de75": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "4097b122-a3b3-4718-8577-48d550199b62": ["3118d718-c7c5-4e30-a48f-6da5e0f06528"], "1a442f2d-3986-4d5f-ab68-0ad589163a65": ["56e73190-3953-4683-bd63-8c163f90136b"], "d1108cc8-95d4-4e06-83da-0a5667e1679b": ["56e73190-3953-4683-bd63-8c163f90136b"], "0141b7f8-c7ab-497a-af5e-f0ad4e4ff34e": ["56e73190-3953-4683-bd63-8c163f90136b"], "04c27c24-9db1-48ea-9b81-876c36344898": ["56e73190-3953-4683-bd63-8c163f90136b"], "e6564ab8-5109-4a91-9546-08eef6406338": ["56e73190-3953-4683-bd63-8c163f90136b"], "6c438bca-254b-4856-9607-ce37450c6154": ["56e73190-3953-4683-bd63-8c163f90136b"], "b754ed6a-d92e-4edb-a8bc-d0eea1d29626": ["56e73190-3953-4683-bd63-8c163f90136b"], "56c9eed6-f23c-499e-8bc0-52b33a7b3e1b": ["56e73190-3953-4683-bd63-8c163f90136b"], "6f321352-c8fc-4909-9996-33dc8382f6aa": ["56e73190-3953-4683-bd63-8c163f90136b"], "5398f151-8b5f-42f5-b601-0b47b2d7ba49": ["56e73190-3953-4683-bd63-8c163f90136b"], "57eb4cf6-01d6-4cae-b638-975a2a0f2656": ["c3309604-8e53-406d-871b-1186ac575409"], "8a32a722-7f7e-40c1-967a-6c5bd05a1c88": ["c3309604-8e53-406d-871b-1186ac575409"], "e1b53808-6b10-44e0-882d-ba92d299d675": ["c3309604-8e53-406d-871b-1186ac575409"], "cad08b72-46c0-40b7-bbf7-613ff0ab5f7f": ["c3309604-8e53-406d-871b-1186ac575409"], "4da03091-d495-4661-9c76-eca9b1c37a74": ["c3309604-8e53-406d-871b-1186ac575409"], "53d8542f-d4e3-4349-bb91-b058abf20736": ["c3309604-8e53-406d-871b-1186ac575409"], "08b49066-3db9-4bb5-97a1-46a2adb0324f": ["c3309604-8e53-406d-871b-1186ac575409"], "ded819f0-0467-4497-a257-4b727219d0dd": ["c3309604-8e53-406d-871b-1186ac575409"], "51e0e9cc-e3b9-47a7-8bf8-405f02fa04e6": ["c3309604-8e53-406d-871b-1186ac575409"], "aec903ac-6dcb-450f-ad0d-c664a413a4b9": ["c3309604-8e53-406d-871b-1186ac575409"], "8d838d90-aa87-4ecc-9abc-0fcfa5a4d35d": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "a4c16234-c64d-454b-98d2-55d8906e1040": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "33c734e9-1fa0-4bd1-b225-487ee53c2b33": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "da018beb-b83d-4eaf-8c14-30d735aac789": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "93044246-0649-4ba3-b8b1-355b186025a1": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "df01e2f4-1283-4220-9353-4573fe7a1203": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "98fd7a77-ecb9-40ff-b731-37710588af27": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "37017d6c-9757-463e-b5e4-7df7f5a6a2cc": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "cc45a9c2-e146-4637-ae51-762e5cf9a713": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "a04f1bd4-fb2f-4020-a56f-07c5d12ee11c": ["0fdf9b30-0b9a-4ac0-b691-44357de98eb0"], "21b9f512-20cd-4b96-a8e5-824c5d788b9e": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "cbc08ff9-844c-48d2-bd2f-d965463662ff": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "62b31c74-49b3-45d8-958c-175dffe5e8da": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "2eba3ccc-3d6a-46ae-bebc-6cf52aea05da": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "a81db3fd-a4bb-4dd0-a446-4b3c34313263": 
["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "cc016098-a84b-4347-a791-d173694b1fd8": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "61262c75-27f1-4742-a14d-7dfbb43c1f6b": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "48be4dc1-8648-4ae2-b813-f08d7321d697": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "d3c26eb4-5404-4906-8f26-12a930b32e4b": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "5cd15272-af41-4e9c-aa26-42043ce6088c": ["52a90a9b-df34-446c-99d7-913e6f4bffb5"], "7869bc66-d4ef-4fc7-9897-351302b9eae5": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "c13ee199-2840-46a1-ab92-c45e02f2f6b6": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "d612ed72-1fb8-4bca-9b74-b5243bf640ca": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "bbecd72d-7534-478d-a686-94b4ed9cb7c8": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "68914712-215e-4582-8129-f1f7db119cc4": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "5f684140-09b8-47a2-8459-aa9c40072ce2": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "b0deab21-1ef7-4bb8-bb66-bd0567448753": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "e6417fe3-22a8-4536-89de-555e6a9a171f": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "12de6c9e-e24e-4576-9d3c-1fcbd6ccc54b": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "522de666-2af7-4927-bf75-9d3efd25b34d": ["050a041d-1482-4d8c-afc2-0475b0eafa28"], "fe8e6e24-ac4f-4570-bb59-e53149f51c49": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "07b9522e-0eac-4459-85e1-b511e6b00056": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "55277100-414c-49c6-84ac-e6a011795719": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "d6a87b9f-e5f6-44ac-bc18-b4f1bc5515a9": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "fc2524b6-ed78-43d5-b6fe-a01bdd53e031": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "fd764fa7-268d-48fb-9ec7-3be9aa2eb183": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "202fc607-7128-470f-aed2-e848351cae24": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "e5097d86-a6bc-49d4-86a4-d058c5b193a1": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "e633d508-d253-4b7f-bec5-75b11962e65e": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "742f04aa-415c-4e14-98b9-5298de43c47d": ["2b47e851-40e8-454f-b9d4-bcc6db94d988"], "e1fd4aa1-c10d-4f71-bf77-b4cca9a6e0ed": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "9fbe4be3-06da-42bf-a241-95fc342604db": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "f1082088-e78b-42d5-9285-ae15731354be": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "e6f71c20-417c-46ac-8a42-10c97c50760f": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "2f6a4b04-9c59-4bad-8823-c165db75adf3": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "a64475ac-8c2d-48af-b278-eb91c933268d": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "0d873b42-d24d-4da1-89ff-6ccc15e84b6b": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "97de1dbe-9a2c-4a3a-9a0d-842bf1c1ad55": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "7e629132-bcc2-46fa-9815-95ccd889c845": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "070a1044-27d3-4b31-aa33-23e39882c9ab": ["4b3441f0-0254-424b-ba91-cba271cc448a"], "10ecba85-50b5-466c-92fc-326759a2a5ee": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "43072003-14fe-4f66-94c2-9ac121ef7717": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "98486804-fb18-4583-ab43-67b07b08efa6": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "54358f22-1102-42f3-b0f2-7227b0a0f0c2": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "ffba1d9d-ebb6-4955-a4fa-839e180301ef": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "99d80794-a2ea-449b-9af4-62646f316051": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "07f00fa3-c821-41ec-9ff1-a781bd1bd55d": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "e9dee9ec-012d-4ba0-a878-5dec4184c9a2": 
["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "b922e48a-8d8e-4ae8-807f-5bb7477dd82d": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "75050f5f-7cfc-4adf-a91a-1d7fb4185c7c": ["fe0a25b0-d293-453b-82a9-e3b1fa922e0a"], "6b983f53-78fb-4bd3-be9a-cb6deb436d63": ["15634080-f043-4a89-8422-a66280a2b0e6"], "550b57aa-5cc7-44ec-b07d-c62cd7dd4668": ["15634080-f043-4a89-8422-a66280a2b0e6"], "65a3901b-9b76-4dcd-b3ba-d04968302b14": ["15634080-f043-4a89-8422-a66280a2b0e6"], "fe52b3aa-3245-48cc-86da-8f3f3225b4cf": ["15634080-f043-4a89-8422-a66280a2b0e6"], "b3b5db46-0009-42b6-9465-4b5f30fbe624": ["15634080-f043-4a89-8422-a66280a2b0e6"], "530d41df-1d98-428a-9449-1e199472e781": ["15634080-f043-4a89-8422-a66280a2b0e6"], "68889efa-4505-424f-ae65-dc462fc55044": ["15634080-f043-4a89-8422-a66280a2b0e6"], "5149446a-0fef-433d-a2ae-bfa7865ae0d0": ["15634080-f043-4a89-8422-a66280a2b0e6"], "0fba5c84-2bcf-4be8-9684-d1904eb861bc": ["15634080-f043-4a89-8422-a66280a2b0e6"], "5634f37b-9702-4b53-8dce-bfb21a101d03": ["15634080-f043-4a89-8422-a66280a2b0e6"], "8eb9d5d5-af51-4376-9b9d-1297edd11496": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "eaf23276-45c4-465b-a0a1-c347a8fc5b76": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "451adf54-6336-4465-b4de-b6390b383b02": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "f2489a19-b9ce-4941-a3e9-154a0ecad928": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "317d1c65-7185-452c-878a-1b16e14d3e4d": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "474c5dc3-a92f-40c7-b290-7529f751d77b": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "2e297de2-2657-4560-9b39-b6316e39d5b4": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "38c93c3c-8f95-4361-80e3-d401cdfb7a38": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "7a8d09ca-630b-49f2-921f-eb1e2dffc47c": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "2b74ec7f-f8f1-40ba-b970-2b4fbc944395": ["5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34"], "839d62aa-6521-47db-b195-5f55f3715e07": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "8c59f600-8f81-40e2-8f76-cad08254ac3a": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "5a461ec3-611e-43f6-af2a-7a7f88454170": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "16e67bb6-1d80-4b1d-b60d-cda158c2d378": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "ddf72159-1801-49ab-a221-531088d69221": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "f24bf2cd-d91c-4662-bfd9-31b03791b58e": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "c0e0ee49-2ab6-4eab-bafe-fec66c3023af": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "c7548d81-fe0d-4316-a260-2d342818a5c6": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "91e539e0-ee4e-4e3b-bc8d-b3794ca3798a": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "4c18144e-d6a3-4d13-a836-e55c43d2c5e2": ["00048035-8497-42d2-80a6-8e8be91e9e20"], "18472313-2f95-4239-88f3-4a75df9cf246": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "18e201b7-192f-4ea2-934b-df1184862535": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "07cfac24-a2ea-4bef-b577-15ed4935a4bd": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "86d77516-8a49-4005-97e2-fef5020a40b3": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "b2ac04a5-d89c-4fc8-8023-6bb6b8a814ab": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "03bc6211-f0ad-4a75-b7aa-92f96064ce4b": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "75513281-57cf-4e3f-8495-cafc4e79d242": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "28e98921-df31-486e-a4bf-cd2a8f018ee4": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "1942efe7-0de7-42dd-9f0b-23cdddc6969d": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "ba2751db-a092-4455-89ce-60ce0f2e5ff0": ["78698c18-fad9-4e5a-8247-59113eab63c6"], "43ab2e49-8484-42a0-b907-a326016b5fba": 
["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "71c80195-c590-4a6a-8455-cc14b7d76775": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "bac1fcc0-33c2-4bd1-bcae-206282b1aebf": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "ded49efb-8ec5-43c7-90a3-414f486b61aa": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "aa5a5372-d095-4583-8dd2-04b30b8d377b": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "d364a35e-d9b1-4892-aeee-562d6fe3fb16": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "8e058945-6592-48f2-a7c0-b29f59239765": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "0586fc63-c513-4afc-a447-0f010dfa2c07": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "5723f355-fc6d-4188-a997-6fc9df87d7e0": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "b1450fe5-2a1b-49c3-8181-b0908b22ec94": ["4b917f4a-d442-4e69-a9a6-1ddff3471c4b"], "d6dc8fa5-06dd-4cb6-b9e1-3d99509fd36f": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "43f41ff8-54b8-405b-97e6-387ac8c232c9": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "def9231f-04c4-40b3-a87d-4a5d13f4664e": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "0abe2ec1-662d-4cd9-8f9a-28ce9be69bbc": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "36dbeaca-841b-42b4-a5fc-2f1795f02564": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "e49097b7-7ffa-4581-8515-1b30cfa7d7b0": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "f51fa6cc-e9f9-4425-b85d-44db74827ce4": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "b85af260-3cfa-4a9c-bc21-802cd691f871": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "7444a8c8-575f-4345-84f3-0124561379dd": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "84e38c59-60d0-4319-81ad-3888d9b12506": ["8d05f043-5126-4ba3-841b-52609cbb7433"], "80255fc3-d877-468b-97e9-7be818ed20da": ["76c4d456-13da-43be-890b-72a103ef2db3"], "9516ed5a-2087-413e-925b-26135b0f5d06": ["76c4d456-13da-43be-890b-72a103ef2db3"], "6fab3d01-f156-429d-831e-4ea194ff08bf": ["76c4d456-13da-43be-890b-72a103ef2db3"], "91adbba6-9d02-4f12-8ca2-25bf46b3776b": ["76c4d456-13da-43be-890b-72a103ef2db3"], "bbdcadfc-3db1-46fa-85d2-e443a126d455": ["76c4d456-13da-43be-890b-72a103ef2db3"], "36c3329b-2a66-4361-b5f7-7ce8ade47cec": ["76c4d456-13da-43be-890b-72a103ef2db3"], "aae224f2-ef20-4cd2-8f53-3e748c2dac1e": ["76c4d456-13da-43be-890b-72a103ef2db3"], "150d6339-723a-443d-bc9f-4b1be5ba3fb1": ["76c4d456-13da-43be-890b-72a103ef2db3"], "3779173f-0285-4630-b333-78420055bd18": ["76c4d456-13da-43be-890b-72a103ef2db3"], "2a0ab6da-3f10-4297-bb6a-887732c4d6be": ["76c4d456-13da-43be-890b-72a103ef2db3"], "66823427-2ffd-4c77-909f-5289272bdb4c": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "0ab73255-60d0-4d92-ac1c-3d0c7a6949a9": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "feeecc56-069d-45a3-88eb-3f4d74890fcb": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "88d366bd-b700-4c98-a848-aedce1be01ee": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "00fc2bf2-fbad-4580-bd4a-05669d18fc7c": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "2844838c-8c42-4c48-a44f-63715f685e1d": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "36ef20ee-eb6e-4f4e-9d65-e279ee7920d6": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "3302a720-0035-40c3-a9b6-017c0952d77d": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "1a75a965-db4c-48cc-a1ac-7e4bcb22e7a6": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "0e9714b8-1ef7-4875-9d9f-858297cad936": ["3db4d91b-029a-4782-8c7b-acecc3a4a75a"], "69b6d24d-4f77-4453-bd79-e8af8137c7e1": ["29854fee-d0ae-47b0-9612-fceb12868283"], "d3ffb77a-2b83-4c99-9923-bd42f0684d90": ["29854fee-d0ae-47b0-9612-fceb12868283"], "575075f0-9aa1-4135-87b7-2b392d2cda2b": ["29854fee-d0ae-47b0-9612-fceb12868283"], "c53668a5-710d-4526-98b9-af2788d4b125": 
["29854fee-d0ae-47b0-9612-fceb12868283"], "84bee82d-ff9b-4d9e-8a7a-b6cbcb4db569": ["29854fee-d0ae-47b0-9612-fceb12868283"], "d74ae40a-97e3-4c00-bef1-b09032380ffd": ["29854fee-d0ae-47b0-9612-fceb12868283"], "7b48a310-6aed-4f43-a279-f37e09939bba": ["29854fee-d0ae-47b0-9612-fceb12868283"], "3db05dac-a97a-4b1e-9652-d76803a23c04": ["29854fee-d0ae-47b0-9612-fceb12868283"], "984009d5-34ae-416b-b936-3f783743a025": ["29854fee-d0ae-47b0-9612-fceb12868283"], "1fcf65a3-0d6b-4145-99cc-0bf2c3235ad1": ["29854fee-d0ae-47b0-9612-fceb12868283"], "66fd1727-6e57-49a1-ae65-9fdb471abd5a": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "4d10f264-ec1c-496e-8dbb-69f985ae5cf1": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "4f34a0ae-db65-487e-8242-a6f5458fa9de": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "4c4c9829-d71f-4438-a202-eaef5533887c": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "90a56106-7089-4755-973a-da4912ed88d1": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "0a37c463-1465-453d-bbbe-74e8a11fe10f": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "bc9e1c96-6a89-4d9b-b96c-71acb36031e0": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "037f5eee-4dd0-4bed-9151-ef0be3018604": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "06f79190-3996-4bfd-86e5-6065a4dc96ee": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "3df54901-e507-45fe-bd82-154e2d31aae0": ["b83ad31a-ed48-4afc-9e7e-8561388c0a01"], "c4da0ee6-b97a-4cac-839c-667bdb97a9d0": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "37843ed7-8a1c-4d60-818a-72b161e25abf": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "0944ea5d-dbdd-4f95-829a-6caacde3bed0": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "12d5b6aa-803b-4df4-8a6d-4fce86fdf65e": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "643c631e-a495-4f76-ac44-fc0226061601": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "6fbfb445-cd6f-4999-b0b3-48d65d58c0e6": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "9c0537b0-0ed3-4084-aefd-70d1001d512a": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "379808b9-e51a-42f1-b658-7f085dd496a5": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "f9ec3773-dc0b-474f-8e19-10f71c543467": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "caf8112e-e9f3-4acd-81fb-c3b49486c424": ["de752b0d-82c7-4ad6-b01d-86406280913d"], "581a5da7-213e-4a7d-9f8d-d4133dd57842": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "9d4abb67-4cfb-4342-9757-6880500e5378": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "6c77fa09-63b8-42b1-ab8c-8a0878fe99b4": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "a46e5b14-4ca5-47eb-b330-0d94b8f9f326": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "7fabb17d-bead-4be0-a042-a82244952276": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "b6bddb5b-9792-4d2b-8ddc-6bd3455e5a52": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "93e73653-3778-45ec-8b88-83ef0cee390f": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "0ce84b04-c919-4d13-a642-1c811d94eb54": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "8b538c35-c3ca-4876-93d6-3a9de1887248": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "07203f30-4f7f-4aca-8eca-7c309b4986f2": ["ebca47da-e839-4e2f-bea2-317ac8d5564c"], "b3dc9b16-8ecb-4c62-ab68-b96bed0dfc8d": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "02ced4ba-2675-4cba-9819-e8e00d3cd558": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "98bb75ed-7a8c-4dce-86df-07d671be2a37": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "da95bf2a-175f-49d9-b8a6-280808d008cc": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "f6187e72-cd60-47bf-8a65-87c6ff917a35": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "14dd793d-1318-45e0-bf65-ae5c770cde2a": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "d3a655b7-63e1-43d6-a477-2241c65e6d0a": 
["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "777e4f21-26bb-4162-81e1-25f943df9293": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "d4ec38ed-55a8-425e-af0d-8589faeb46ba": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "2a40a8e1-c0b7-4eac-a84a-5d63fbdb6c9a": ["de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7"], "c54055bd-b859-4846-856f-3208fb311b0f": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "6f0ae6ac-c093-4863-8cf2-4ada7f9d42e7": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "b4852e25-ec60-482f-a671-b99fc5379d18": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "0d20124d-250f-42b7-a215-c52597d6ff03": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "477cd099-57f2-473f-9e76-397ae38e415a": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "dcf15838-d29f-4e4a-843a-165bf83a1bfe": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "9d4de1d9-a66b-4c82-935a-f7e97333ed5c": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "070ee859-7379-4692-a8af-44336782308e": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "639643b4-60e7-474d-aee6-3ee349564306": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "acb51a42-9248-4132-bcd5-aa4101f783dd": ["fc720adf-5fb7-4872-b139-9068225b9ec9"], "507836ec-d203-47f8-a680-09f83fa2a01f": ["10868542-61ec-47eb-ab65-71788898bc00"], "0712927b-b14b-48bd-8873-1773a4748151": ["10868542-61ec-47eb-ab65-71788898bc00"], "5a1bdd93-7277-481d-9646-c6981146f8d6": ["10868542-61ec-47eb-ab65-71788898bc00"], "4ce4e9f4-b9b5-4632-a08b-c5e41c2f4b8a": ["10868542-61ec-47eb-ab65-71788898bc00"], "d4e55ebe-4671-4b95-9b07-c9e5e246be26": ["10868542-61ec-47eb-ab65-71788898bc00"], "8b4d8ad8-933b-4290-9fb2-14ecdde49db9": ["10868542-61ec-47eb-ab65-71788898bc00"], "04d3166e-c3f8-442a-a5a4-36726190ef49": ["10868542-61ec-47eb-ab65-71788898bc00"], "da1e7ff4-4699-42f3-b32e-97305e4b357a": ["10868542-61ec-47eb-ab65-71788898bc00"], "81f01fdb-f899-4475-8c04-9e7fe197e698": ["10868542-61ec-47eb-ab65-71788898bc00"], "88a166d0-e31c-43cf-8f58-baefce9d284f": ["10868542-61ec-47eb-ab65-71788898bc00"], "9ed882bf-45cd-4218-a475-bb18625ade4c": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "56adf753-40ac-4593-bb8a-832feec13d31": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "19cd8cd2-a1f6-479e-8713-95e8683eee7f": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "3e165023-0718-43ed-99b9-10cbcee9f4e1": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "dfad7b64-3277-4f06-af37-d85af7857c4e": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "a41d0161-4967-4046-9c48-e87cea7d5bf8": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "bf872985-a9f6-4c68-ac13-c3442a951f5a": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "f53a3ba9-3917-4f06-a722-e4d54d8119c2": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "6fa39a37-eeaf-4520-b9bd-162fce45ab8c": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "f4bf60dd-02c3-4248-bed1-4831c9772821": ["b99b4a4b-8d84-4830-86d5-d78ebfff7f8a"], "63eb9983-de78-4519-b2c8-f22b454ee053": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "267043c7-c798-4c11-b2d1-9c33ddd4fb4e": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "d30f4bc2-2f5f-41a6-b0ca-38e7382ef444": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "fc798bdd-e26e-4ed1-8142-e17cd09368df": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "1fc3dca0-6426-432e-bf6d-db436aa4dfd0": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "3eb7529c-01dd-46f1-ba24-c428f4b98cbf": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "10147097-3b5f-4390-8fdb-61a55029b730": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "b73e2ec4-11cf-4e83-9ef8-b0612a7df8b7": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "3f2795a8-623f-4759-be58-bac1f301777a": ["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "1dacc26f-f637-4f67-b91a-d11102d1bb85": 
["4f4414a0-d193-43d9-9f8d-d7d21770b1db"], "b5373d9d-ce4e-40bc-bcbc-4f675f59d1a8": ["39990922-0c0a-4008-a84a-ab498e27827f"], "029ed6d6-5b8c-45f2-8690-d47dc48f8232": ["39990922-0c0a-4008-a84a-ab498e27827f"], "3dd37534-24da-494e-b97b-5e3890f7f13e": ["39990922-0c0a-4008-a84a-ab498e27827f"], "a89ead0f-a8bc-4db8-adae-b72aa1d2c3c5": ["39990922-0c0a-4008-a84a-ab498e27827f"], "3babea8f-5928-47f3-962e-9b283207ab3e": ["39990922-0c0a-4008-a84a-ab498e27827f"], "0af422b3-b85e-4b51-94fa-71fc9ab1d8fa": ["39990922-0c0a-4008-a84a-ab498e27827f"], "1c9b32d4-4ae7-4a96-a889-035f67276bb2": ["39990922-0c0a-4008-a84a-ab498e27827f"], "9b7725c9-7b92-4fdd-a10f-8ec1530b228d": ["39990922-0c0a-4008-a84a-ab498e27827f"], "649f7414-376a-4e58-8d1e-23d5e7ed9d6a": ["39990922-0c0a-4008-a84a-ab498e27827f"], "e78a0693-7005-4bea-89f2-c3ea112bce08": ["39990922-0c0a-4008-a84a-ab498e27827f"], "9db21a04-c438-4476-ac0a-82338eae49b2": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "97081408-da30-4645-9afe-f3307ed6cdc7": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "289db7b2-4d79-467d-a5d9-9d5b231a0264": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "2d57ed17-a6f7-46f9-81ca-95cb462059cb": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "4922c3b7-23a1-4ecb-9156-b04943f93fab": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "f2e88045-e05b-41dc-8cec-46bbf532cb91": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "8e4237f7-4dab-4b3e-872c-636b750a8551": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "6a0cc42b-ef86-4bfe-8697-1dafa8d40ca3": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "a27db742-1147-4e73-b8f8-fb28f863182a": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "1da6cbec-681b-41b5-8fdd-7e46b4b39a69": ["0159af88-d877-4a4a-8cd2-c1341841cb58"], "dc7b408c-d2a4-440a-8bd3-382b37949adc": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "12c4f43b-f79c-40bf-89c5-5de9f0101a4f": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "928f7047-c52c-49a0-aad2-2d36efcef6dc": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "8ee048c2-ac9d-46c0-8558-0d5e5e8a8743": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "7785c38a-a968-421a-b0d8-c15fe4003fa3": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "5af5e28f-c346-483e-8b52-dabdb08f98cf": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "6eec9174-86c8-46fc-84dc-0a9dadffd18c": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "d9e3443e-a2be-4b7f-9ead-f1b68a8decec": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "ed5ff628-1277-45a4-a100-1c1d064b76a6": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "29499a6f-b07f-4cba-b4bf-90e40d038c36": ["62055a37-ec25-45b3-ba6c-327cd1a68fb4"], "dd201fcb-d5b4-47aa-9fa7-584fdc9a91d0": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "f95cd300-7368-4898-875c-8a16e2699308": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "08be409c-62b9-4046-a4e5-41f7aef94b1e": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "165031b8-01ee-4b7e-a254-95b7729cbac2": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "77d2f46d-9e77-41fd-8367-3d123120a90e": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "41496cd8-8df9-4ece-9b75-799e48764e3d": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "8e9ba476-d233-4ed8-8772-0fbd52872bb3": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "10c840b0-30ab-46e2-9109-d9b669bd57ac": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "6e44c18a-5777-4822-9e66-8bc1a5a2f194": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "85eab450-b260-459c-acfc-4403ea487fee": ["f2534337-c301-44f5-b42c-5472d130b0b2"], "93694198-599c-4796-8cf1-d528b4f3fb10": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "2c321c09-c163-4b0c-81f8-f536580d0caa": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "65b87922-c4d3-45bc-b744-c2b89f7b4788": 
["fbee7ad0-9a74-472c-9305-bef84494cbac"], "cffd7b3d-0081-4a33-80e6-28b3fbf26823": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "20e03c70-f24d-4dba-9cb5-042b4cba722d": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "760cfb81-f3c9-4194-a7d3-602f41fab3e8": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "f709685b-47a6-44a7-8f9e-3aa0df73870e": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "16141df8-9867-4de3-bcb0-a2b9fac32680": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "a44f2530-9479-450c-9cfd-272c1f2071d6": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "38270bdb-0dd7-458c-b93a-daebec80be45": ["fbee7ad0-9a74-472c-9305-bef84494cbac"], "c699565e-04ef-419e-94cb-1b796a737007": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "547dfa62-9140-4639-82df-a9142ce5286b": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "9af0586d-74db-446b-8e26-38be4dd9afc6": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "eccf5725-f424-4268-b727-1f1a40978434": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "abeee4d7-e9fd-4575-a169-f5d27f7e36bb": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "66addcfc-c316-415e-b410-381db4715a69": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "a172a3a2-d810-4b13-909a-903ececb2cd1": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "a5209e31-d0db-40ff-9bb4-1750d45a7717": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "8ed364a4-97b6-4794-a155-78239f56aa6b": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "cf81588a-da58-41b7-8f19-e0ed9fa5cd06": ["ef45de44-4305-4bd2-9488-5cfefcb0e8f0"], "20cc838f-302b-4a97-b9c0-048b821e21d7": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "11441d7e-f3c5-47eb-9661-dc9501b61db8": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "96391b37-e002-4444-bc88-af07e80d1be7": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "f173dfc5-537c-4934-943f-6a4a8209556a": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "adb72817-95df-4fe7-be09-43be351339a0": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "98945bdc-d7f5-48af-828b-72fdff9b0735": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "ce1d92ca-dd4d-4c46-a861-5f738828da2a": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "6739f71c-fbe1-4126-86a2-75deefb42217": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "b3952c44-c80c-4aa5-b364-771c28f8d963": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "ab8785d8-fdea-42dd-9207-1331f4d206dd": ["9137db7a-37b4-4e69-bcdb-cb521f287b3e"], "ad91ce86-6dec-4abb-97c6-044f2e6af46d": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "2a47278f-8456-435f-a92f-ad1569f822a9": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "4baeb7cc-0f63-4432-a61e-2cca08742d2c": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "7229c62f-af34-49e6-a1b2-244381296eaa": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "a551e275-7aaf-4936-8b4a-335333fd8c65": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "3185d577-2897-470b-b66d-2b878c1635f3": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "6b956523-9518-4fff-b4c4-3cb46fe34560": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "f829362b-3144-4e70-9964-54ff3cf5f3c8": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "04775f45-fc8c-47ae-a74f-788e790dbb7f": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "ff3d43c5-1076-4f90-9595-d7507024df5f": ["f1df5b88-dbcd-43e5-9438-c511d20b3a85"], "7a87b77e-ba2b-4a75-85d3-eff3e7a4f1b3": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "2af440c3-e3bc-417b-9229-b989c418f726": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "f9ad7469-3e3c-4909-8679-251103726804": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "94623f99-aa39-458d-bf0c-452bdeae335c": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "a18f484d-0c10-474e-b56a-fc1b92977406": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "caabe18b-3881-404c-bba9-113c918db0f3": 
["70eba8ef-45d4-4747-9f08-4658cbede511"], "6cb41015-478f-4102-8f71-ca101130bef4": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "b45acd2b-d605-4e83-8246-a99cd8d2ffff": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "4f89b591-6f2b-4813-bd57-b6ee9e7cd684": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "259e3515-ab8c-4f3d-8cbe-178ee51a7ff6": ["70eba8ef-45d4-4747-9f08-4658cbede511"], "f7f7c66a-12a1-4a76-88e9-6719de9f8272": ["45015a61-5cbb-4192-a277-099739b8931d"], "0404c1f3-7897-4222-838e-d468ed48b862": ["45015a61-5cbb-4192-a277-099739b8931d"], "da09c33c-a2d1-4eba-b8bb-9cadf83a607a": ["45015a61-5cbb-4192-a277-099739b8931d"], "84e11e40-3503-4afa-9c0b-3bd17751dc5f": ["45015a61-5cbb-4192-a277-099739b8931d"], "fb6baf2b-0c39-43c6-8a23-5a6286b57d3c": ["45015a61-5cbb-4192-a277-099739b8931d"], "5ae8a14f-7af5-4fe1-ad8d-daa3b64b7412": ["45015a61-5cbb-4192-a277-099739b8931d"], "b86dc33e-c44f-40f8-9201-6e77c52b2d3e": ["45015a61-5cbb-4192-a277-099739b8931d"], "44365a55-f06d-44c0-86f8-5e766884f298": ["45015a61-5cbb-4192-a277-099739b8931d"], "cb8c33b7-5edf-4f02-827c-e51d99fe8c20": ["45015a61-5cbb-4192-a277-099739b8931d"], "57d78d01-d80c-4a0d-bffe-90e8c5435146": ["45015a61-5cbb-4192-a277-099739b8931d"], "a7190971-83cc-4e5b-ad51-d097c2d2416f": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "31534a0f-52cf-452d-9a29-f8ac561198f7": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "2c413bc7-6f0c-41fa-ac50-d2f146083ecb": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "f493ae8c-e533-406a-8fc5-5c6bdaad070b": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "589fdab2-8712-4a25-9c59-8f26e281a6dc": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "3f801419-15e7-46cf-8d7a-d2709b6b5792": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "2acf1a52-6ed2-4c87-9d62-518df86733d3": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "68b6149d-8a90-4b40-b32e-6e844ff70543": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "bfe3017a-f8b1-49b4-8022-1d6c6b18b595": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "66d721dd-e056-41a0-9177-35ec1a02f2a3": ["0614bcc7-9305-496b-bd3e-a1802f15a833"], "7994cad7-21e2-4e09-aca5-f56ef1ae8a5a": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "47e3ea93-fd88-4971-8203-e36fc3545d64": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "de90f1a6-2df7-4240-944b-87dad8ddb4c3": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "b0f19328-390f-4526-9bd5-58e3217d2939": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "3a98c1d4-e98a-4339-a8d8-666a62f395d1": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "3e20c7ee-70ab-4ad1-b14f-32dfe44d0c85": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "a13b0ccd-e3a9-4ab3-afc9-d9c863fbeb66": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "d904e7ea-ad3b-4610-ab7e-0e02f95f7b78": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "2fbedd50-8ea8-4da8-a425-802da3096391": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "1ec42e53-7159-49a1-9293-810d7a184a7f": ["3ba1fc2a-6637-4894-9ed8-e02f197b2e39"], "a0842cf3-00a4-4746-87d9-6ac45ada3ebd": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "b76e72b6-358d-4943-98de-cad3c4c392ea": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "4f6747b8-9ad2-45ed-8979-f605d5765654": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "8effaebc-2db1-4d37-852d-5b080b55344a": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "ba664a46-2556-466b-94a9-e6c9881f6bc5": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "412a6863-0745-4d66-8b48-137fc5c03a6e": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "a96cda8b-8d4a-4085-b297-fbf4fc69d244": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "c4c103d2-5129-4be7-bf42-4971da92e49a": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "4a2850be-c31b-461e-9165-f82e606b10ff": 
["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "398d3a34-f7cf-44c4-81ea-d314e9f92196": ["a0ca0e56-c1af-4533-a94f-456e90ab52f4"], "9b1e7e8a-3e78-4c98-a0df-24b37ca791cf": ["f2565463-e689-404b-8348-06ab3fef9462"], "0a69cf57-ac9d-423f-896d-b9e4e02f762a": ["f2565463-e689-404b-8348-06ab3fef9462"], "164c840b-8cfa-4c20-a441-123e95824ffb": ["f2565463-e689-404b-8348-06ab3fef9462"], "e20bd84c-d838-4049-a2df-fba4b9d06ea1": ["f2565463-e689-404b-8348-06ab3fef9462"], "387e47d4-c086-4a07-aee6-28a9fa60cad7": ["f2565463-e689-404b-8348-06ab3fef9462"], "3700f1e4-cf35-4c27-b2c3-9101bf5d8dfd": ["f2565463-e689-404b-8348-06ab3fef9462"], "8c76e76d-83c9-4a57-a7df-b1c0e035ef8b": ["f2565463-e689-404b-8348-06ab3fef9462"], "7d2386c3-a371-45d9-965f-2c7a9c11567e": ["f2565463-e689-404b-8348-06ab3fef9462"], "badd4da0-e5f0-426d-ab2a-1019b483cc97": ["f2565463-e689-404b-8348-06ab3fef9462"], "4da36b86-9edb-4192-abf8-385aa94c36e8": ["f2565463-e689-404b-8348-06ab3fef9462"], "f377e766-01de-4a90-9986-ecb3ee8d61a9": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "4490f651-668a-4e60-9931-3c6e393d09b8": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "68e419bb-fa35-4890-98c7-4c8a11d9c78e": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "838be5c8-7e39-41a0-8cc2-5d54526523b5": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "8540243f-924a-413c-bdad-29d5f1240c58": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "c05097c0-b8be-4d9f-94ad-f28a74c0bbf6": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "ed30f33c-f72f-4ca9-a986-e78ea5a84f34": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "078872ce-c620-44bc-b9d9-715ca524c82e": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "c9c8531a-c33d-47fb-9c20-0a74872c9cd3": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "bd0d1036-d121-46a7-a63d-633b1fe52539": ["e24cd6c2-55a9-4c13-994c-7aa012bae660"], "64ded336-d26a-4bba-9264-29a782f46b17": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "a1524a0a-e5c7-4b05-9291-a9f012c2f8a7": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "69cb94c7-5d81-4222-9d53-052f0edd2720": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "10493f76-3a15-41b8-a590-28e28ca5d22f": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "0aa175b1-cf6c-4db9-95ae-790c02ed0420": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "e8a86a82-58ef-4852-8a9f-46998fe000e6": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "e0c9a867-2ae3-4d5e-b74c-b2145e48b693": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "f497ff9f-1f6b-49ff-92b7-b97bbf4e0d33": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "82c14235-06d7-4a04-8d16-eefd798350a9": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "6e3592cd-fc1e-495d-9976-3cea02adf35a": ["695adda2-bf04-4a04-9e8d-646121be4bbc"], "bb8fd71c-8305-452b-bcbf-823fa999f419": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "84ddfcae-5a84-4ca2-b23f-9f2993a78864": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "6afc1f62-ea09-46aa-a9b2-a2231efa41db": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "50dbd6b6-2f67-425a-8549-e938e1e443c4": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "33c03e38-7064-41dd-8c15-2570eff220f3": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "c062971a-0cf8-4ef1-93b3-5fca436d15d4": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "3664af6f-9a6b-4750-9157-5b8c10c049bf": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "57f12af2-eace-4916-bf6c-caa521375c02": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "3421dbb5-bc3b-4a71-9aba-9794e6c02c61": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "1ea5b81c-aa5d-4fad-851a-f32df353f699": ["fbac8185-c770-4b0d-bb3b-cdb1deafa299"], "bb9c137b-e957-4d1b-b681-1150450070b8": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "b0543347-b171-470d-ad58-452023d32540": 
["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "420598ea-3a86-4086-a5a1-8ca8b6a218a1": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "b2067f9b-b092-45f0-9fef-ed8c776f2ec2": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "9d318383-e177-487e-b4cc-8d2e8e8fcc13": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "78e29c0f-c807-43a5-87c1-f4c5adab2595": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "1d9240a9-0d09-4243-869a-45ed6bd3e9db": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "0dd67cb2-1fd7-4f2a-8672-8384a5b08a73": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "558abc03-0ada-41ab-b040-b6a193e5b238": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "a03bd0ad-5366-46e0-b712-e2748c71adf5": ["6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37"], "7d190ccc-08eb-4795-975a-b70b5b5615ec": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "36548052-71c8-4515-ac0b-ad22a66ff05f": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "9ac8b717-1ae9-40dd-a3f6-5d158e342089": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "6b5a119e-29e8-4818-8d39-1e9518975f3e": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "5a0058ad-c9e9-4177-8811-35405c083ed1": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "a1134775-18f2-4e09-af57-88092617afdc": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "1dc6b0e5-60ff-4939-b09e-5d09b8373385": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "b0b4a730-7836-419d-9d06-8a4e167aa24e": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "3e475a7b-f183-48f1-9831-ecd371bfa399": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "91cdf914-e675-47bf-93d5-c257b52a9a51": ["54c7a44a-c1ee-4ef5-b189-1ef365323d0b"], "2e1a0ef5-cf28-488d-8c12-76d6037e5541": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "ea3e3a56-e8b5-4d12-9369-363db39673a1": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "dfaba32e-19b0-4171-9fff-cc08fe20d751": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "399b8802-887c-4184-b0f2-1a2bb5d07543": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "9dd9b476-9945-476b-9194-d8c83c4f9121": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "3ed61f5b-fb54-4c4c-b6e5-0883b23b939d": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "dc4b76d1-b12c-4871-9175-8b56b5ad43ec": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "a2334897-6fd9-497a-98b9-6686d453c2ad": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "0c5ef9a5-ec71-4016-8ae6-d82d2f95a1c5": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "7a2b2ba9-a488-4c90-b169-70168efcc780": ["1742ef3f-905c-4f93-b448-e55fe12fd9df"], "3fd5f3df-b1eb-4d8b-ab8d-07dddcc7fb48": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "3be1b1c1-3098-49a5-b204-34bfa97d73b3": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "7a009e96-556c-4842-92a2-a7b32f8146b7": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "25fac591-48bb-4ed2-bded-de52900f6c77": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "ec3564d2-3ce8-4426-b4d7-b66f34d730e8": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "8e35ecf3-062b-4a03-810e-8eae31b35e67": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "a0f073dc-3521-48db-9264-58f3bd671c3a": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "29b2f6cc-206a-4bd6-98c7-e2c851e3a8b2": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "a86dc0bf-80f8-4a7b-969d-cf58b6502c4d": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "481d542a-46dc-4b5e-8bbc-a6e1094820a1": ["ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500"], "4a6050b2-f6b6-4ac9-939c-2e1188e92e89": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "72d17156-9c97-4463-a90b-1619a6608c3d": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "85b5625a-d5ea-45b5-b7b3-e66d4556da73": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "8b7eeeff-ae14-458b-978e-5c1fc9573219": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "7b875bc5-cd9a-4069-bcfa-6de556eb6548": 
["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "ee228074-989d-4a9b-bb46-df0a26dfda27": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "89b66459-d623-4a30-a195-8da50e72a096": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "9a2bb4f3-2d1a-4689-9dfd-70c1a4fcfade": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "ea968134-16f4-4d5c-810d-6049e27309d5": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "a62f4790-c903-4ecf-bd87-b92068d5138a": ["a4951da4-180d-4dde-b64b-6b70ab5c71e7"], "9e50e870-f706-492e-b58e-93871354422c": ["573cc872-ae49-416d-8065-83f04ba76d18"], "56433327-6403-42b5-bf0e-0d4ac4b47722": ["573cc872-ae49-416d-8065-83f04ba76d18"], "bfb3f439-fb7d-4a61-b36f-ce7e06ea95fa": ["573cc872-ae49-416d-8065-83f04ba76d18"], "2e31e7e4-1966-455f-9c93-2f0416756f97": ["573cc872-ae49-416d-8065-83f04ba76d18"], "552e3fce-d322-41d0-a8d1-3af4d9b6d239": ["573cc872-ae49-416d-8065-83f04ba76d18"], "9f9d57e8-6574-422f-a98d-9523410c912d": ["573cc872-ae49-416d-8065-83f04ba76d18"], "6f686792-3271-44f2-962a-d0c914f0a618": ["573cc872-ae49-416d-8065-83f04ba76d18"], "608ada78-40e7-49ac-a75a-e38f46e3d296": ["573cc872-ae49-416d-8065-83f04ba76d18"], "44ee99fa-4fa1-4199-a49e-31e3f890d5de": ["573cc872-ae49-416d-8065-83f04ba76d18"], "b9dd8dbc-8dcc-4aa5-ae17-13bc77130913": ["573cc872-ae49-416d-8065-83f04ba76d18"], "f49391ff-b450-458f-8e1d-f4b2b768a48b": ["9322efea-331d-4996-9474-cccaa4df697d"], "3c010abe-44a4-4a61-9b06-c9c60c98d68b": ["9322efea-331d-4996-9474-cccaa4df697d"], "815c1373-abe0-4296-ac84-38501b0c77cd": ["9322efea-331d-4996-9474-cccaa4df697d"], "9eec3afd-f85a-4f17-a4d9-2c0989096d84": ["9322efea-331d-4996-9474-cccaa4df697d"], "b795e850-79da-4183-9560-e011ed68a0a6": ["9322efea-331d-4996-9474-cccaa4df697d"], "0ed96dda-2f6e-4335-86b8-3a5d06011bbd": ["9322efea-331d-4996-9474-cccaa4df697d"], "2a6ac783-bbc1-4474-9864-2029c75aea04": ["9322efea-331d-4996-9474-cccaa4df697d"], "3e13f6fd-312b-432b-9012-933f8b0a169a": ["9322efea-331d-4996-9474-cccaa4df697d"], "7b11f5c5-827e-4e70-85f7-d83417de66c7": ["9322efea-331d-4996-9474-cccaa4df697d"], "fc17d202-010f-4306-916d-724cc681a109": ["9322efea-331d-4996-9474-cccaa4df697d"], "690220f2-52f5-463a-a2bc-b46c7e1aa79c": ["b4e35127-be78-433a-b6aa-b7e222864503"], "e2b24491-dd1a-45d0-a381-86ca5ec207df": ["b4e35127-be78-433a-b6aa-b7e222864503"], "56f6feff-327b-4075-ac32-9d237ff6468f": ["b4e35127-be78-433a-b6aa-b7e222864503"], "425a7779-bf1b-4757-8833-e612c9b1ea05": ["b4e35127-be78-433a-b6aa-b7e222864503"], "e03f414b-1fb3-4067-99fc-cd4b122f509d": ["b4e35127-be78-433a-b6aa-b7e222864503"], "0d06fec9-3c74-49df-bc13-1e3ac9679a8d": ["b4e35127-be78-433a-b6aa-b7e222864503"], "b13e829f-73e8-4b70-9adf-6ea6b7b298e8": ["b4e35127-be78-433a-b6aa-b7e222864503"], "ad29fd63-96d1-4788-8c4e-794dd0d3f326": ["b4e35127-be78-433a-b6aa-b7e222864503"], "cefe533c-58c1-4179-b1f7-9134e20f1d19": ["b4e35127-be78-433a-b6aa-b7e222864503"], "61db5427-3644-439f-9a42-012708bb8917": ["b4e35127-be78-433a-b6aa-b7e222864503"], "7d332e8c-9943-4cdf-8bb4-1a3b498766ea": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "b8324cd6-d521-45f3-8ba9-08ce631df758": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "05f09f2b-477a-4dbf-aaec-b5af1923c6b1": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "f022ec7a-8358-48d6-824c-0c26e658b35a": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "469f8389-8d8d-4b27-a69a-f16514c85c43": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "5e39df0c-36a5-4afa-a2f9-6c5e341815f9": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "8d3feca0-94b3-4b4a-a785-23a3b9b9a23c": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "3a3c3db2-4132-4478-a847-b36068c56e01": 
["20f0980c-bd47-44c4-b835-82832c82da3b"], "15467e60-3777-472b-bd0b-47bf9f467a78": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "9a7b141e-920a-487a-a055-e1e9f4f1b2e6": ["20f0980c-bd47-44c4-b835-82832c82da3b"], "ab11ab5b-818a-4880-8e91-ed269bd1af2c": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "df1199d7-da58-4fef-8f9f-07735134932e": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "5958036c-3258-4303-8b1a-1622e068421a": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "5a32b7cd-ddd5-4513-abdf-563e5cd76816": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "fee5f65d-cb22-46ee-8464-222e373ff06b": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "4a58ee08-f3de-45fd-907f-e153459dc81e": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "d42d824c-1120-43e8-9235-7572b76c40a4": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "4a312aa6-5cb4-4c06-8984-88e91ac45dc4": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "d04730cf-a6d4-47bc-a699-0e3df94761de": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "14564a8a-cdac-40c0-a780-78be519b65dd": ["ab80b8f9-d569-40f7-a6e3-8d305b06893a"], "569527ac-7ac6-431b-875e-d434eb402c21": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "8389f03a-5744-4818-89fe-c077ff7f056c": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "1ccccd94-2c43-4041-a4bc-08e24d811d35": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "93b0c907-004d-4f32-8b53-a052b75beeff": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "a7f6c8ab-1b04-4dfa-9f57-4235258646b6": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "526267ce-38f4-4e58-b51d-6ae5ff0cf0dd": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "b79cbec1-977d-4eb4-83ae-3d2d9ac42e08": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "9fa37ff2-7f29-4880-94c1-06fb3c6e92d5": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "93413343-a62f-4859-8ee9-a611536c2b93": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "48834fa9-9abe-4fae-a27c-636139f88c50": ["410d1dad-982f-4e6c-acfc-8c4393c47fb4"], "65958d5c-13fc-4718-8038-77c445872b42": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "ffe21033-700d-4f6d-aace-0e85d2620745": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "8c46db62-ddab-49e2-b983-32432a14f583": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "4a376bd0-31f5-4955-8cfe-1e0e65d8f7e0": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "7e329714-1d9f-4b04-926d-a8d79ddfdd9d": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "7d478e4e-d640-4a81-a971-a2d88d655d78": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "61151ee6-a28e-4a40-97ad-d1edeee36eb6": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "b3576af0-4c36-49f5-a0e8-247d2ec6f65d": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "b624d6cb-faa1-4468-96a8-136321416574": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "8ecd7751-64ef-421e-a44c-3231886cdc17": ["31f4ccdf-832e-4dfa-96e7-a006d2b27a92"], "632ebbb0-13c7-4a44-b49f-bb277513d09c": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "e06e7091-ac74-4abc-bc2b-da388c1149b9": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "64518426-3d37-4769-9d8d-826b0577f97a": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "dc8b4a4c-213f-4f2e-932c-a9a6f8cdadcc": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "1bcb29e6-07a9-43ff-a455-16802ef28195": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "19e66fef-7521-471f-82e2-f6884868acaa": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "488f2216-2ff2-4ee4-9851-37299ad2c6c7": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "64edf8a2-3462-4be0-976d-1fddeea48012": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "b841ebb5-559d-4fe8-881f-c79ff7004ee3": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "5a0bd7f7-a52e-418f-9ada-e1fe84848613": ["2542b1d9-a513-4744-a0e8-2073dd6969bf"], "64a056c2-8ae4-4280-874c-4a01461cc3e4": 
["97dc3796-6c1d-4071-a96e-444a6e76f758"], "d69e8d1e-a600-475f-9486-61b3c75a2df4": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "2dbbbadc-b880-41d0-bb6c-e682dacb3df7": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "2827fc62-14af-4e2c-b663-ba391d6d25ba": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "7fb6fed7-6aae-4ccc-b597-ab7e62e9b21f": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "44938ddc-9358-4c82-94f2-840413742d55": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "61da46c4-d66a-4522-bc97-af4fc2456222": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "3f063cef-02a0-408b-98c7-56446e0fc13b": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "b4c11f6f-dc78-45df-9b02-6c9e97e63441": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "a88f3780-0edd-49b8-89f3-212ba3ce8b3a": ["97dc3796-6c1d-4071-a96e-444a6e76f758"], "ba2105e2-73d4-4990-9944-6d240e81707f": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "17ebc163-d3e6-449c-980f-7ed26f839df6": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "aab856e9-8ff2-46d9-893b-b6d07ab393c6": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "cbad6951-7c8a-46df-bb70-c92a3f311a56": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "79e16b79-9d36-4087-8659-146694974dfc": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "1b05d0a8-49d2-47be-ab48-25a75c6e77f6": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "a980423a-f92e-407d-8954-c365cd973ffd": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "70182aa9-6598-41d8-94c5-c86d69052289": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "c0cdbc54-4086-43e2-9a8b-0e45b61ecc4e": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "732b7212-2022-429b-9d2d-ea2f1ff7d3f2": ["a15c2582-15b2-4587-b765-f5f0457f9f91"], "9875b4af-3367-4c2e-9a31-faf2702adfa5": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "6fe66997-2216-429a-9ef4-7fe53eb306b3": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "36ec6e8f-35e5-4186-b6d7-f265dcdef11f": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "7d63e0e7-2c2e-4330-bdf2-4c7e114b7805": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "bfa45ef0-bea8-47fc-b980-0b69ed2e1008": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "6df2f1c4-ff8a-4107-a463-f56276352018": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "51560d91-654c-4d78-bfbe-98c8ec9efde0": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "9fde8cf0-b0b2-45f7-ad80-cadc55db3835": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "b5e1a791-bb5b-42e1-b095-4eddeb8cbe89": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "3d828c80-9b73-4da4-a5ca-9009d879a58e": ["93c33081-e2b6-4d06-afb4-8419e8ac265e"], "8fd42408-bbd3-4239-b478-4be231789ff6": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "41478b94-524f-4cb6-a05f-d28d7bf4f9c5": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "607f4dd7-ca7e-4ee9-b4a8-f5f9760aeb1f": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "dc9d51d9-e01e-4c24-8bc5-f2d310dd159f": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "d012d297-b626-462a-868b-099e4ebe5d08": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "19032e5d-0bf0-4167-86e2-54f143702e03": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "46b67447-c9d2-4689-b32c-eb292015ef9b": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "bbd073b0-20ee-4d44-ba79-37e22da7f19b": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "9fd04f5e-f2a7-4d3d-b5fb-3c5182e8df33": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "4a50706f-2be3-4191-a799-d03cfb6bd582": ["07584cae-264c-4ad9-abdf-caa426b1af9d"], "ef610b4b-0ce3-4569-b444-685ef6d6f71d": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "3332f97e-e957-4370-b156-5b2e74510318": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "38324b8e-0942-49f8-88c3-e261660ac848": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "9676f8ec-7e8e-461d-a77e-337c20095d6e": 
["354bd225-ca88-41ce-8470-abeebfc7da3b"], "07c06943-1c60-4690-a161-f2c3d04c88b7": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "f8088e52-e35c-4f3b-8479-e530e41e83c5": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "d218c959-31ff-4b08-8feb-b6cd5b7f9808": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "fcda6d4e-26ec-4b88-b9e2-33410629db75": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "358806ca-03c5-4219-8f5d-7d6bd50c4a51": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "95dfc992-0553-4499-94a7-d42021d7a805": ["354bd225-ca88-41ce-8470-abeebfc7da3b"], "3c860073-dead-456a-9dd9-746a0c1f2cc0": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "6e31af36-593c-4b4b-a439-f6361f629e60": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "bcac432d-f310-4e4c-8294-803789746673": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "59c27c92-ad44-4eec-b9a6-c79134babf4b": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "753f8d63-ebe4-412c-8c70-4b6defdfe6bf": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "838c69d0-ff96-462d-a5ec-c67efa2bcca9": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "c7ec0382-b12d-4075-915b-a29a72506cfc": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "22887c27-fdf6-433a-9832-9a9115f9683a": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "fdad09f6-1e41-4f9c-9715-fffb065295c1": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "e1115d17-d43b-4b36-8792-e50c67b9bb8c": ["9cd96ad0-9958-440d-ac65-a2b4614d7539"], "ebd1eb6d-41f7-4335-b070-d78501a2156e": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "64a7ca44-9bdd-4f2f-bdb1-baed69c8d781": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "20367a8b-c64a-4864-bc1b-3d280f351a0e": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "d9858772-aa90-4a5a-be0c-fc5b5b333bd8": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "57ce6697-9b12-431e-8391-82fdd25840ee": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "39f694c3-4c98-4d86-b3e2-28fd601599af": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "14fb21ac-2111-40ac-bda5-782d92f9b5a6": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "fb146223-b6a8-49be-bb4d-631246b9b613": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "b11ec9e4-e31c-4b32-b7d2-e9801186fd91": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "8fd37fe4-2c42-4678-bce5-922ed9d2dae5": ["73903c5d-1dba-43d0-b85d-443cf758da6c"], "4457a37e-6d0e-411d-aea4-b1c32f5a127d": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "a7e21fa9-a576-4c9c-b7b8-cf1dc782ba81": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "bdaa7693-aa11-46bb-a39b-d7e351b3fff2": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "bc7e0482-14cb-4e29-83b9-d34e973bcfb4": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "06906bd2-d444-4b50-86fc-e7d4b6caa7be": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "e7276fb9-5e30-4e77-a07f-98fae0b0a30c": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "416fc8f9-4237-4877-a514-88013a360abd": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "35cbeeef-7459-4fe7-bb33-74fc872c987a": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "e5be6c98-b642-4a1b-968b-8d3d71eb0fd3": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "f6636c23-4d93-4bb1-abf3-5ef8d69586c7": ["9a9a9238-61cf-4a36-afef-3d7c4632f4ae"], "6e27bf91-4131-4e12-b6fa-cc6d5997f88f": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "ff520072-afb0-4b73-9f62-29ac419bf82d": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "4e3cb88c-db89-46db-bfaf-6efc6c06e573": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "1ba43e0c-45c7-42b2-9dcf-0edbb0e71b32": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "82d470a2-1e99-4a76-92f8-281573661995": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "2e5d1e86-46b3-4528-b9e2-cbe1e372e99b": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "c53e6933-dd22-42ec-865b-0cfc8a4a6f10": 
["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "60fdcce3-24cf-46ff-bd37-71e20af72d5b": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "35c63a1b-39bb-4e6e-87d1-597fbe552fb0": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "7e800537-d1f9-48cc-b514-dc45ba1e805f": ["3ab19783-41cc-4d8b-9dd5-65034b1c3360"], "8caffdb8-0e56-4747-8e14-5076b635e19f": ["eadf745e-b710-4384-9248-a774a534b3da"], "7a804990-826f-4344-923d-164698caefdd": ["eadf745e-b710-4384-9248-a774a534b3da"], "1fefa53a-88bb-48ff-8f41-3aeae62bce74": ["eadf745e-b710-4384-9248-a774a534b3da"], "b4eb2632-537c-4adb-8f56-5f7392106c81": ["eadf745e-b710-4384-9248-a774a534b3da"], "3e0d09b6-f68d-4dc8-bef2-d2295751db89": ["eadf745e-b710-4384-9248-a774a534b3da"], "b9023848-3972-4a1d-9214-477117a0d3df": ["eadf745e-b710-4384-9248-a774a534b3da"], "8b69e61b-4fce-477c-8e17-3871ba07c8ca": ["eadf745e-b710-4384-9248-a774a534b3da"], "3003c533-bec8-424a-a54a-5bbe26b84144": ["eadf745e-b710-4384-9248-a774a534b3da"], "6e59a970-f171-4ff5-a512-b3d6396bb916": ["eadf745e-b710-4384-9248-a774a534b3da"], "30812fe9-ef65-459a-9731-9e837e6be7cb": ["eadf745e-b710-4384-9248-a774a534b3da"], "8abc3501-531b-42b8-9cec-6ed049ec1079": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "473df226-ad88-4f10-8241-ea6b0ab61506": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "a74c77b8-07cd-430c-8aac-7b1585bc505d": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "c4688abb-c2c2-46c4-86b1-0261e15da71b": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "003673d2-5ecb-440b-80be-5cd34bbaa356": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "cb249eee-5357-4ed7-99fb-8da55348c57c": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "af4ab8cd-cae8-4e7c-922b-8e193f1f5383": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "8fbca3b4-2fde-4348-a2e2-1dede9fbabb2": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "39179d19-89d2-4969-a2e9-b1c27831cd8f": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "4e933b6b-f6f3-45a9-aa9d-c54fc2d80ade": ["95c4db5f-ccbc-4cb6-937b-2e6690f89320"], "7a230dee-19b1-4409-ac9b-4fd593416a92": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "e0de42d1-9769-4cec-a48a-c883f8a5c316": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "1bbc381a-2090-45be-8cad-d9d9d862608d": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "93fdcd7f-ecce-4c63-80b4-7e3b65a0ec49": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "e3a6aac7-0d0f-47f2-a09a-5a902286de83": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "9343529d-ade3-4b58-8c53-8a84ae0b6620": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "1402d638-7b8d-4525-9c64-93611b63063a": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "2b45e986-94f6-441b-a5b6-ad7d5710929e": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "60e72442-44d0-4e8e-8f79-1d3e0616d45d": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "5f17138e-15e8-4f1b-9538-d62720ee1357": ["28b833ba-4e82-47fc-9206-2b9e593d4569"], "5c5c28fe-f220-4cfb-b5dd-59fa4fc0ed70": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "1ecd21e2-5962-46c3-8ac6-39f368d94cb0": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "9a291c46-0178-49d0-a45a-eb38560f5715": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "df46e7f9-256e-4668-a774-41a48f74c576": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "3bac4694-ad52-44b3-a9b8-bdd45f2f3526": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "caf5e917-228f-452a-aa7e-5e4722643f18": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "d769a613-d040-42c5-b109-da27d5f9787e": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "2f8b1ef4-df6f-4ba6-9615-e1a85909f4af": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "ace15994-5883-41c6-a269-9979b4205fbd": ["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "e1eb277b-b995-4e63-a1eb-9180ffd6f139": 
["38bddc3e-d00f-4b32-8f06-51bf7ee089bd"], "076a1fc1-5b6b-46f0-b2b6-48cfac0d15e7": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "64993f9a-4b76-4a8f-874c-3d273367f43f": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "153b18fe-97ea-4516-a51c-8aee0b2f8f07": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "99dc15b6-9a6f-46c8-ac5b-2de91142fa28": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "ee485024-fbdf-4772-9401-42eb2a5b5603": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "94cb95a8-ec57-470d-b1e8-3959d8963369": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "af4cbd1c-b744-4bab-97a4-ec1f323b9260": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "0325a366-a9b0-45a5-87cf-7107248c17f6": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "a2a6f02b-4f15-4743-a2b5-3f2025414a01": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "835ee7d9-eea0-48db-a932-b70328a2b80f": ["2524a0ed-cf1b-48e3-a937-7968cf441df0"], "2184e32c-4f19-4057-964f-48f6090c4d64": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "426c71f4-c8da-4c45-b83c-0cb9d64407ac": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "4f9726e7-c892-47d5-941c-423f9a84d608": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "89c9ee09-99a0-44e8-9140-622366b15748": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "02099e7a-c0f9-4fec-8ac9-5f7fe1eab08c": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "38cff01a-3e4d-4f0d-84b0-6e9383d80e02": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "2c7799cd-0027-4338-a539-47cd43e086c6": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "dc59187d-f64f-4de6-af9d-5e6e93133b18": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "6f52455b-2a16-4f6c-94cb-d9ba727691af": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "16af67cc-0dfc-4203-815d-8b6bf5ef6277": ["30631e3d-b2e2-4cd1-a586-710dc192aae0"], "555d774d-d331-4349-951f-af19f683089a": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "91ef15f4-5185-4c16-9ef0-4bbb37697969": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "1c1560c2-164b-4de3-9972-65d5c636bd55": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "b0939336-ece6-40eb-82f5-1ec5f0e3b997": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "e8beb28b-df88-4857-a896-9492bc49fe69": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "d799c0a7-a358-47d3-8fc2-0b8da05d95fa": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "4f6a16c0-eb08-4d81-9e2d-968a9c7bbe5a": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "8029f69a-cddb-4697-8f5f-a117433fee74": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "66f00bc6-ab1d-43e9-9153-e2c75516e556": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "5d336249-8464-410f-8952-8b2a4b802ba1": ["4495ecb5-1247-453e-aa68-1f4395d7fa9b"], "cca47a77-0520-490a-ba33-7fc205506cff": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "193d2e40-1043-47cd-a7ed-4c589f9ed491": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "1377b78c-14b8-431e-bbcf-a61ca726482b": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "b20bfb27-3464-4d36-af79-19e0efb7136b": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "b63ce3cf-4572-4655-8f8c-f0590a07b1dd": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "518edd21-747b-416c-a5e4-eded44945fd5": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "f29882f4-75bf-4ba7-8c1a-4908f184296a": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "0477b1d1-9503-4a65-9872-d4e96a8ba1b1": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "cb0e5e06-fc4b-4849-a9f5-cc5a5541d9c4": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "675faee1-4b6f-46f9-9004-bfea75569eee": ["6c879ac1-2edb-4dc7-aa39-4b70b06464e0"], "e3b5b92f-9f7b-4c54-9240-dfaff35e382f": ["35c66776-cca0-4456-a747-9298ba25ad21"], "50ddae98-3e18-4f13-87df-bdf5c87c3e49": ["35c66776-cca0-4456-a747-9298ba25ad21"], "857942ee-2e35-4394-93cc-4a1070f88dbf": 
["35c66776-cca0-4456-a747-9298ba25ad21"], "039a84a0-ad22-41a4-9569-a900c59cd915": ["35c66776-cca0-4456-a747-9298ba25ad21"], "730583a5-c594-465e-b9db-d94372af5b92": ["35c66776-cca0-4456-a747-9298ba25ad21"], "a399efb4-2fe3-4b7d-bb74-468cb88c6692": ["35c66776-cca0-4456-a747-9298ba25ad21"], "25b7a363-4a6f-4e92-bf19-820353d20eda": ["35c66776-cca0-4456-a747-9298ba25ad21"], "e89b7e4e-9d0c-49e9-ba4b-d96fb7f38d1d": ["35c66776-cca0-4456-a747-9298ba25ad21"], "43939503-05a7-4f81-a001-b61fae0e6cf3": ["35c66776-cca0-4456-a747-9298ba25ad21"], "741c8a8a-a15d-4b18-b19a-a167119ce939": ["35c66776-cca0-4456-a747-9298ba25ad21"], "ce483416-c348-4911-9628-813050e4eeab": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "2ca2c34f-c0e3-4a49-bb51-b639dcabcfe2": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "361b48b8-e93c-4e16-91a5-cc5e07f1fe3d": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "ac634aac-bdc9-4b09-83c7-537d7fab01bb": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "7f844336-cb98-4ae3-b02c-d29460977665": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "09a89a5d-baf9-44e7-97ce-9150f52dcddb": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "145c8074-9a0f-4eeb-a364-f358745607c7": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "9f2b34d5-6f6f-477a-95d2-34caa90de98d": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "1e82f71e-654d-42ec-9c14-6dadbe1f6a21": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "99c21c13-0f1b-4e41-ba73-66b99a72b9f5": ["7ce689a3-f85d-494d-895c-ec39ee9cf4c1"], "3be3f630-af00-421e-9293-5b36b1457f21": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "15856cf5-933b-4a51-9b87-1a41b7989c41": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "f8eeb340-f824-4954-8bba-97e13a1eaaa4": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "41e2ede8-bed6-4392-91c8-4d6e11fd5723": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "8d49c1c5-f878-4154-9587-d0639e096bfe": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "256b9f00-7e6f-424f-a18c-4c07a21a9c0a": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "2fefdcdf-4aed-4bf3-a395-cca2e7e6f51f": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "5073c0c1-051a-4a7f-8466-425db5f7c409": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "6a8808c6-ab61-4357-826e-0a9c216b874d": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "ac5de47b-969a-4190-8637-c26262014360": ["4094bae5-4a0f-4a9a-9b12-570c472171b2"], "2d85a293-4a92-4d77-a148-01e6bdb2e171": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "fa278b40-585c-4815-898c-0b82776e868b": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "d2d872e7-55fe-471f-8a3c-444fd24feaeb": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "ce2f6805-1d0b-408b-93cb-df89733e20d7": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "f0e5e018-68e2-4930-9f82-18a05118dd73": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "aaf22726-5e88-48f2-9816-290e976a2c52": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "621c3d3b-6588-4b26-817f-34cf97d21faf": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "d4ebcdf7-e442-4f4e-ae79-bc5b7049698f": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "1ed4e09e-46b0-42fb-8f66-ad56d901feb1": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "4ee25323-db4d-4006-941f-65f9ef0a30a2": ["55a2ceec-a531-4aac-946c-eabc0b541811"], "c87b4890-2e5b-4bac-9ab2-38345179a167": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "a5cec61b-fe2c-4f0a-b43c-0bcfda0a4e61": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "cd837571-a21f-4d34-be84-a254ecaabd8e": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "bf81addc-77e6-4962-a28d-6a88b104c798": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "0a2781a2-70af-4a14-833a-be0db30d26de": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "70ebdeb5-d993-421c-98de-6f8c8a151dcc": 
["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "c3a7e646-03e3-4fcb-a49d-f6aeb95cd652": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "b2069dc4-e694-456f-ac4e-bc92042d8f57": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "eaf0aa98-7660-4a3d-9f6e-c63c68dafdbc": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "d8553bac-b1a1-4c02-9567-b5f42bb59bee": ["c9d0711a-004e-4887-bef2-e4af6d5b3361"], "6a84a0e6-451d-4969-8be6-245780aeb397": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "7b29eb41-5e52-429f-af23-ae265e916f3f": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "f9c5155b-5175-472a-bbc4-0b5013dfe6b9": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "8318048b-c34f-44ee-a2ba-511717b16077": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "0eb2f6c5-07ed-4df1-b0ee-9772ba08a943": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "1d52cdc1-e84a-4d67-80f9-7540a2133c4c": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "7b697db4-b43e-445e-bdaf-c9ceb08eb05e": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "41daecd9-dceb-4f72-a42a-e18ea7fda937": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "fd25cb30-3e1b-4f40-bd7a-02df040ec85c": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "c5b9acfc-ac53-4dbd-85d3-f28fa4f32c26": ["a09d22ae-57cd-4d06-a965-34970ccc4ea9"], "6d74ac43-d15a-4127-8db7-cef0fdb9d105": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "c71c8dd4-63a7-4818-b770-97043dc5007e": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "2d5bfd5e-dbdc-4c0c-818e-615f91006738": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "0029c940-ec04-411f-a02c-3d2ed811162a": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "a07d446c-3b58-4fde-86ea-42a39458f472": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "c3e83063-2c07-421c-8e37-a16fff3f39e8": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "606bee4d-8e16-4cd6-a403-2f19c040f21c": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "553267de-5d7a-4b26-91a6-2066782e1eca": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "2663ee64-79f0-4c1f-8410-651d596a09cb": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "dddf4214-014b-4cae-a517-b49cd94d6dbb": ["d2d7fc43-f0e0-4838-86a3-f9dcbd56064c"], "791a3b0f-42da-4f1e-8230-e984b9e83a5d": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "2e9b844a-3516-48c7-9857-37ddb2b13880": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "41172f4c-70e5-4cec-9baa-a1647ae750e3": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "e28c299b-5a05-4d08-b18b-a133fd152cee": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "89e46e02-8e3a-48fb-aee7-332e3cb87322": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "d4f21fa8-9d60-4e63-b735-56afa6012b7e": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "6605c4f9-86eb-48ba-970b-2ec5cbda43d5": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "ed41893b-1bb9-4e32-bf94-8499f5845e84": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "06f4e666-7e80-47d4-920c-020d8e66d063": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "d8fdf8f4-8a77-4d9d-9d04-353cf7713d24": ["86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7"], "94bdbb5b-6d65-480b-8601-b46cc88fc568": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "455d3295-e1b6-4838-9757-ee00fbf6fc3f": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "dcad9f20-5d3c-4fa0-a3da-0807a6e8e29c": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "a9fffb40-a9a9-44a4-8bd7-139528a1ab89": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "d0babf07-9aad-4ae7-a6cd-85ecdcba8291": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "89597661-548a-4a06-a978-89132aba1eb0": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "f9670d38-896e-4fbc-8f5b-b9d0037c76ff": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "a0b6a03d-c670-468c-bce1-9294b6d93b53": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "86dbbc9d-37e8-4fa1-87dc-808e4eb2b54e": 
["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "001c0aca-6ef2-4987-9528-4e438dccb697": ["4c0a384a-4c92-42cf-a63d-a48998f437d3"], "ff4430a8-31f2-4f0d-a6e0-5c8649634b27": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "e535b78c-e479-4e22-9856-3ae2f3a944b8": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "07577988-b22e-464d-a114-5a3836a9b93e": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "dbb54f3d-f426-42fa-85ac-fdf51750e91c": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "7fdfe1a9-36d4-4691-885b-a3946764874a": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "7cae1803-9318-4bf5-a8b5-7013514edc4f": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "692e4a6c-a9d5-46ca-a0d6-9a31d7f06f67": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "9601cee0-678d-42b5-a2a6-8db03a41e877": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "087c8f2b-0171-4cb4-81de-f085a3a439ca": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "9ed4da28-5f9b-4191-8f88-b3d3f17c019c": ["1a1445dc-a483-4689-a196-3fe2e2191059"], "1a5f3970-e2ad-4c5a-b2f2-464b42ba6670": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "1f4eaffe-4d1e-4e65-927f-b30861a0c252": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "7108defb-707d-4f17-be57-997ab61d8de5": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "dbfa0169-4237-4e27-a0a2-6f6add38775e": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "786162d0-4467-47da-a17d-83e793b39e99": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "2dccbfb2-4d4e-4e46-bb34-553b404afd69": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "2026e84e-f0b0-41f3-a034-e2ee060d5e42": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "af8f1310-f89e-41d6-a386-57d253594be4": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "cc1890d2-8f2e-48d0-abcb-8f49bfb2f5b2": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "2ee7b790-cd75-4629-a52f-8036fad31546": ["7e2c9d15-c47a-4453-961d-8f3eaca062a8"], "53c25f8b-f67e-480c-ab2a-951cfbd311df": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "8be2ee75-d646-4383-b30c-966d720b0fa8": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "f18be8d3-47f5-4006-83fa-afb8ce01872b": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "4a8b4330-7921-4bb8-becf-4ea1bb6b54e3": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "ce71108e-36bc-49d1-a29d-19023f609a26": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "4a77b200-1ee4-4bbb-b131-970c384060be": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "3aa2d071-ce40-40d5-be9e-8219f64dbef9": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "73bb4bbd-de45-44ae-9b2f-d51c7dc9c39c": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "e5a3d5df-7333-4465-9aef-c982135b06e1": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "92a08a8e-413f-431b-92b3-ab96effc5cfb": ["c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46"], "4e3b03a7-bfc2-43b1-b05b-f1a0c38eff6c": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "70f62e50-5170-4ad9-928d-15660d1f86b5": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "674c4e49-a7d5-47f2-bc90-df8320d389ce": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "c61b9e7b-0fb9-4e82-a7c4-64d87b67c0b2": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "2e142a47-da67-4d69-b1f4-c340f690597a": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "4e2fb6ff-c26c-4485-adb5-69581eb0a253": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "114af13c-f1ec-49c5-a9a2-f604e098e204": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "454c3073-ee6a-49e6-b85d-712601950be6": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "890c20df-f21b-467f-9126-136a56509c15": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "ff14594e-b5c4-4958-99b5-c3c354b9f502": ["73710384-8ad5-4f95-a5aa-6db21daa851a"], "4b585781-3f5c-4022-b77a-4d5874711f71": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "c3bfc377-21d8-4ea9-aa9d-28bf11fa7174": 
["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "04ce1a4c-76ce-4640-9775-83d32fb8e781": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "ee94abbb-0f06-48b9-944e-c9be35cad15f": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "d9f37ef0-5071-4f8e-9872-db98336653a5": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "66ebcef0-85e0-4e6d-afc7-bdd573d73837": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "508213e7-c376-4ead-bb89-162fc3fef0e7": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "67c9bee6-f621-4966-ba5b-b12a16c01f45": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "061e1226-744d-4d65-8114-0fbadd9b3d06": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "af11a645-0690-4cf6-929f-476f6853ee58": ["44a8cf7b-bf66-4022-ac2c-84893ff96c86"], "d7605540-f9a3-4e51-9036-60f1c4df408b": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "379f0256-6648-4343-82bd-85d0ba551fe2": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "097159ce-edec-42b3-8e7d-f7eaa02730ca": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "80dcda41-28de-4d4b-a5f6-d968bf53fca2": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "ada88ac3-f4cc-4c8c-9572-1eef6b100f8c": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "a9cc0b3d-ed88-41d5-9b61-453f88b305d5": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "465a72fd-3256-436b-8e18-d0a099b3fc3a": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "8ffdf82f-7776-4d09-8d87-dd3b9b2d6dce": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "81883056-15e0-4dac-a219-beb17eae9f58": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "30810cb5-f31d-468a-b938-a53597ae1d54": ["3bc0010a-dd64-4488-aa13-f4356b278088"], "e7dc392f-0994-4434-83ca-5a2b95bfb984": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "d17c2c7a-b481-4a0a-8c1f-ecce10c054d1": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "1fd47749-e6b5-4a4e-9982-0d7a4096be47": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "886cbf14-7c71-4800-97a2-a3f8f91c9aef": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "c68e9ed0-dbc2-47cb-8531-79bafa03558c": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "009e2017-2f03-4ae5-80e7-1e266e24c0c1": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "7e569e34-0129-4adc-9ffa-13874c019581": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "97e257bf-87aa-4bbd-a07a-8ebb0d758152": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "78b69abb-f1c6-4398-9e4e-619d89054b29": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "9ede1912-c15f-41cf-83b6-4ac0a0be6c0f": ["9b458a26-9b4a-4d70-917c-6720e9cdbac8"], "1c3e63ce-81d0-49de-920c-d4c81fd0d223": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "729f05ea-87c5-43a9-8190-306ccc177546": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "99785c94-c276-4ed1-8039-13f1d0943fdd": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "0c4d45a4-e1e2-46e5-b8cd-59ac132cfb62": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "fd23ff35-ce12-4ab5-8f00-6522d5466c82": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "68940d08-efcc-42d0-96b0-7e26d8c550cc": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "b8ac00ac-57f4-4259-aead-6d6f608ccea9": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "fe589612-ce8a-4fec-bfb6-4a9173dff0a2": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "ae7f2e11-db6c-470f-9093-9964aba79b39": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "9ccf00e2-cd6a-4824-8e75-cb8c6bc51c37": ["4275e423-4206-4cdc-8dcf-31803c65173d"], "d7947ff9-c5a1-430f-a50d-48369fe5d238": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "da12cd0e-8846-4a5c-84a5-84fc2b21b168": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "aad8a5bb-bb98-4c8c-942a-60d55b2f3a3a": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "62e9df72-50c1-414a-85e4-ec1b02cc7b3a": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "e688d828-a12e-4b21-a574-d41223a895e4": 
["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "d9a5744c-7e1e-4f1c-bfe4-9ccafe96cd82": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "bc9af756-894b-4f0e-936f-fc6f94db6b7b": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "c24ff655-ee05-4769-96a1-1598937bb5d5": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "6c682518-b844-4763-a6a6-0c784122c7e9": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "02b62987-5164-4ddb-b1ea-ea1c5ae2bfa2": ["6cd901b5-7e7c-473d-8147-818cf0bc5701"], "9d1e0807-b733-47c7-aab7-6db6d469a140": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "11e017f1-450d-484e-80d0-fd445c1441ff": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "09d9a596-3e41-43ef-83c1-590975022445": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "1845dce6-8092-4224-b27c-4416e17dad7c": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "aa8a4f38-7a8e-4236-a7d0-6066bfef485e": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "31b0b161-5d09-4e06-8df0-3cd3719087e0": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "eba60a78-3558-4289-b6c4-c260c5ddd08d": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "3e075c52-14a9-4ce8-a841-7081ebf2773f": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "76205606-28f9-4aa8-b954-f88b34b55c55": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "da56e9b1-2461-4b61-9841-d2518ab0182c": ["c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5"], "9c269a12-6410-4ffb-b5cc-7e74a8f47bd2": ["c0a4891f-9c78-444c-b309-abba336abc39"], "b2d39dd5-4224-4f84-9380-d8ed71a03606": ["c0a4891f-9c78-444c-b309-abba336abc39"], "607bb0c5-d732-4661-89d2-8c8968dc8954": ["c0a4891f-9c78-444c-b309-abba336abc39"], "137febbc-d1a5-48ce-ae7b-8fbe207161b9": ["c0a4891f-9c78-444c-b309-abba336abc39"], "0dad671a-1be1-4246-86ac-e3a70cf4fa38": ["c0a4891f-9c78-444c-b309-abba336abc39"], "5e1f7127-82d6-481e-b40c-00469b2a02f1": ["c0a4891f-9c78-444c-b309-abba336abc39"], "ff09d5ba-b0ce-431f-a9e6-6443aef1d947": ["c0a4891f-9c78-444c-b309-abba336abc39"], "ab935b74-ec3e-4c32-9554-a212ec3eac3e": ["c0a4891f-9c78-444c-b309-abba336abc39"], "77e27420-8614-491c-870b-1f18f4bc11ea": ["c0a4891f-9c78-444c-b309-abba336abc39"], "5c9a5427-5b74-4e51-97bf-ff3fe62e0773": ["c0a4891f-9c78-444c-b309-abba336abc39"], "ff62ad50-d65f-41bd-aee2-94ee02783edd": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "fae2091d-78df-4aeb-84f0-c4796d91062a": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "1174d0d9-9f4e-48c8-b445-47e1b63e081e": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "a7594475-953f-4c1b-ba9b-7e06027df340": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "38758d4b-c5d6-4e3f-a96c-ca37a83ffa88": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "8ff16b60-4549-4d28-a5f8-d09dc421e48d": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "1e5dacbd-129e-4493-b540-f6526c84911f": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "23c3efca-d4e8-49c3-b686-8b432530b4f6": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "70b624ad-4e3d-49ea-b622-028680721234": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "53b65fc2-c35d-42d3-bd12-9411f43f20fe": ["2f83cb98-b4f1-4c32-85c6-9372b78cdd82"], "79527f3f-64a1-47c2-8080-526e87d6de07": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "6ecf170a-bccc-40bf-a94b-cdee16acc4b6": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "2a759b35-2e4f-4be9-b7cd-da0671a48a2f": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "05a5cf4b-6f21-4f0d-a726-782a1b5f0055": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "7464ee39-be9e-4591-9805-942ced6f3320": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "623fdaff-f939-45e1-b135-53865f42ef09": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "cec41fa7-ba2b-4f27-b233-a38c64d2be39": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "4e306b7e-b18d-408a-a7ff-ff6ef4bb5ef0": 
["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "861cc15d-b987-4ae4-8da8-87774f1fd4a2": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "9e1b0b50-9c70-4f88-b9e9-1bce21f959c9": ["b83971f2-8769-4a69-b89a-4eb71d1a867b"], "312e6911-48ad-414d-b366-80e0cca7c405": ["93135040-675d-4b43-855e-a1606a30e1a7"], "822e0d55-b6d6-4fd6-b0aa-970bda988794": ["93135040-675d-4b43-855e-a1606a30e1a7"], "b39af8a9-fdbb-4957-b79a-1ff8ff6bebfd": ["93135040-675d-4b43-855e-a1606a30e1a7"], "789e4c72-791b-4dea-8f9f-92ecbfb598d6": ["93135040-675d-4b43-855e-a1606a30e1a7"], "f4b466db-87c7-4ab8-8d5b-cf86ef2d84e5": ["93135040-675d-4b43-855e-a1606a30e1a7"], "e5767722-faaf-40d4-a45c-7a1913c24ca6": ["93135040-675d-4b43-855e-a1606a30e1a7"], "e81ed8e0-0b64-4bec-b518-b4f8140f20b6": ["93135040-675d-4b43-855e-a1606a30e1a7"], "583946a4-f8da-4e37-9457-b91e6a6ee12b": ["93135040-675d-4b43-855e-a1606a30e1a7"], "63097304-7442-4cef-88eb-325c2ccf4638": ["93135040-675d-4b43-855e-a1606a30e1a7"], "e17ed95d-b504-4234-b183-12b62f58b109": ["93135040-675d-4b43-855e-a1606a30e1a7"], "3f995ade-9508-4868-9f0c-834fe1fedd5d": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "faaf64ee-40f7-4b25-9728-d640745e0a90": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "03221c60-4936-437c-ad35-65b5cb1935c1": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "d909e267-78c1-4718-abc3-4c5eacc1c332": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "4953d062-f6dc-4435-8edf-ed0f4e8baf36": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "a1f064d0-fe0e-4009-ad4e-da914ad74ab1": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "90c10536-7c1f-4080-96dc-6558f5fb05e1": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "9d896814-0ecb-4f79-9aee-8558f5e95807": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "9fba01ca-f4ac-40a5-845e-7e7798ee3c4a": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "9c45750d-00eb-4f95-b273-e109238629c4": ["0e9f5217-a16b-4485-b733-ab139fed5db0"], "9dfeb337-4771-4307-8f63-5a084befadf1": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "5db1d569-a143-4b95-a780-abd6a626429c": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "36cb7b8b-ec8a-4240-932d-f4d013ec010c": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "4e1b4a56-946d-4b57-97a8-a7e7043aafbd": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "ba44fb84-6b28-4604-b4cc-4e0cb6156a75": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "709b6157-47f9-4f13-b778-73ea7b5cd999": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "b831d5ce-83d9-44cc-8451-30165592bc6a": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "48591766-4e73-4127-9689-ac3ec34938b0": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "70259660-a0cb-4dad-9af2-f1552f5c5405": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "5da3d175-a205-4f12-9bc7-b0694cc97663": ["f4d5686d-1caf-40d9-9401-c84e128b0a30"], "ba9a37a2-ff2b-496c-b75a-bea68b9b4d20": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "77397e84-ee4c-4691-9eea-95549a0f45b1": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "c4c9deb3-0d0c-4b68-a899-07733f551dce": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "2af51b13-b8e3-4522-b662-cbd6f11fa87c": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "203b4219-e580-474c-8c70-588f4c24500f": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "c20f89ba-0198-4bb6-83d4-436159a5fecb": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "beea4d1a-fc3a-4f4c-b9fa-a3c73abf6041": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "b49c589c-51d8-4cdc-93e1-6d74c67948f7": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "71a79413-b607-4093-91a6-cedb011b3b57": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "89504f33-70c7-44e7-acee-ad69679cf410": ["46776766-3bdc-4e9c-a580-130ca26e7504"], "abccae6f-f846-42a0-ae0c-e7553ed3bcf9": 
["36392958-09c9-4300-9271-e4655423509a"], "1fe6aca7-365b-48fd-840b-21cafc375daa": ["36392958-09c9-4300-9271-e4655423509a"], "35d9684c-2d7c-462a-888f-687e5c1743d1": ["36392958-09c9-4300-9271-e4655423509a"], "f3af11a5-dee0-41ce-9ec8-2befaa2c4b82": ["36392958-09c9-4300-9271-e4655423509a"], "f6a36e8c-7547-40e4-ab95-7e3640f7d9b0": ["36392958-09c9-4300-9271-e4655423509a"], "fd9e9dca-1dea-4621-9bc7-a973840546ac": ["36392958-09c9-4300-9271-e4655423509a"], "86f7a45a-b678-4ca0-9231-2659114e545f": ["36392958-09c9-4300-9271-e4655423509a"], "2807de8e-8f7e-4f04-9938-9baaf2698edf": ["36392958-09c9-4300-9271-e4655423509a"], "82042ccd-7ba3-45de-be88-dd7926ed2fa7": ["36392958-09c9-4300-9271-e4655423509a"], "3f49ac16-f537-46ae-ac21-b82e769969c2": ["36392958-09c9-4300-9271-e4655423509a"], "ad65e418-3028-4caf-8dda-28cfc82b6e65": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "8a14b547-517f-4f3e-b937-5205af638a25": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "e996fac3-d63c-4fec-bdd3-645e2052ce20": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "2fff0be9-8a59-42da-9d8f-9bf01c03c3a5": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "ff4c692f-1cfe-467e-9925-e44da6bac8a6": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "77dc336f-61a8-4597-8e60-7d6a28b2750a": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "992eb21e-18d3-431b-82ee-6c8845b57e84": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "754a638c-4963-45fc-b877-baf962d095cd": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "7837880f-1aaa-4a74-a820-8b848142069f": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "83e7fc2a-5676-4a2d-9b53-fe9dc216d52d": ["b6584983-93a2-4fe2-bdca-aa01408352d1"], "da4c933d-5790-469e-8c9c-08a80e387962": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "b85c7dc7-598b-4f88-a94c-845f6bfd5cec": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "e973c5e2-042d-4219-9ab3-9357946c544f": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "3750af0a-f7d0-4923-8075-a53beefd9441": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "5d13e05d-ba03-4e6f-8b60-c6aba8ce6ab7": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "de4376c0-1172-4302-94d6-b2308ff91785": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "35b33780-ab39-406c-a73a-5af626b322df": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "2da11dfc-4bf8-44c6-b11b-bb784841bfa5": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "9c4e8887-02a6-46a2-855f-dd49cdc870b1": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "c3b4dffe-1622-4cde-b35d-0384b5fe8b90": ["dd97552e-cc7d-4c14-a4cc-1c23c836536c"], "ee6a09cb-bc0e-4337-94c7-9f98ea4e5ee3": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "b23e7b4f-f930-4e25-9deb-614c17f86250": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "40981bd2-f90d-4edb-a063-87981f5fad81": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "dc149b68-bca5-4e5e-912d-d7be8e71e054": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "d5696d57-18c7-4cce-961f-32fcfcc85df9": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "61c0fd7b-0e62-4a0b-9328-ce82b486e261": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "6c72d4a0-152b-4b01-afbc-bbec5f6e4b2f": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "8478afea-bd7b-47d0-8e3b-4ade048996f5": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "f855fd92-7288-4188-bcc8-9e9d6658cb4c": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "9363da58-a7e3-417b-8b3f-f36f8cc73b2e": ["3380d726-bb80-484e-ae21-fb3eac80028a"], "c7a8a9e5-f64e-4726-a954-752b9bfa62d8": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "d4f81aa1-536d-4926-ab69-7bf7a5842d58": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "6ec865eb-0ef1-4d0f-a574-83c9bcb8e93a": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "10621bd5-3020-4fd2-8090-ee0a5a365370": 
["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "8a985b69-031f-42d8-b2fe-c3c003206f80": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "d55c5fb5-723c-4f9c-b466-fbe12ec885fb": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "87cdba18-ff3f-490d-97f4-3679a7ab585f": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "d1a0a484-a7da-40ad-b494-d8fba48d4cc8": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "d95e85a2-6162-47ed-bc37-6e7bf3a90580": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "c8a9e9a9-ec2d-4b97-a906-cc52fec59a02": ["9fff3b99-08a8-4b76-bff6-66d7574d47c2"], "050deb8f-2d38-4b19-abce-8d42367a3897": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "e4ead17c-b324-49df-9768-46f3947244e3": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "2e67721a-7a35-4081-a350-c289a7b797bb": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "ffcf5bdb-0269-4d50-a0fc-e0f5cd5d6cf0": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "805ffbfc-6fde-4a13-af7b-92d425ac5ff2": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "b7173668-8845-4ff4-a8bf-82d58d4670cf": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "f786de70-6184-40ef-bcd4-dbc7e418ec99": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "dd27c837-114d-4771-b470-8edadae0e05e": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "f986530c-70e1-4674-a4a3-d2c2fa9f3c48": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "d6f7085a-3f9d-41d4-a61c-de22f689406b": ["8e850453-d994-4bbd-9857-bff1ebc1c2f0"], "69e6c713-aa1a-4a39-b3b8-5a9c3d192560": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "b9ded17f-ece6-4a9a-83db-3d1c1ce214b6": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "118c93d6-f413-43f8-9eb0-5322555c7701": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "378ad341-25e9-4a99-82e3-2d3b20c79183": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "3e2c5916-b757-4486-950c-91ae7c275a13": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "604a356d-af0d-4af6-ae0a-2362a5215a8e": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "70c87bb5-4afa-45a5-a192-51c40b99026a": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "6246f0e9-a17a-44cc-99f0-30db573afe58": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "e8751b61-9fee-43e4-99ce-3c16b3f81807": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "5e9a5f8c-765c-428a-a81c-62574fa7770e": ["3a4199c4-2b01-412a-b7c5-d9e83cddf2f9"], "8fd0cff6-85a3-4dba-abda-91cdb762349a": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "5cd61fe8-2405-49cc-93bf-9ad9a656887b": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "05c34830-40b3-4fe5-9695-44a5b4ddb868": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "a5419133-45d4-4c99-9e69-273c2f4058cb": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "ef754679-4152-42b3-a445-a7b434eebddd": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "832fc999-15fc-4b42-9654-2177da3e8011": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "15f4dc2c-db75-4f81-a8cf-149786ddd579": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "e4d93cdb-eeac-42f2-9ae7-a646b63d3e7c": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "ec919833-ed12-42e2-9839-457e42e0568f": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "3dd2b1f9-4fae-4eb2-8ead-27f913346a48": ["3732bb56-dad5-418a-bc2f-796ccdd31a0e"], "8eb81f04-21af-4b5e-b8bc-5dd9ddf12a19": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "7c17281e-f1f1-4b05-a681-4f085c6d7b0d": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "57455a6a-fdc6-40a8-810e-ce67e69a459c": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "cf632d03-f1b6-4eac-82f3-c0eeb5c6a7b7": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "cbedd326-5ed2-4e1e-8b52-9599a3a3ef5a": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "e199ba84-6648-43a1-8075-da2bccf8135f": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "42cbc313-aee1-442e-9707-f8a4cda1b659": 
["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "f174193d-52c9-4008-9de8-a830c2ab8659": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "447c315e-07a6-4f78-bbc8-b783bcbf758a": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "3b87a3bd-9810-4130-9a77-016a6194b4af": ["8ef39111-e7ab-41c7-894a-f4b19f7e92fd"], "6d54ebc0-4ddd-48e8-ac25-0f3b41399d6e": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "6126ed1b-d1c5-4b25-8dc0-506e2f075845": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "4e4463eb-454a-4f19-83a0-710cb38be7ed": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "0875bf4d-94e5-4bc1-b7bd-a0618b496482": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "c4015147-e2ab-4686-b432-838d5504d8f4": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "8f24b9bc-1fb7-45c6-9731-cce8c51d4bd2": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "ce820b6d-3bab-4138-b21e-3c94a05b131b": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "415c6ec6-f835-405b-bbcc-1e0ed22ea3e8": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "7f81c5f0-eec3-40da-88f8-3a10b20297e9": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "9fa50cab-5394-4aec-916b-5e47d663a1fb": ["b4dfe088-8bb2-4a98-b320-22a7dd07c65c"], "ca32a8ce-0352-48e1-b059-40b626805076": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "c303bf4a-19e7-45a2-a17a-3a2cbb7c518b": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "3248fe7a-931f-43b9-a807-be31c9cba85c": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "2541eb4b-0325-4e71-a932-aa9c2240ff5f": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "f12e1350-6a69-48ac-9443-7bdf2fe8a551": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "d6014c0b-7667-4f4c-8c17-3d64e472ae45": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "caf9948f-d28b-4304-b2a9-7b8099401937": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "aa086a0f-d233-467a-a4d1-307fd662e78e": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "1ed1b3ca-848b-43f1-9413-11eb5e39d5cf": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "30377abc-d189-4360-8bda-75aa620ba449": ["a0be6da2-7bf3-45f6-81b9-30d724b4dd3f"], "5c610b06-5ba5-458d-96b7-23660211a937": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "f2bd0bb9-78d0-463e-9e19-206aca0bdab8": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "9a107472-63da-4150-b45b-9ee5a0f906d1": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "38c99ae1-7267-4154-86cf-10c475dfab87": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "1fdcc7e8-a5a5-4068-84b8-8860d4fae85a": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "fabe7ff6-7665-4ee8-9e67-18e690b5c99a": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "4f7e6756-8436-4690-bb5d-8dd5e1dc19d3": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "5c5aab7b-a9cb-4e35-9853-670e020c2aff": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "110bdd1f-0551-477c-8ad2-fe3771e46d11": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "d584dcfb-bb0c-406b-813d-7ed4f0a60630": ["e1358306-678d-4e98-9775-74065ea4ad8e"], "6f2db439-4624-4bad-9cfd-f22bb387bf87": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "71f13148-d4ab-498f-ad28-d437b31d648c": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "915bdb26-45ca-41a4-a5c4-026de9664a4d": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "3f1a9bb4-4f35-4ca2-8c26-6d0fe7f70c0c": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "562ffbd0-42ee-4f9d-a966-3fb61c224bdf": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "6ca39f09-5de1-4418-b8c2-8326d872cf08": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "47ffecb1-e88b-42b1-aff7-8feb6a9ed56a": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "7d104609-63ee-48d6-a2e0-efeef372ac3e": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "5d2f156d-e171-4aef-b290-48f2732097ef": ["702acaa9-737f-44b6-a74c-929662b0fa66"], "4fed3fad-982b-4a59-8f81-d2bad9cb3670": 
["702acaa9-737f-44b6-a74c-929662b0fa66"], "a063b509-7e7c-4889-9a12-8706969e2004": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "1c222789-364a-4695-8083-7136f31694e0": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "350eaf07-f263-4761-aa56-cb44ce3ea3d1": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "aa8cb59a-2ee8-4000-a39f-520d7f5ab6b3": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "55a7d693-1922-4dd6-8840-e8017fd5d458": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "088e1b4b-78b3-4dca-a354-7414b72febcc": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "4979e972-3cfb-4a75-b2e2-92fc4640b928": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "826ee64d-fab2-40f1-a6c0-911a7a68785a": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "4216a185-7dcb-478c-b3cb-1cf32485b29f": ["657cf38d-9798-4c12-83e4-12695c9d7a54"], "1f50cb43-e63f-46c6-8a05-c03b8d9f604c": ["657cf38d-9798-4c12-83e4-12695c9d7a54"]}, "corpus": {"70527d5b-6c55-4982-ae73-eb6cbd605bc1": "15 \nGV-1.3-004 Obtain input from stakeholder communities to identify unacceptable use, in \naccordance with activities in the AI RMF Map function. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nGV-1.3-005 \nMaintain an updated hierarchy of identi\ufb01ed and expected GAI risks connected to \ncontexts of GAI model advancement and use, potentially including specialized risk \nlevels for GAI systems that address issues such as model collapse and algorithmic \nmonoculture. \nHarmful Bias and Homogenization \nGV-1.3-006 \nReevaluate organizational risk tolerances to account for unacceptable negative risk \n(such as where signi\ufb01cant negative impacts are imminent, severe harms are \nactually occurring, or large-scale risks could occur); and broad GAI negative risks, \nincluding: Immature safety or risk cultures related to AI and GAI design, \ndevelopment and deployment, public information integrity risks, including impacts", "e1c576fd-2981-4519-abe0-a6954ac7bfc7": "55 \nDe Angelo, D. (2024) Short, Mid and Long-Term Impacts of AI in Cybersecurity. Palo Alto Networks. \nhttps://www.paloaltonetworks.com/blog/2024/02/impacts-of-ai-in-cybersecurity/ \nDe Freitas, J. et al. (2023) Chatbots and Mental Health: Insights into the Safety of Generative AI. Harvard \nBusiness School. https://www.hbs.edu/ris/Publication%20Files/23-011_c1bdd417-f717-47b6-bccb-\n5438c6e65c1a_f6fd9798-3c2d-4932-b222-056231fe69d7.pdf \nDietvorst, B. et al. (2014) Algorithm Aversion: People Erroneously Avoid Algorithms After Seeing Them \nErr. Journal of Experimental Psychology. https://marketing.wharton.upenn.edu/wp-\ncontent/uploads/2016/10/Dietvorst-Simmons-Massey-2014.pdf \nDuhigg, C. (2012) How Companies Learn Your Secrets. New York Times. \nhttps://www.nytimes.com/2012/02/19/magazine/shopping-habits.html \nElsayed, G. et al. (2024) Images altered to trick machine vision can in\ufb02uence humans too. Google \nDeepMind. https://deepmind.google/discover/blog/images-altered-to-trick-machine-vision-can-", "b0c72830-d6f3-465d-9db2-3a0fe4eca18d": "40 \nMANAGE 1.3: Responses to the AI risks deemed high priority, as identi\ufb01ed by the MAP function, are developed, planned, and \ndocumented. Risk response options can include mitigating, transferring, avoiding, or accepting. 
\nAction ID \nSuggested Action \nGAI Risks \nMG-1.3-001 \nDocument trade-o\ufb00s, decision processes, and relevant measurement and \nfeedback results for risks that do not surpass organizational risk tolerance, for \nexample, in the context of model release: Consider di\ufb00erent approaches for \nmodel release, for example, leveraging a staged release approach. Consider \nrelease approaches in the context of the model and its projected use cases. \nMitigate, transfer, or avoid risks that surpass organizational risk tolerances. \nInformation Security \nMG-1.3-002 \nMonitor the robustness and e\ufb00ectiveness of risk controls and mitigation plans \n(e.g., via red-teaming, \ufb01eld testing, participatory engagements, performance \nassessments, user feedback mechanisms). \nHuman-AI Con\ufb01guration", "c8f55f69-05d8-4ee1-a313-d97e683e995c": "MEASURE 1.1: Approaches and metrics for measurement of AI risks enumerated during the MAP function are selected for \nimplementation starting with the most signi\ufb01cant AI risks. The risks or trustworthiness characteristics that will not \u2013 or cannot \u2013 be \nmeasured are properly documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.1-001 Employ methods to trace the origin and modi\ufb01cations of digital content. \nInformation Integrity \nMS-1.1-002 \nIntegrate tools designed to analyze content provenance and detect data \nanomalies, verify the authenticity of digital signatures, and identify patterns \nassociated with misinformation or manipulation. \nInformation Integrity \nMS-1.1-003 \nDisaggregate evaluation metrics by demographic factors to identify any \ndiscrepancies in how content provenance mechanisms work across diverse \npopulations. \nInformation Integrity; Harmful \nBias and Homogenization \nMS-1.1-004 Develop a suite of metrics to evaluate structured public feedback exercises", "0c405046-7c29-4c97-9cd3-b0fa85f4e26e": "https://www.whitehouse.gov/wp-\ncontent/uploads/legacy_drupal_\ufb01les/omb/circulars/A130/a130revised.pdf \nThe White House (2023) Executive Order on the Safe, Secure, and Trustworthy Development and Use of \nArti\ufb01cial Intelligence. https://www.whitehouse.gov/brie\ufb01ng-room/presidential-\nactions/2023/10/30/executive-order-on-the-safe-secure-and-trustworthy-development-and-use-of-\narti\ufb01cial-intelligence/ \nThe White House (2022) Roadmap for Researchers on Priorities Related to Information Integrity \nResearch and Development. https://www.whitehouse.gov/wp-content/uploads/2022/12/Roadmap-\nInformation-Integrity-RD-2022.pdf? \nThiel, D. (2023) Investigation Finds AI Image Generation Models Trained on Child Abuse. Stanford Cyber \nPolicy Center. https://cyber.fsi.stanford.edu/news/investigation-\ufb01nds-ai-image-generation-models-\ntrained-child-abuse", "f477adb6-0df5-47c6-abe8-a23fe8b6feec": "false, illegal, or violent content related to the GAI application, including for CSAM \nand NCII. These \ufb01lters can be rule-based or leverage additional machine learning \nmodels to \ufb02ag problematic inputs and outputs. \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMG-3.2-006 \nImplement real-time monitoring processes for analyzing generated content \nperformance and trustworthiness characteristics related to content provenance \nto identify deviations from the desired standards and trigger alerts for human \nintervention. 
\nInformation Integrity", "77cdea0d-2124-4f93-8aa1-8bca37d473cf": "guidance/\n54. Ziad Obermeyer, Brian Powers, Christine Vogeli, and Sendhil Mullainathan. Dissecting racial bias in\nan algorithm used to manage the health of populations. Science. Vol. 366, No. 6464. Oct. 25, 2019. https://\nwww.science.org/doi/10.1126/science.aax2342\n55. Data & Trust Alliance. Algorithmic Bias Safeguards for Workforce: Overview. Jan. 2022. https://\ndataandtrustalliance.org/Algorithmic_Bias_Safeguards_for_Workforce_Overview.pdf\n56. Section 508.gov. IT Accessibility Laws and Policies. Access Board. https://www.section508.gov/\nmanage/laws-and-policies/\n67", "ac99c8aa-92a2-4280-93c2-b20c7b858026": "yet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\u00ad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\u00ad\ning that confirms that the system is safe and effective, including re\u00ad\nporting of steps taken to mitigate potential harms, should be per\u00ad\nformed and the results made public whenever possible. \n15", "4b7be742-d2a1-4cc5-9bae-6c4cdda913c5": "Models Using Complex Algorithms. May 26, 2022.\nhttps://www.consumerfinance.gov/about-us/newsroom/cfpb-acts-to-protect-the-public-from-black\u00ad\nbox-credit-models-using-complex-algorithms/\n93. Anthony Zaller. California Passes Law Regulating Quotas In Warehouses \u2013 What Employers Need to\nKnow About AB 701. Zaller Law Group California Employment Law Report. Sept. 24, 2021.\nhttps://www.californiaemploymentlawreport.com/2021/09/california-passes-law-regulating-quotas\u00ad\nin-warehouses-what-employers-need-to-know-about-ab-701/\n94. National Institute of Standards and Technology. AI Fundamental Research \u2013 Explainability.\nAccessed Jun. 4, 2022.\nhttps://www.nist.gov/artificial-intelligence/ai-fundamental-research-explainability\n95. DARPA. Explainable Artificial Intelligence (XAI). Accessed July 20, 2022.\nhttps://www.darpa.mil/program/explainable-artificial-intelligence\n71", "2bbd9e32-096d-4731-9cca-baad231face1": "robustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful \nuses. \nThe \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. \n21", "170bd80f-9f9f-4392-a542-6c6048951770": "use within sensitive domains, including, but not limited to, criminal \njustice, employment, education, and health, should additionally be \ntailored to the purpose, provide meaningful access for oversight, \ninclude training for any people interacting with the system, and in\u00ad\ncorporate human consideration for adverse or high-risk decisions. \nReporting that includes a description of these human governance \nprocesses and assessment of their timeliness, accessibility, out\u00ad\ncomes, and effectiveness should be made public whenever possible. 
\nHUMAN ALTERNATIVES, CONSIDERATION\nALLBACK\nF\nAND\n, \n46", "b3c303db-afa6-45c9-b4c4-9081fde8c735": "\u201cquantified number of tasks to be performed or materials to be produced or handled, within the defined \ntime period, and any potential adverse employment action that could result from failure to meet the quota.\u201d93\nAcross the federal government, agencies are conducting and supporting research on explain-\nable AI systems. The NIST is conducting fundamental research on the explainability of AI systems. A multidis-\nciplinary team of researchers aims to develop measurement methods and best practices to support the \nimplementation of core tenets of explainable AI.94 The Defense Advanced Research Projects Agency has a \nprogram on Explainable Artificial Intelligence that aims to create a suite of machine learning techniques that \nproduce more explainable models, while maintaining a high level of learning performance (prediction \naccuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging", "4fe6d91a-9662-4530-83d4-30121fbaa3e1": "assessment and mitigation. Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeople\u2019s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse", "65f61b92-532d-4937-a252-a07f66dc8b6d": "system is introduced or a large change implemented. This consultation should directly engage diverse impact\u00ad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\u00ad\nly prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakehold\u00ad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation", "4bcaf9fc-e04a-4901-a926-656d64bc2300": "APPENDIX\nPanel 3: Equal Opportunities and Civil Justice. This event explored current and emerging uses of \ntechnology that impact equity of opportunity in employment, education, and housing. 
\nWelcome: \n\u2022\nRashida Richardson, Senior Policy Advisor for Data and Democracy, White House Office of Science and\nTechnology Policy\n\u2022\nDominique Harrison, Director for Technology Policy, The Joint Center for Political and Economic\nStudies\nModerator: Jenny Yang, Director, Office of Federal Contract Compliance Programs, Department of Labor \nPanelists: \n\u2022\nChristo Wilson, Associate Professor of Computer Science, Northeastern University\n\u2022\nFrida Polli, CEO, Pymetrics\n\u2022\nKaren Levy, Assistant Professor, Department of Information Science, Cornell University\n\u2022\nNatasha Duarte, Project Director, Upturn\n\u2022\nElana Zeide, Assistant Professor, University of Nebraska College of Law\n\u2022\nFabian Rogers, Constituent Advocate, Office of NY State Senator Jabari Brisport and Community", "be74e45d-d511-47b2-96c9-b56ff1e3ef92": "Human-AI Con\ufb01guration \nGV-2.1-004 When systems may raise national security risks, involve national security \nprofessionals in mapping, measuring, and managing those risks. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Information Security \nGV-2.1-005 \nCreate mechanisms to provide protections for whistleblowers who report, based \non reasonable belief, when the organization violates relevant laws or poses a \nspeci\ufb01c and empirically well-substantiated negative risk to public safety (or has \nalready caused harm). \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent \nAI Actor Tasks: Governance and Oversight", "56e2a4fd-2a2a-444b-83a2-afa40b190add": "which can be used to evaluate gender bias in natural language processing coreference resolution systems.", "cb3de03f-35e4-4032-8357-256a59e540bf": "helped to reshape power and give more voice to those lacking the financial or political power to effect change. \nIn discussion of technical and governance interventions that that are needed to protect against the harms \nof these technologies, various panelists emphasized the need for transparency, data collection, and \nflexible and reactive policy development, analogous to how software is continuously updated and deployed. \nSome panelists pointed out that companies need clear guidelines to have a consistent environment for \ninnovation, with principles and guardrails being the key to fostering responsible innovation. \nPanel 2: The Criminal Justice System. This event explored current and emergent uses of technology in \nthe criminal justice system and considered how they advance or undermine public safety, justice, and \ndemocratic values. \nWelcome: \n\u2022\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n\u2022", "3c605102-fdfc-4821-b1f4-70662c729e24": "Jersey man was accused of shoplifting and trying to hit an officer with a car. He is the third known Black man\nto be wrongfully arrested based on face recognition. New York Times. Dec. 29, 2020, updated Jan. 6, 2021.\nhttps://www.nytimes.com/2020/12/29/technology/facial-recognition-misidentify-jail.html; Khari\nJohnson. How Wrongful Arrests Based on AI Derailed 3 Men's Lives. Wired. Mar. 7, 2022. https://\nwww.wired.com/story/wrongful-arrests-ai-derailed-3-mens-lives/\n32. Student Borrower Protection Center. Educational Redlining. Student Borrower Protection Center\nReport. Feb. 2020. https://protectborrowers.org/wp-content/uploads/2020/02/Education-Redlining\u00ad\nReport.pdf\n33. Jeffrey Dastin. 
Amazon scraps secret AI recruiting tool that showed bias against women. Reuters. Oct.\n10, 2018. https://www.reuters.com/article/us-amazon-com-jobs-automation-insight/amazon-scraps\u00ad\nsecret-ai-recruiting-tool-that-showed-bias-against-women-idUSKCN1MK08G", "79c6fb87-7d88-4ad2-81ff-29bd976fa9d3": "53 \nDocumenting, reporting, and sharing information about GAI incidents can help mitigate and prevent \nharmful outcomes by assisting relevant AI Actors in tracing impacts to their source. Greater awareness \nand standardization of GAI incident reporting could promote this transparency and improve GAI risk \nmanagement across the AI ecosystem. \nDocumentation and Involvement of AI Actors \nAI Actors should be aware of their roles in reporting AI incidents. To better understand previous incidents \nand implement measures to prevent similar ones in the future, organizations could consider developing \nguidelines for publicly available incident reporting which include information about AI actor \nresponsibilities. These guidelines would help AI system operators identify GAI incidents across the AI \nlifecycle and with AI Actors regardless of role. Documentation and review of third-party inputs and \nplugins for GAI systems is especially important for AI Actors in the context of incident disclosure; LLM", "6f9495a3-d6c2-4cc8-9349-19f6faddcec6": "AI technology can produce varied outputs in multiple modalities and present many classes of user \ninterfaces. This leads to a broader set of AI Actors interacting with GAI systems for widely di\ufb00ering \napplications and contexts of use. These can include data labeling and preparation, development of GAI \nmodels, content moderation, code generation and review, text generation and editing, image and video \ngeneration, summarization, search, and chat. These activities can take place within organizational \nsettings or in the public domain. \nOrganizations can restrict AI applications that cause harm, exceed stated risk tolerances, or that con\ufb02ict \nwith their tolerances or values. Governance tools and protocols that are applied to other types of AI \nsystems can be applied to GAI systems. These plans and actions include: \n\u2022 Accessibility and reasonable \naccommodations \n\u2022 AI actor credentials and quali\ufb01cations \n\u2022 Alignment to organizational values \n\u2022 Auditing and assessment \n\u2022 Change-management controls", "e8e842a4-5f1e-4c1f-8a44-ebd6ae57a409": "57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Di\ufb00er from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. 
\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Arti\ufb01cial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-arti\ufb01cial-intelligence", "2a987bbc-787b-4a9c-b59c-595f5b9c2fc8": "vators deserve clear rules of the road that allow new ideas to flourish, and the American public deserves protections \nfrom unsafe outcomes. All can benefit from assurances that automated systems will be designed, tested, and consis\u00ad\ntently confirmed to work as intended, and that they will be proactively protected from foreseeable unintended harm\u00ad\nful outcomes. \n\u2022\nA proprietary model was developed to predict the likelihood of sepsis in hospitalized patients and was imple\u00ad\nmented at hundreds of hospitals around the country. An independent study showed that the model predictions\nunderperformed relative to the designer\u2019s claims while also causing \u2018alert fatigue\u2019 by falsely alerting\nlikelihood of sepsis.6\n\u2022\nOn social media, Black people who quote and criticize racist messages have had their own speech silenced when\na platform\u2019s automated moderation system failed to distinguish this \u201ccounter speech\u201d (or other critique\nand journalism) from the original hateful messages to which such speech responded.7\n\u2022", "1bf50b19-5134-4a8f-be9a-e74affa8cdd0": "primary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of o\ufb00ensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. \nO\ufb00ensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of \nthe attack chain, including informing attackers on how to proactively evade threat detection and escalate \nprivileges after gaining system access. \nInformation security for GAI models and systems also includes maintaining availability of the GAI system", "3203966f-15b4-425b-b621-fd964e3bd46f": "defense, substantive or procedural, enforceable at law or in equity by any party against the United States, its \ndepartments, agencies, or entities, its officers, employees, or agents, or any other person, nor does it constitute a \nwaiver of sovereign immunity. \nCopyright Information \nThis document is a work of the United States Government and is in the public domain (see 17 U.S.C. \u00a7105). \n2", "4bcc930c-273e-4cbd-bba5-a297e9f9e390": "should not be used in education, work, housing, or in other contexts where the use of such surveillance \ntechnologies is likely to limit rights, opportunities, or access. 
Whenever possible, you should have access to \nreporting that confirms your data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or access. \nNOTICE AND EXPLANATION\nYou should know that an automated system is being used and understand how and why it \ncontributes to outcomes that impact you. Designers, developers, and deployers of automated systems \nshould provide generally accessible plain language documentation including clear descriptions of the overall \nsystem functioning and the role automation plays, notice that such systems are in use, the individual or organiza\u00ad\ntion responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such notice", "e4abdb97-5b2c-4dc9-974e-3357719369a9": "in-2021/ https://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top\nProgressive Companies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021.\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985\u00ad\nImplementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. Department of Energy Artificial Intelligence and Technology Office. April 18, 2022. https://\nwww.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S Department of Defense Responsible Artificial Intelligence Strategy and", "e1c3047a-75b7-40a8-b937-22cfe0b3f2b1": "delivery, various panelists pointed to a number of concerns including access to and expense of broadband \nservice, the privacy concerns associated with telehealth systems, the expense associated with health \nmonitoring devices, and how this can exacerbate equity issues. On the issue of technology enhanced care, \nsome panelists spoke extensively about the way in which racial biases and the use of race in medicine \nperpetuate harms and embed prior discrimination, and the importance of ensuring that the technologies used \nin medical care were accountable to the relevant stakeholders. Various panelists emphasized the importance \nof having the voices of those subjected to these technologies be heard.\n59", "48516a8d-e470-431b-ab6f-2b090a40107b": "confidence that their rights, opportunities, and access as well as their expectations about technologies are respected. \n3\nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE: \nThis section provides real-life examples of how these guiding principles can become reality, through laws, policies, and practices. \nIt describes practical technical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe examples provided are not critiques or endorsements, but rather are offered as illustrative cases to help \nprovide a concrete vision for actualizing the Blueprint for an AI Bill of Rights. Effectively implementing these \nprocesses require the cooperation of and collaboration among industry, civil society, researchers, policymakers, \ntechnologists, and the public. \n14", "7b016e8e-1c9f-40b9-b7f5-246b6e7584d5": "56 \nKarasavva, V. et al. 
(2021) Personality, Attitudinal, and Demographic Predictors of Non-consensual \nDissemination of Intimate Images. NIH. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9554400/ \nKatzman, J., et al. (2023) Taxonomizing and measuring representational harms: a look at image tagging. \nAAAI. https://dl.acm.org/doi/10.1609/aaai.v37i12.26670 \nKhan, T. et al. (2024) From Code to Consumer: PAI\u2019s Value Chain Analysis Illuminates Generative AI\u2019s Key \nPlayers. AI. https://partnershiponai.org/from-code-to-consumer-pais-value-chain-analysis-illuminates-\ngenerative-ais-key-players/ \nKirchenbauer, J. et al. (2023) A Watermark for Large Language Models. OpenReview. \nhttps://openreview.net/forum?id=aX8ig9X2a7 \nKleinberg, J. et al. (May 2021) Algorithmic monoculture and social welfare. PNAS. \nhttps://www.pnas.org/doi/10.1073/pnas.2018340118 \nLakatos, S. (2023) A Revealing Picture. Graphika. https://graphika.com/reports/a-revealing-picture", "498bff21-8221-4f12-b4c1-93f47337fba6": "content performance and impact, and work in collaboration with AI Actors \nexperienced in user research and experience. \nHuman-AI Con\ufb01guration \nMG-4.1-004 Implement active learning techniques to identify instances where the model fails \nor produces unexpected outputs. \nConfabulation \nMG-4.1-005 \nShare transparency reports with internal and external stakeholders that detail \nsteps taken to update the GAI system to enhance transparency and \naccountability. \nHuman-AI Con\ufb01guration; Harmful \nBias and Homogenization \nMG-4.1-006 \nTrack dataset modi\ufb01cations for provenance by monitoring data deletions, \nrecti\ufb01cation requests, and other changes that may impact the veri\ufb01ability of \ncontent origins. \nInformation Integrity", "ef7137f6-e670-41e6-8905-2d7c0a133bea": "NOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n\u2022\nA predictive policing system claimed to identify individuals at greatest risk to commit or become the victim of\ngun violence (based on automated analysis of social ties to gang members, criminal histories, previous experi\u00ad\nences of gun violence, and other factors) and led to individuals being placed on a watch list with no\nexplanation or public transparency regarding how the system came to its conclusions.85 Both police and\nthe public deserve to understand why and how such a system is making these determinations.\n\u2022\nA system awarding benefits changed its criteria invisibly. Individuals were denied benefits due to data entry\nerrors and other system flaws. These flaws were only revealed when an explanation of the system", "d3ad3601-20e3-41bb-bc05-092a46287a0d": "https://www.dol.gov/agencies/olms/laws/labor-management-reporting-and-disclosure-act (Section\n203). See also: U.S Department of Labor. Form LM-10. OLMS Fact Sheet, Accessed May 2, 2022. https://\nwww.dol.gov/sites/dolgov/files/OLMS/regs/compliance/LM-10_factsheet.pdf\n82. See, e.g., Apple. Protecting the User\u2019s Privacy. Accessed May 2, 2022.\nhttps://developer.apple.com/documentation/uikit/protecting_the_user_s_privacy; Google Developers.\nDesign for Safety: Android is secure by default and private by design. Accessed May 3, 2022.\nhttps://developer.android.com/design-for-safety\n83. Karen Hao. The coming war on the hidden algorithms that trap people in poverty. MIT Tech Review.\nDec. 
4, 2020.\nhttps://www.technologyreview.com/2020/12/04/1013068/algorithms-create-a-poverty-trap-lawyers\u00ad\nfight-back/\n84. Anjana Samant, Aaron Horowitz, Kath Xu, and Sophie Beiers. Family Surveillance by Algorithm.\nACLU. Accessed May 2, 2022.\nhttps://www.aclu.org/fact-sheet/family-surveillance-algorithm\n70", "d1e994ed-591e-4c95-a8d4-cf84a7926602": "should be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with", "aa34d6fa-7664-4579-90fa-4f3a7ef8610c": "HUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAn automated system should provide demonstrably effective mechanisms to opt out in favor of a human alterna\u00ad\ntive, where appropriate, as well as timely human consideration and remedy by a fallback system, with additional \nhuman oversight and safeguards for systems used in sensitive domains, and with training and assessment for any \nhuman-based portions of the system to ensure effectiveness. \nProvide a mechanism to conveniently opt out from automated systems in favor of a human \nalternative, where appropriate \nBrief, clear, accessible notice and instructions. Those impacted by an automated system should be", "8e92bbe0-47e0-42c1-acae-c650f210bf9f": "You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, de\u00ad\nvelopers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and de\u00ad\nletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfus\u00ad\ncate user choice or burden users with defaults that are privacy invasive. Con\u00ad\nsent should only be used to justify collection of data in cases where it can be", "64a6a839-474e-4542-96a8-cb04179fdd27": "system is in use. \nLimited and proportionate surveillance. 
Surveillance should be avoided unless it is strictly necessary \nto achieve a legitimate purpose and it is proportionate to the need. Designers, developers, and deployers of \nsurveillance systems should use the least invasive means of monitoring available and restrict monitoring to the \nminimum number of subjects possible. To the greatest extent possible consistent with law enforcement and \nnational security needs, individuals subject to monitoring should be provided with clear and specific notice \nbefore it occurs and be informed about how the data gathered through surveillance will be used. \nScope limits on surveillance to protect rights and democratic values. Civil liberties and civil \nrights must not be limited by the threat of surveillance or harassment facilitated or aided by an automated \nsystem. Surveillance systems should not be used to monitor the exercise of democratic rights, such as voting,", "cdc23111-1d88-49c3-80f3-9ebd70b44b12": "WHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \u00ad\u00ad\u00ad\n\u2022\nAn automated sentiment analyzer, a tool often used by technology platforms to determine whether a state-\nment posted online expresses a positive or negative sentiment, was found to be biased against Jews and gay\npeople. For example, the analyzer marked the statement \u201cI\u2019m a Jew\u201d as representing a negative sentiment,\nwhile \u201cI\u2019m a Christian\u201d was identified as expressing a positive sentiment.36 This could lead to the\npreemptive blocking of social media comments such as: \u201cI\u2019m gay.\u201d A related company with this bias concern\nhas made their data public to encourage researchers to help address the issue37 and has released reports\nidentifying and measuring this problem as well as detailing attempts to address it.38\n\u2022\nSearches for \u201cBlack girls,\u201d \u201cAsian girls,\u201d or \u201cLatina girls\u201d return predominantly39 sexualized content, rather", "5741a856-e0d3-4918-ad9e-a70c052fa59f": "need to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40", "416b3a7a-8d1a-4742-ab01-b6663ae8e0fa": "54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Ex\ufb01ltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-ex\ufb01ltration-poc-and-\ufb01x/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. 
Mandiant.", "f6481b29-2176-4fe7-9fbc-c0b492535374": "ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans\u2019 lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4", "8cddb4be-1854-4ac9-96d8-27f9a2fb057c": "25\nAlgorithmic \nDiscrimination \nProtections", "6dbc971e-9d84-465d-bdc4-ea5d2474f896": "data or observations, or otherwise interact with individuals and/or communities. Automated systems \ninclude, but are not limited to, systems derived from machine learning, statistics, or other data processing \nor artificial intelligence techniques, and exclude passive computing infrastructure. \u201cPassive computing \ninfrastructure\u201d is any intermediary technology that does not influence or determine the outcome of decision, \nmake or aid in decisions, inform policy implementation, or collect data or observations, including web \nhosting, domain registration, networking, caching, data storage, or cybersecurity. Throughout this \nframework, automated systems that are considered in scope are only those that have the potential to \nmeaningfully impact individuals\u2019 or communi-ties\u2019 rights, opportunities, or access. \nCOMMUNITIES: \u201cCommunities\u201d include: neighborhoods; social network connections (both online and", "dfcc2c8b-5dca-42d3-ac73-d3d69301e1bc": "bute as relevant to the specific use case. Data included should be carefully limited to avoid algorithmic \ndiscrimination resulting from, e.g., use of community characteristics, social network analysis, or group-based \ninferences. \nTailored to the situation. Human oversight should ensure that automated systems in sensitive domains \nare tailored to the specific use case and real-world deployment scenario, and evaluation testing should show \nthat the system is safe and effective for that specific situation. Validation testing performed based on one loca\u00ad\ntion or use case should not be assumed to transfer to another. \nHuman consideration before any high-risk decision. Automated systems, where they are used in \nsensitive domains, may play a role in directly providing information or otherwise providing positive outcomes \nto impacted people. However, automated systems should not be allowed to directly intervene in high-risk \nsituations, such as sentencing decisions or medical care, without human consideration.", "cf94ff83-f7a3-4d72-9761-ee38111d903e": "or that can be lawfully used to detect, monitor, intercept, collect, exploit, preserve, protect, transmit, and/or \nretain data, identifying information, or communications concerning individuals or groups. This framework \nlimits its focus to both government and commercial use of surveillance technologies when juxtaposed with \nreal-time or subsequent automated analysis and when such systems have a potential for meaningful impact \non individuals\u2019 or communities\u2019 rights, opportunities, or access. 
\nUNDERSERVED COMMUNITIES: The term \u201cunderserved communities\u201d refers to communities that have \nbeen systematically denied a full opportunity to participate in aspects of economic, social, and civic life, as \nexemplified by the list in the preceding definition of \u201cequity.\u201d \n11", "e65f76d3-eed7-4ce0-8cc4-9638ba09e5aa": "where it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable \nin plain language, and give you agency over data collection and the specific context of use; current hard-to\u00ad\nunderstand notice-and-choice practices for broad uses of data should be changed. Enhanced protections and \nrestrictions for data and inferences related to sensitive domains, including health, work, education, criminal \njustice, and finance, and for data pertaining to youth should put you first. In sensitive domains, your data and \nrelated inferences should only be used for necessary functions, and you should be protected by ethical review \nand use prohibitions. You and your communities should be free from unchecked surveillance; surveillance \ntechnologies should be subject to heightened oversight that includes at least pre-deployment assessment of their \npotential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring", "31953e75-65ce-4852-9a87-bdbefefcead4": "Records, Computers, and the Rights of Citizens (July 1973). https://www.justice.gov/opcl/docs/rec-com\u00ad\nrights.pdf.\n5. See, e.g., Office of Mgmt. & Budget, Exec. Office of the President, Circular A-130, Managing Information as a\nStrategic Resource, app. II \u00a7\u00a03 (July 28, 2016); Org. of Econ. Co-Operation & Dev., Revision of the\nRecommendation of the Council Concerning Guidelines Governing the Protection of Privacy and Transborder\nFlows of Personal Data, Annex Part Two (June 20, 2013). https://one.oecd.org/document/C(2013)79/en/pdf.\n6. Andrew Wong et al. External validation of a widely implemented proprietary sepsis prediction model in\nhospitalized patients. JAMA Intern Med. 2021; 181(8):1065-1070. doi:10.1001/jamainternmed.2021.2626\n7. Jessica Guynn. Facebook while black: Users call it getting 'Zucked,' say talking about racism is censored as hate\nspeech. USA Today. Apr. 24, 2019. https://www.usatoday.com/story/news/2019/04/24/facebook-while-black\u00ad", "59cc349a-6628-4b71-8f4f-56af9bbddfdc": "Health and Human Services. \nPanelists: \n\u2022\nMark Schneider, Health Innovation Advisor, ChristianaCare\n\u2022\nZiad Obermeyer, Blue Cross of California Distinguished Associate Professor of Policy and Management,\nUniversity of California, Berkeley School of Public Health\n\u2022\nDorothy Roberts, George A. Weiss University Professor of Law and Sociology and the Raymond Pace and\nSadie Tanner Mossell Alexander Professor of Civil Rights, University of Pennsylvania\n\u2022\nDavid Jones, A. Bernard Ackerman Professor of the Culture of Medicine, Harvard University\n\u2022\nJamila Michener, Associate Professor of Government, Cornell University; Co-Director, Cornell Center for\nHealth Equity\u00ad\nPanelists discussed the impact of new technologies on health disparities; healthcare access, delivery, and \noutcomes; and areas ripe for research and policymaking. Panelists discussed the increasing importance of tech-\nnology as both a vehicle to deliver healthcare and a tool to enhance the quality of care. 
On the issue of", "ae1b2df2-0052-44c0-9465-17e88b67c991": "26 \nMAP 4.1: Approaches for mapping AI technology and legal risks of its components \u2013 including the use of third-party data or \nsoftware \u2013 are in place, followed, and documented, as are risks of infringement of a third-party\u2019s intellectual property or other \nrights. \nAction ID \nSuggested Action \nGAI Risks \nMP-4.1-001 Conduct periodic monitoring of AI-generated content for privacy risks; address any \npossible instances of PII or sensitive data exposure. \nData Privacy \nMP-4.1-002 Implement processes for responding to potential intellectual property infringement \nclaims or other rights. \nIntellectual Property \nMP-4.1-003 \nConnect new GAI policies, procedures, and processes to existing model, data, \nsoftware development, and IT governance and to legal, compliance, and risk \nmanagement activities. \nInformation Security; Data Privacy \nMP-4.1-004 Document training data curation policies, to the extent possible and according to \napplicable laws and policies. \nIntellectual Property; Data Privacy;", "69b93abd-4e87-4056-87c5-e5819c310f8e": "existing human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\u00ad\ntial risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on people\u2019s rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, mea\u00ad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention", "757f2dc8-3aa9-4e16-9c81-12c6b390f008": "23 \nMP-1.1-002 \nDetermine and document the expected and acceptable GAI system context of \nuse in collaboration with socio-cultural and other domain experts, by assessing: \nAssumptions and limitations; Direct value to the organization; Intended \noperational environment and observed usage patterns; Potential positive and \nnegative impacts to individuals, public safety, groups, communities, \norganizations, democratic institutions, and the physical environment; Social \nnorms and expectations. \nHarmful Bias and Homogenization \nMP-1.1-003 \nDocument risk measurement plans to address identi\ufb01ed risks. Plans may \ninclude, as applicable: Individual and group cognitive biases (e.g., con\ufb01rmation \nbias, funding bias, groupthink) for AI Actors involved in the design, \nimplementation, and use of GAI systems; Known past GAI system incidents and \nfailure modes; In-context use and foreseeable misuse, abuse, and o\ufb00-label use; \nOver reliance on quantitative metrics and methodologies without su\ufb03cient", "fc97d203-7885-4077-a742-eef7cbd16ef1": "NOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. 
\nAn automated system should provide demonstrably clear, timely, understandable, and accessible notice of use, and \nexplanations as to how and why a decision was made or an action was taken by the system. These expectations are \nexplained below. \nProvide clear, timely, understandable, and accessible notice of use and explanations \u00ad\nGenerally accessible plain language documentation. The entity responsible for using the automated \nsystem should ensure that documentation describing the overall system (including any human components) is \npublic and easy to find. The documentation should describe, in plain language, how the system works and how", "b10395f5-dd7d-4bdd-8b79-b1049adaf1cc": "offline); families (construed broadly); people connected by affinity, identity, or shared traits; and formal organi-\nzational ties. This includes Tribes, Clans, Bands, Rancherias, Villages, and other Indigenous communities. AI \nand other data-driven automated systems most directly collect data on, make inferences about, and may cause \nharm to individuals. But the overall magnitude of their impacts may be most readily visible at the level of com-\nmunities. Accordingly, the concept of community is integral to the scope of the Blueprint for an AI Bill of Rights. \nUnited States law and policy have long employed approaches for protecting the rights of individuals, but exist-\ning frameworks have sometimes struggled to provide protections when effects manifest most clearly at a com-\nmunity level. For these reasons, the Blueprint for an AI Bill of Rights asserts that the harms of automated \nsystems should be evaluated, protected against, and redressed at both the individual and community levels.", "7b72f3dd-06a6-4760-ade3-b68fa700d859": "Confabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (\u201cgo\u201d/\u201cno-go\u201d \ndecisions), monitoring, and decommission decisions. 
\nHuman-AI Con\ufb01guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV", "fbc5530f-3dc1-4c5a-9119-a4b13018ba7d": "Connected Health Initiative \nConsumer Technology Association \nCourtney Radsch \nCoworker \nCyber Farm Labs \nData & Society Research Institute \nData for Black Lives \nData to Actionable Knowledge Lab \nat Harvard University \nDeloitte \nDev Technology Group \nDigital Therapeutics Alliance \nDigital Welfare State & Human \nRights Project and Center for \nHuman Rights and Global Justice at \nNew York University School of \nLaw, and Temple University \nInstitute for Law, Innovation & \nTechnology \nDignari \nDouglas Goddard \nEdgar Dworsky \nElectronic Frontier Foundation \nElectronic Privacy Information \nCenter, Center for Digital \nDemocracy, and Consumer \nFederation of America \nFaceTec \nFight for the Future \nGanesh Mani \nGeorgia Tech Research Institute \nGoogle \nHealth Information Technology \nResearch and Development \nInteragency Working Group \nHireVue \nHR Policy Association \nID.me \nIdentity and Data Sciences \nLaboratory at Science Applications \nInternational Corporation \nInformation Technology and \nInnovation Foundation", "403a6510-6af3-4ee5-bbbe-b97a65a94130": "https://arxiv.org/pdf/2305.17493v2 \nSmith, A. et al. (2023) Hallucination or Confabulation? Neuroanatomy as metaphor in Large Language \nModels. PLOS Digital Health. \nhttps://journals.plos.org/digitalhealth/article?id=10.1371/journal.pdig.0000388 \nSoice, E. et al. (2023) Can large language models democratize access to dual-use biotechnology? arXiv. \nhttps://arxiv.org/abs/2306.03809 \nSolaiman, I. et al. (2023) The Gradient of Generative AI Release: Methods and Considerations. arXiv. \nhttps://arxiv.org/abs/2302.04844 \nStaab, R. et al. (2023) Beyond Memorization: Violating Privacy via Inference With Large Language \nModels. arXiv. https://arxiv.org/pdf/2310.07298 \nStanford, S. et al. (2023) Whose Opinions Do Language Models Re\ufb02ect? arXiv. \nhttps://arxiv.org/pdf/2303.17548 \nStrubell, E. et al. (2019) Energy and Policy Considerations for Deep Learning in NLP. arXiv. \nhttps://arxiv.org/pdf/1906.02243 \nThe White House (2016) Circular No. A-130, Managing Information as a Strategic Resource.", "fc60770b-eee5-46f9-aa49-803aa5ea6dfd": "conversations. 2019. https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification\n38. Lucas Dixon, John Li, Jeffrey Sorensen, Nithum Thain, and Lucy Vasserman. Measuring and\nMitigating Unintended Bias in Text Classification. Proceedings of AAAI/ACM Conference on AI, Ethics,\nand Society. Feb. 2-3, 2018. https://dl.acm.org/doi/pdf/10.1145/3278721.3278729\n39. Paresh Dave. Google cuts racy results by 30% for searches like 'Latina teenager'. Reuters. Mar. 30,\n2022. https://www.reuters.com/technology/google-cuts-racy-results-by-30-searches-like-latina\u00ad\nteenager-2022-03-30/\n40. Safiya Umoja Noble. Algorithms of Oppression: How Search Engines Reinforce Racism. NYU Press.\nFeb. 2018. https://nyupress.org/9781479837243/algorithms-of-oppression/\n41. Paresh Dave. Google cuts racy results by 30% for searches like 'Latina teenager'. Reuters. Mar. 30,\n2022. 
https://www.reuters.com/technology/google-cuts-racy-results-by-30-searches-like-latina\u00ad\nteenager-2022-03-30/", "dba635cf-41a7-47e0-820b-1d2232b26d3e": "HUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nImplement additional human oversight and safeguards for automated systems related to \nsensitive domains \nAutomated systems used within sensitive domains, including criminal justice, employment, education, and \nhealth, should meet the expectations laid out throughout this framework, especially avoiding capricious, \ninappropriate, and discriminatory impacts of these technologies. Additionally, automated systems used within \nsensitive domains should meet these expectations: \nNarrowly scoped data and inferences. Human oversight should ensure that automated systems in \nsensitive domains are narrowly scoped to address a defined goal, justifying each included data item or attri\u00ad", "aa38da46-37c1-4f6e-b21d-0f01b6463f05": "provided instructions and material to complete tasks which may elicit harmful model behaviors. \nThis type of exercise can be more e\ufb00ective with large groups of AI red-teamers. \n\u2022 \nExpert: Performed by specialists with expertise in the domain or speci\ufb01c AI red-teaming context \nof use (e.g., medicine, biotech, cybersecurity). \n\u2022 \nCombination: In scenarios when it is di\ufb03cult to identify and recruit specialists with su\ufb03cient \ndomain and contextual expertise, AI red-teaming exercises may leverage both expert and", "cf169e60-9a05-44b1-8364-9e7cefe6a856": "49 \nearly lifecycle TEVV approaches are developed and matured for GAI, organizations may use \nrecommended \u201cpre-deployment testing\u201d practices to measure performance, capabilities, limits, risks, \nand impacts. This section describes risk measurement and estimation as part of pre-deployment TEVV, \nand examines the state of play for pre-deployment testing methodologies. \nLimitations of Current Pre-deployment Test Approaches \nCurrently available pre-deployment TEVV processes used for GAI applications may be inadequate, non-\nsystematically applied, or fail to re\ufb02ect or mismatched to deployment contexts. For example, the \nanecdotal testing of GAI system capabilities through video games or standardized tests designed for \nhumans (e.g., intelligence tests, professional licensing exams) does not guarantee GAI system validity or \nreliability in those domains. Similarly, jailbreaking or prompt engineering tests may not systematically \nassess validity or reliability risks.", "e188de33-eaa4-4379-ae37-a335ce7e69b7": "DATA PRIVACY \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe Privacy Act of 1974 requires privacy protections for personal information in federal \nrecords systems, including limits on data retention, and also provides individuals a general \nright to access and correct their data. Among other things, the Privacy Act limits the storage of individual \ninformation in federal systems of records, illustrating the principle of limiting the scope of data retention. 
Under \nthe Privacy Act, federal agencies may only retain data about an individual that is \u201crelevant and necessary\u201d to \naccomplish an agency\u2019s statutory purpose or to comply with an Executive Order of the President. The law allows", "032e77bb-7ee8-43a8-b13d-4f1aea5a1084": "data privacy violations, obscenity, extremism, violence, or CBRN information in \nsystem training data. \nData Privacy; Intellectual Property; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; CBRN \nInformation or Capabilities \nMS-2.6-003 Re-evaluate safety features of \ufb01ne-tuned models when the negative risk exceeds \norganizational risk tolerance. \nDangerous, Violent, or Hateful \nContent \nMS-2.6-004 Review GAI system outputs for validity and safety: Review generated code to \nassess risks that may arise from unreliable downstream decision-making. \nValue Chain and Component \nIntegration; Dangerous, Violent, or \nHateful Content \nMS-2.6-005 \nVerify that GAI system architecture can monitor outputs and performance, and \nhandle, recover from, and repair errors when security anomalies, threats and \nimpacts are detected. \nConfabulation; Information \nIntegrity; Information Security \nMS-2.6-006", "8a259709-51c6-4a1a-bd12-461abc901ca1": "44 \nMG-3.2-007 \nLeverage feedback and recommendations from organizational boards or \ncommittees related to the deployment of GAI applications and content \nprovenance when using third-party pre-trained models. \nInformation Integrity; Value Chain \nand Component Integration \nMG-3.2-008 \nUse human moderation systems where appropriate to review generated content \nin accordance with human-AI con\ufb01guration policies established in the Govern \nfunction, aligned with socio-cultural norms in the context of use, and for settings \nwhere AI models are demonstrated to perform poorly. \nHuman-AI Con\ufb01guration \nMG-3.2-009 \nUse organizational risk tolerance to evaluate acceptable risks and performance \nmetrics and decommission or retrain pre-trained models that perform outside of \nde\ufb01ned limits. \nCBRN Information or Capabilities; \nConfabulation \nAI Actor Tasks: AI Deployment, Operation and Monitoring, Third-party entities", "18bfbbbc-c65e-4bb3-a596-44dd8f4584fd": "procedures for communicating incidents to relevant AI Actors and where \napplicable, relevant legal and regulatory bodies. \nInformation Security \nMG-4.3-002 Establish and maintain policies and procedures to record and track GAI system \nreported errors, near-misses, and negative impacts. \nConfabulation; Information \nIntegrity", "b8198357-3185-41a6-a9b3-5f2d5839c91c": "43 \nMG-3.1-005 Review various transparency artifacts (e.g., system cards and model cards) for \nthird-party models. \nInformation Integrity; Information \nSecurity; Value Chain and \nComponent Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, Third-party entities \n \nMANAGE 3.2: Pre-trained models which are used for development are monitored as part of AI system regular monitoring and \nmaintenance. \nAction ID \nSuggested Action \nGAI Risks \nMG-3.2-001 \nApply explainable AI (XAI) techniques (e.g., analysis of embeddings, model \ncompression/distillation, gradient-based attributions, occlusion/term reduction, \ncounterfactual prompts, word clouds) as part of ongoing continuous \nimprovement processes to mitigate risks related to unexplainable GAI systems. 
\nHarmful Bias and Homogenization \nMG-3.2-002 \nDocument how pre-trained models have been adapted (e.g., \ufb01ne-tuned, or \nretrieval-augmented generation) for the speci\ufb01c generative task, including any", "424352b3-190e-433d-89f6-beb5f2519530": "of the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\u00ad\nmation about training and governance procedures for these technologies. Reporting should also include docu\u00ad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51", "6b457a05-02a8-43a9-b7bd-39e2c396f577": "for GAI risk measurement. Address general risks associated with a lack of \nexplainability and transparency in GAI systems by using ample documentation and \ntechniques such as: application of gradient-based attributions, occlusion/term \nreduction, counterfactual prompts and prompt engineering, and analysis of \nembeddings; Assess and update risk measurement approaches at regular \ncadences. \nConfabulation \nGV-4.1-002 \nEstablish policies, procedures, and processes detailing risk measurement in \ncontext of use with standardized measurement protocols and structured public \nfeedback exercises such as AI red-teaming or independent external evaluations. \nCBRN Information and Capability; \nValue Chain and Component \nIntegration", "c62b39ff-7e0d-437c-a0cf-e5f30c3aabc7": "policing, health care, and the workplace disproportionately harm disabled people. Center for Democracy\nand Technology Report. May 24, 2022.\nhttps://cdt.org/insights/ableism-and-disability-discrimination-in-new-surveillance-technologies-how\u00ad\nnew-surveillance-technologies-in-education-policing-health-care-and-the-workplace\u00ad\ndisproportionately-harm-disabled-people/\n69", "4b37fe12-0be1-495d-bba0-f49ca7f5df94": "59 \nTirrell, L. (2017) Toxic Speech: Toward an Epidemiology of Discursive Harm. Philosophical Topics, 45(2), \n139-162. https://www.jstor.org/stable/26529441 \nTufekci, Z. (2015) Algorithmic Harms Beyond Facebook and Google: Emergent Challenges of \nComputational Agency. Colorado Technology Law Journal. https://ctlj.colorado.edu/wp-\ncontent/uploads/2015/08/Tufekci-\ufb01nal.pdf \nTurri, V. et al. (2023) Why We Need to Know More: Exploring the State of AI Incident Documentation \nPractices. AAAI/ACM Conference on AI, Ethics, and Society. \nhttps://dl.acm.org/doi/fullHtml/10.1145/3600211.3604700 \nUrbina, F. et al. (2022) Dual use of arti\ufb01cial-intelligence-powered drug discovery. Nature Machine \nIntelligence. https://www.nature.com/articles/s42256-022-00465-9 \nWang, X. et al. (2023) Energy and Carbon Considerations of Fine-Tuning BERT. ACL Anthology. \nhttps://aclanthology.org/2023.\ufb01ndings-emnlp.607.pdf \nWang, Y. et al. (2023) Do-Not-Answer: A Dataset for Evaluating Safeguards in LLMs. arXiv.", "6745762d-e5d8-458d-be17-641b6b4d516e": "the Joint Center for Political and Economic Studies, New America, the German Marshall Fund, the Electronic \nPrivacy Information Center, and the Mozilla Foundation. The purpose of these convenings \u2013 recordings of \nwhich are publicly available online112 \u2013 was to bring together a variety of experts, practitioners, advocates \nand federal government officials to offer insights and analysis on the risks, harms, benefits, and \npolicy opportunities of automated systems. 
Each panel discussion was organized around a wide-ranging \ntheme, exploring current challenges and concerns and considering what an automated society that \nrespects democratic values should look like. These discussions focused on the topics of consumer \nrights and protections, the criminal justice system, equal opportunities and civil justice, artificial \nintelligence and democratic values, social welfare and development, and the healthcare system. \nSummaries of Panel Discussions:", "56a48ef8-56cc-4d56-82a4-b0c6d34bab3d": "their automated technologies. For example, a collection of non-profit organizations and companies have \nworked together to develop a framework that defines operational approaches to transparency for machine \nlearning systems.88 This framework, and others like it,89 inform the public about the use of these tools, going \nbeyond simple notice to include reporting elements such as safety evaluations, disparity assessments, and \nexplanations of how the systems work. \nLenders are required by federal law to notify consumers about certain decisions made about \nthem. Both the Fair Credit Reporting Act and the Equal Credit Opportunity Act require in certain circumstances \nthat consumers who are denied credit receive \"adverse action\" notices. Anyone who relies on the information in a \ncredit report to deny a consumer credit must, under the Fair Credit Reporting Act, provide an \"adverse action\" \nnotice to the consumer, which includes \"notice of the reasons a creditor took adverse action on the application", "677e974b-bbd2-4dc0-be4a-146bbf69bcb2": "results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20", "ad920bc1-0052-4c4f-bf89-3468cb95f316": "additionally wish to allocate risk management resources relative to the severity and likelihood of \nnegative impacts, including where and how these risks manifest, and their direct and material impacts \nharms in the context of GAI use. Mitigations for model or system level risks may di\ufb00er from mitigations \nfor use-case or ecosystem level risks. \nImportantly, some GAI risks are unknown, and are therefore di\ufb03cult to properly scope or evaluate given \nthe uncertainty about potential GAI scale, complexity, and capabilities. Other risks may be known but \ndi\ufb03cult to estimate given the wide range of GAI stakeholders, uses, inputs, and outputs. Challenges with \nrisk estimation are aggravated by a lack of visibility into GAI training data, and the generally immature \nstate of the science of AI measurement and safety today. This document focuses on risks for which there \nis an existing empirical evidence base at the time this pro\ufb01le was written; for example, speculative risks", "50e8d128-6b17-41b4-9e92-5de8e14f82c7": "employers to verify resume information, every former Apple employee\u2019s title gets erased and replaced with\na generic title. The Washington Post. Feb. 10, 2022.\nhttps://www.washingtonpost.com/technology/2022/02/10/apple-associate/\n78. National Institute of Standards and Technology. Privacy Framework Perspectives and Success\nStories. Accessed May 2, 2022.\nhttps://www.nist.gov/privacy-framework/getting-started-0/perspectives-and-success-stories\n79. ACLU of New York. What You Need to Know About New York\u2019s Temporary Ban on Facial\nRecognition in Schools. 
Accessed May 2, 2022.\nhttps://www.nyclu.org/en/publications/what-you-need-know-about-new-yorks-temporary-ban-facial\u00ad\nrecognition-schools\n80. New York State Assembly. Amendment to Education Law. Enacted Dec. 22, 2020.\nhttps://nyassembly.gov/leg/?default_fld=&leg_video=&bn=S05140&term=2019&Summary=Y&Text=Y\n81. U.S Department of Labor. Labor-Management Reporting and Disclosure Act of 1959, As Amended.", "22b8897a-4de2-4c91-bd41-9e32a6c4fdfe": "Disinformation and misinformation \u2013 both of which may be facilitated by GAI \u2013 may erode public trust in \ntrue or valid evidence and information, with downstream e\ufb00ects. For example, a synthetic image of a \nPentagon blast went viral and brie\ufb02y caused a drop in the stock market. Generative AI models can also \nassist malicious actors in creating compelling imagery and propaganda to support disinformation \ncampaigns, which may not be photorealistic, but could enable these campaigns to gain more reach and \nengagement on social media platforms. Additionally, generative AI models can assist malicious actors in \ncreating fraudulent content intended to impersonate others. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe, Valid and Reliable, Interpretable and \nExplainable \n2.9. Information Security \nInformation security for computer systems and data is a mature \ufb01eld with widely accepted and \nstandardized practices for o\ufb00ensive and defensive cyber capabilities. GAI-based systems present two", "b3f535d6-c485-498c-8c1f-b421042bb5b4": "on people of conscience to act to preserve civil rights\u2014including the right to privacy, which he has called \u201cthe \nbasis for so many more rights that we have come to take for granted that are ingrained in the fabric of this \ncountry.\u201d2\nTo advance President Biden\u2019s vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threats\u2014and uses technologies in ways that reinforce our highest values. \nResponding to the experiences of the American public, and informed by insights from researchers, \ntechnologists, advocates, journalists, and policymakers, this framework is accompanied by a technical \ncompanion\u2014a handbook for anyone seeking to incorporate these protections into policy and practice, including", "d53154bd-1a82-4a47-9c10-600a528e532e": "Innovation Foundation \nInformation Technology Industry \nCouncil \nInnocence Project \nInstitute for Human-Centered \nArtificial Intelligence at Stanford \nUniversity \nIntegrated Justice Information \nSystems Institute \nInternational Association of Chiefs \nof Police \nInternational Biometrics + Identity \nAssociation \nInternational Business Machines \nCorporation \nInternational Committee of the Red \nCross \nInventionphysics \niProov \nJacob Boudreau \nJennifer K. Wagner, Dan Berger, \nMargaret Hu, and Sara Katsanis \nJonathan Barry-Blocker \nJoseph Turow \nJoy Buolamwini \nJoy Mack \nKaren Bureau \nLamont Gholston \nLawyers\u2019 Committee for Civil \nRights Under Law \n60", "6048d14c-7855-4647-b087-57ae0d713497": "describes three broad challenges for mitigating bias \u2013 datasets, testing and evaluation, and human factors \u2013 and \nintroduces preliminary guidance for addressing them. 
Throughout, the special publication takes a socio-\ntechnical perspective to identifying and managing AI bias. \n29\nAlgorithmic \nDiscrimination \nProtections", "bbba260e-ff51-4950-bfd6-f97904f8b366": "established, may also count as human subject experimentation, and require special review under organizational \ncompliance bodies applying medical, scientific, and academic human subject experimentation ethics rules and \ngovernance procedures. \nData quality. In sensitive domains, entities should be especially careful to maintain the quality of data to \navoid adverse consequences arising from decision-making based on flawed or inaccurate data. Such care is \nnecessary in a fragmented, complex data ecosystem and for datasets that have limited access such as for fraud \nprevention and law enforcement. It should be not left solely to individuals to carry the burden of reviewing and \ncorrecting data. Entities should conduct regular, independent audits and take prompt corrective measures to \nmaintain accurate, timely, and complete data. \nLimit access to sensitive data and derived data. Sensitive data and derived data should not be sold,", "9e642b01-e82f-4f7e-ab17-f89df81f45ed": "SECTION TITLE\nAPPENDIX\nListening to the American People \nThe White House Office of Science and Technology Policy (OSTP) led a yearlong process to seek and distill \ninput from people across the country \u2013 from impacted communities to industry stakeholders to \ntechnology developers to other experts across fields and sectors, as well as policymakers across the Federal \ngovernment \u2013 on the issue of algorithmic and data-driven harms and potential remedies. Through panel \ndiscussions, public listening sessions, private meetings, a formal request for information, and input to a \npublicly accessible and widely-publicized email address, people across the United States spoke up about \nboth the promises and potential harms of these technologies, and played a central role in shaping the \nBlueprint for an AI Bill of Rights. \nPanel Discussions to Inform the Blueprint for An AI Bill of Rights \nOSTP co-hosted a series of six panel discussions in collaboration with the Center for American Progress,", "4d8ac59c-02fb-403b-bfba-6d1c62c8513b": "\u2022 Change-management controls \n\u2022 Commercial use \n\u2022 Data provenance", "8ad112fa-a36b-4a31-aa34-22d98017a0bd": "them\n10. Samantha Cole. This Horrifying App Undresses a Photo of Any Woman With a Single Click. Motherboard.\nJune 26, 2019. https://www.vice.com/en/article/kzm59x/deepnude-app-creates-fake-nudes-of-any-woman\n11. Lauren Kaori Gurley. Amazon\u2019s AI Cameras Are Punishing Drivers for Mistakes They Didn\u2019t Make.\nMotherboard. Sep. 20, 2021. https://www.vice.com/en/article/88npjv/amazons-ai-cameras-are-punishing\u00ad\ndrivers-for-mistakes-they-didnt-make\n63", "491f5c27-32de-47cc-97aa-a62540e2c12a": "50 \nParticipatory Engagement Methods \nOn an ad hoc or more structured basis, organizations can design and use a variety of channels to engage \nexternal stakeholders in product development or review. Focus groups with select experts can provide \nfeedback on a range of issues. Small user studies can provide feedback from representative groups or \npopulations. Anonymous surveys can be used to poll or gauge reactions to speci\ufb01c features. Participatory \nengagement methods are often less structured than \ufb01eld testing or red teaming, and are more \ncommonly used in early stages of AI or product development. 
\nField Testing \nField testing involves structured settings to evaluate risks and impacts and to simulate the conditions \nunder which the GAI system will be deployed. Field style tests can be adapted from a focus on user \npreferences and experiences towards AI risks and impacts \u2013 both negative and positive. When carried", "ef089202-5f7b-40dc-a4d3-19c9a7065d3d": "ENDNOTES\n107. Centers for Medicare & Medicaid Services. Biden-Harris Administration Quadruples the Number\nof Health Care Navigators Ahead of HealthCare.gov Open Enrollment Period. Aug. 27, 2021.\nhttps://www.cms.gov/newsroom/press-releases/biden-harris-administration-quadruples-number\u00ad\nhealth-care-navigators-ahead-healthcaregov-open\n108. See, e.g., McKinsey & Company. The State of Customer Care in 2022. July 8, 2022. https://\nwww.mckinsey.com/business-functions/operations/our-insights/the-state-of-customer-care-in-2022;\nSara Angeles. Customer Service Solutions for Small Businesses. Business News Daily.\nJun. 29, 2022. https://www.businessnewsdaily.com/7575-customer-service-solutions.html\n109. Mike Hughes. Are We Getting The Best Out Of Our Bots? Co-Intelligence Between Robots &\nHumans. Forbes. Jul. 14, 2022.\nhttps://www.forbes.com/sites/mikehughes1/2022/07/14/are-we-getting-the-best-out-of-our-bots-co\u00ad\nintelligence-between-robots--humans/?sh=16a2bd207395", "18a7df82-ff90-4408-a0ee-a2d91effd2a2": "could reduce environmental impacts at inference time, but training and tuning such models may still \ncontribute to their environmental impacts. Currently there is no agreed upon method to estimate \nenvironmental impacts from GAI. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe \n2.6. Harmful Bias and Homogenization \nBias exists in many forms and can become ingrained in automated systems. AI systems, including GAI \nsystems, can increase the speed and scale at which harmful biases manifest and are acted upon, \npotentially perpetuating and amplifying harms to individuals, groups, communities, organizations, and \nsociety. For example, when prompted to generate images of CEOs, doctors, lawyers, and judges, current \ntext-to-image models underrepresent women and/or racial minorities, and people with disabilities. \nImage generator models have also produced biased or stereotyped output for various demographic", "81cbeaae-c1fe-4914-aebd-a0670f500364": "SENSITIVE DOMAINS: \u201cSensitive domains\u201d are those in which activities being conducted can cause material \nharms, including significant adverse effects on human rights such as autonomy and dignity, as well as civil liber\u00ad\nties and civil rights. Domains that have historically been singled out as deserving of enhanced data protections \nor where such enhanced protections are reasonably expected by the public include, but are not limited to, \nhealth, family planning and care, employment, education, criminal justice, and personal finance. In the context \nof this framework, such domains are considered sensitive whether or not the specifics of a system context \nwould necessitate coverage under existing law, and domains and data that are considered sensitive are under\u00ad\nstood to change over time based on societal norms and context. \nSURVEILLANCE TECHNOLOGY: \u201cSurveillance technology\u201d refers to products or services marketed for", "10009fcf-4d1c-4343-948a-2ed6fd9bcc09": "33 \nMEASURE 2.7: AI system security and resilience \u2013 as identi\ufb01ed in the MAP function \u2013 are evaluated and documented. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-2.7-001 \nApply established security measures to: Assess likelihood and magnitude of \nvulnerabilities and threats such as backdoors, compromised dependencies, data \nbreaches, eavesdropping, man-in-the-middle attacks, reverse engineering, \nautonomous agents, model theft or exposure of model weights, AI inference, \nbypass, extraction, and other baseline security concerns. \nData Privacy; Information Integrity; \nInformation Security; Value Chain \nand Component Integration \nMS-2.7-002 \nBenchmark GAI system security and resilience related to content provenance \nagainst industry standards and best practices. Compare GAI system security \nfeatures and content provenance methods against industry state-of-the-art. \nInformation Integrity; Information \nSecurity \nMS-2.7-003 \nConduct user surveys to gather user satisfaction with the AI-generated content", "6d448984-e848-470c-8ed7-a1a740d3544d": "APPENDIX\n\u2022\nJulia Simon-Mishel, Supervising Attorney, Philadelphia Legal Assistance\n\u2022\nDr. Zachary Mahafza, Research & Data Analyst, Southern Poverty Law Center\n\u2022\nJ. Khadijah Abdurahman, Tech Impact Network Research Fellow, AI Now Institute, UCLA C2I1, and\nUWA Law School\nPanelists separately described the increasing scope of technology use in providing for social welfare, including \nin fraud detection, digital ID systems, and other methods focused on improving efficiency and reducing cost. \nHowever, various panelists individually cautioned that these systems may reduce burden for government \nagencies by increasing the burden and agency of people using and interacting with these technologies. \nAdditionally, these systems can produce feedback loops and compounded harm, collecting data from \ncommunities and using it to reinforce inequality. Various panelists suggested that these harms could be \nmitigated by ensuring community input at the beginning of the design process, providing ways to opt out of", "2ff90b7e-f382-4179-9da8-628b5a1a1b92": "safe, secure, and resilient; (e) understandable; (f ) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulations\u2014and \nmeasures to address harms when they occur\u2014can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components. \nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturers\u2019 ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate", "9f8f273e-5d23-4eb9-89b0-255c934e24af": "or \nbe designed to proactively protect you from harms \nimpacts of automated systems. You should be protected from inappropriate or irrelevant data use in the \ndesign, development, and deployment of automated systems, and from the compounded harm of its reuse. 
\nIndependent evaluation and reporting that confirms that the system is safe and effective, including reporting of \nsteps taken to mitigate potential harms, should be performed and the results made public whenever possible. \nALGORITHMIC DISCRIMINATION PROTECTIONS\nYou should not face discrimination by algorithms and systems should be used and designed in \nan equitable way. Algorithmic discrimination occurs when automated systems contribute to unjustified \ndifferent treatment or impacts disfavoring people based on their race, color, ethnicity, sex (including \npregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual", "77cc9870-55ed-4598-9480-e11a183531a6": "BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022", "34b4a549-0384-4808-966e-f713a6ea12ad": "applications of GAI. \nCBRN Information or \nCapabilities; Obscene, \nDegrading, and/or Abusive \nContent; Data Privacy; Civil \nRights violations \nAI Actor Tasks: AI Development, AI Deployment, Governance and Oversight", "d2512e2e-5c60-4d43-af2b-449ec03a2865": "dinary benefits, from technology that helps farmers grow food more efficiently and computers that predict storm \npaths, to algorithms that can identify diseases in patients. These tools now drive important decisions across \nsectors, while data is helping to revolutionize global industries. Fueled by the power of American innovation, \nthese tools hold the potential to redefine every part of our society and make life better for everyone. \nThis important progress must not come at the price of civil rights or democratic values, foundational American \nprinciples that President Biden has affirmed as a cornerstone of his Administration. On his first day in office, the \nPresident ordered the full Federal government to work to root out inequity, embed fairness in decision-\nmaking processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.1 The \nPresident has spoken forcefully about the urgent challenges posed to democracy today and has regularly called", "83aa9af9-6569-437e-af51-675af1d1e46e": "and data that are considered sensitive are understood to change over time based on societal norms and context. \n36", "c51b3f98-fc6a-430b-8e01-4248eccba989": "Provide timely human consideration and remedy by a fallback and escalation system in the \nevent that an automated system fails, produces error, or you would like to appeal or con\u00ad\ntest its impacts on you \nProportionate. The availability of human consideration and fallback, along with associated training and \nsafeguards against human bias, should be proportionate to the potential of the automated system to meaning\u00ad\nfully impact rights, opportunities, or access. Automated systems that have greater control over outcomes, \nprovide input to high-stakes decisions, relate to sensitive domains, or otherwise have a greater potential to \nmeaningfully impact rights, opportunities, or access should have greater availability (e.g., staffing) and over\u00ad\nsight of human consideration and fallback mechanisms. \nAccessible. Mechanisms for human consideration and fallback, whether in-person, on paper, by phone, or \notherwise provided, should be easy to find and use. 
These mechanisms should be tested to ensure that users", "1b44f9d8-f8e4-40fb-9dce-83baf7655bb3": "About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair arti\ufb01cial intelligence (AI) so that its full commercial and societal bene\ufb01ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to ful\ufb01ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the e\ufb00orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST sta\ufb00 and guest", "427b5af4-e471-4ebd-8a89-da73e750b6d4": "Applying The Blueprint for an AI Bill of Rights \nSENSITIVE DATA: Data and metadata are sensitive if they pertain to an individual in a sensitive domain \n(defined below); are generated by technologies used in a sensitive domain; can be used to infer data from a \nsensitive domain or sensitive data about an individual (such as disability-related data, genomic data, biometric \ndata, behavioral data, geolocation data, data related to interaction with the criminal justice system, relationship \nhistory and legal status such as custody and divorce information, and home, work, or school environmental \ndata); or have the reasonable potential to be used in ways that are likely to expose individuals to meaningful \nharm, such as a loss of privacy or financial harm due to identity theft. Data and metadata generated by or about \nthose who are not yet legal adults is also sensitive, even if not related to a sensitive domain. Such data includes, \nbut is not limited to, numerical, text, image, audio, or video data.", "fbb4ec61-5d7a-469c-8266-667075da02ba": "ENDNOTES\n75. See., e.g., Sam Sabin. Digital surveillance in a post-Roe world. Politico. May 5, 2022. https://\nwww.politico.com/newsletters/digital-future-daily/2022/05/05/digital-surveillance-in-a-post-roe\u00ad\nworld-00030459; Federal Trade Commission. FTC Sues Kochava for Selling Data that Tracks People at\nReproductive Health Clinics, Places of Worship, and Other Sensitive Locations. Aug. 29, 2022. https://\nwww.ftc.gov/news-events/news/press-releases/2022/08/ftc-sues-kochava-selling-data-tracks-people\u00ad\nreproductive-health-clinics-places-worship-other\n76. Todd Feathers. This Private Equity Firm Is Amassing Companies That Collect Data on America\u2019s\nChildren. The Markup. Jan. 11, 2022.\nhttps://themarkup.org/machine-learning/2022/01/11/this-private-equity-firm-is-amassing-companies\u00ad\nthat-collect-data-on-americas-children\n77. Reed Albergotti. Every employee who leaves Apple becomes an \u2018associate\u2019: In job databases used by", "4a1f6a6c-b79b-4a2f-b298-a7261f617663": "licensed works, or personal, privileged, proprietary or sensitive data; Underlying \nfoundation models, versions of underlying models, and access modes. 
\nData Privacy; Human-AI \nCon\ufb01guration; Information \nIntegrity; Intellectual Property; \nValue Chain and Component \nIntegration \nAI Actor Tasks: Governance and Oversight", "9eef7ac9-2709-4532-bbf9-e0d3adbf40c5": "whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57", "90eb2dd9-3186-4c57-bcae-574485a1be52": "4 \n1. CBRN Information or Capabilities: Eased access to or synthesis of materially nefarious \ninformation or design capabilities related to chemical, biological, radiological, or nuclear (CBRN) \nweapons or other dangerous materials or agents. \n2. Confabulation: The production of con\ufb01dently stated but erroneous or false content (known \ncolloquially as \u201challucinations\u201d or \u201cfabrications\u201d) by which users may be misled or deceived.6 \n3. Dangerous, Violent, or Hateful Content: Eased production of and access to violent, inciting, \nradicalizing, or threatening content as well as recommendations to carry out self-harm or \nconduct illegal activities. Includes di\ufb03culty controlling public exposure to hateful and disparaging \nor stereotyping content. \n4. Data Privacy: Impacts due to leakage and unauthorized use, disclosure, or de-anonymization of \nbiometric, health, location, or other personally identi\ufb01able information or sensitive data.7", "f46c1f82-6ce7-493e-b3e8-3d02f09a2bda": "6 \n2.2. Confabulation \n\u201cConfabulation\u201d refers to a phenomenon in which GAI systems generate and con\ufb01dently present \nerroneous or false content in response to prompts. Confabulations also include generated outputs that \ndiverge from the prompts or other input or that contradict previously generated statements in the same \ncontext. These phenomena are colloquially also referred to as \u201challucinations\u201d or \u201cfabrications.\u201d \nConfabulations can occur across GAI outputs and contexts.9,10 Confabulations are a natural result of the \nway generative models are designed: they generate outputs that approximate the statistical distribution \nof their training data; for example, LLMs predict the next token or word in a sentence or phrase. While \nsuch statistical prediction can produce factually accurate and consistent outputs, it can also produce \noutputs that are factually inaccurate or internally inconsistent. This dynamic is particularly relevant when", "dabf101d-f998-43b9-ad15-0a6933bde316": "20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. 
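The confabulation passage above attributes the phenomenon to statistical next-token prediction. A deliberately tiny bigram sampler (an illustrative toy, nothing like a production LLM) shows how locally plausible transitions can recombine into statements never present in the training text:

```python
# Toy illustration of the mechanism described above: sampling statistically
# likely next tokens can yield fluent but false output. This is a tiny
# bigram model over a made-up corpus, not a real LLM.
import random
from collections import defaultdict

corpus = "the act was signed in 1974 . the act was amended in 1988 .".split()

# Count bigram transitions observed in the corpus.
transitions = defaultdict(list)
for prev, nxt in zip(corpus, corpus[1:]):
    transitions[prev].append(nxt)

def generate(start: str, length: int, seed: int) -> str:
    random.seed(seed)
    out = [start]
    for _ in range(length):
        choices = transitions.get(out[-1])
        if not choices:
            break
        out.append(random.choice(choices))
    return " ".join(out)

for s in range(4):
    print(generate("the", 6, seed=s))
```

Every bigram in each sample is well attested, yet outputs such as "the act was signed in 1988" can appear even though the corpus never contained that claim; scaled up, this is the dynamic the NIST text describes.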
\nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, A\ufb00ected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Con\ufb01guration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more signi\ufb01cant risks. \nHuman-AI Con\ufb01guration; \nConfabulation", "74f049f0-a803-4a0c-80cf-58f96cc16b98": "to wait\u2014immediate human consideration and fallback should be available. In many time-critical systems, such a \nremedy is already immediately available, such as a building manager who can open a door in the case an automated \ncard access system fails. \nIn the criminal justice system, employment, education, healthcare, and other sensitive domains, automated systems \nare used for many purposes, from pre-trial risk assessments and parole decisions to technologies that help doctors \ndiagnose disease. Absent appropriate safeguards, these technologies can lead to unfair, inaccurate, or dangerous \noutcomes. These sensitive domains require extra protections. It is critically important that there is extensive human \noversight in such settings. \nThese critical protections have been adopted in some scenarios. Where automated systems have been introduced to \nprovide the public access to government benefits, existing human paper and phone-based processes are generally still", "02282833-7fe4-4b1a-aebf-3604b78c9e23": "managing-bias-arti\ufb01cial-intelligence \nNorthcutt, C. et al. (2021) Pervasive Label Errors in Test Sets Destabilize Machine Learning Benchmarks. \narXiv. https://arxiv.org/pdf/2103.14749 \nOECD (2023) \"Advancing accountability in AI: Governing and managing risks throughout the lifecycle for \ntrustworthy AI\", OECD Digital Economy Papers, No. 349, OECD Publishing, Paris. \nhttps://doi.org/10.1787/2448f04b-en \nOECD (2024) \"De\ufb01ning AI incidents and related terms\" OECD Arti\ufb01cial Intelligence Papers, No. 16, OECD \nPublishing, Paris. https://doi.org/10.1787/d1a8d965-en \nOpenAI (2023) GPT-4 System Card. https://cdn.openai.com/papers/gpt-4-system-card.pdf \nOpenAI (2024) GPT-4 Technical Report. https://arxiv.org/pdf/2303.08774 \nPadmakumar, V. et al. (2024) Does writing with language models reduce content diversity? ICLR. \nhttps://arxiv.org/pdf/2309.05196 \nPark, P. et. al. (2024) AI deception: A survey of examples, risks, and potential solutions. Patterns, 5(5). \narXiv. https://arxiv.org/pdf/2308.14752", "aac514f9-9cfd-4d14-8171-8d4c909cede1": "proof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. 
\nHuman-AI Con\ufb01guration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV", "aebd3e65-e2da-4b5f-ad11-9d4a31881517": "Notice and explanations also serve an important safety and efficacy purpose, allowing experts to verify the reasonable\u00ad\nness of a recommendation before enacting it. \nIn order to guard against potential harms, the American public needs to know if an automated system is being used. \nClear, brief, and understandable notice is a prerequisite for achieving the other protections in this framework. Like\u00ad\nwise, the public is often unable to ascertain how or why an automated system has made a decision or contributed to a \nparticular outcome. The decision-making processes of automated systems tend to be opaque, complex, and, therefore, \nunaccountable, whether by design or by omission. These factors can make explanations both more challenging and \nmore important, and should not be used as a pretext to avoid explaining important decisions to the people impacted \nby those choices. In the context of automated systems, clear and valid explanations should be recognized as a baseline \nrequirement.", "d372d7d5-e676-451d-9c07-75dbecc3beb4": "DATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nData access and correction. People whose data is collected, used, shared, or stored by automated \nsystems should be able to access data and metadata about themselves, know who has access to this data, and \nbe able to correct it if necessary. Entities should receive consent before sharing data with other entities and \nshould keep records of what data is shared and with whom. \nConsent withdrawal and data deletion. Entities should allow (to the extent legally permissible) with\u00ad\ndrawal of data access consent, resulting in the deletion of user data, metadata, and the timely removal of \ntheir data from any systems (e.g., machine learning models) derived from that data.68", "771dac26-6584-4f0b-bc14-a12c56cd48ea": "DATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\n\u2022\nContinuous positive airway pressure machines gather data for medical purposes, such as diagnosing sleep\napnea, and send usage data to a patient\u2019s insurance company, which may subsequently deny coverage for the\ndevice based on usage data. 
Patients were not aware that the data would be used in this way or monitored\nby anyone other than their doctor.70 \n\u2022\nA department store company used predictive analytics applied to collected consumer data to determine that a\nteenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her\nhouse, revealing to her father that she was pregnant.71\n\u2022\nSchool audio surveillance systems monitor student conversations to detect potential \"stress indicators\" as\na warning of potential violence.72 Online proctoring systems claim to detect if a student is cheating on an", "998558ef-dea7-4966-b3be-986e930153d1": "disabilities, voters with shorter or hyphenated names, and voters who have changed their name.97 A human\ncuring process,98 which helps voters to confirm their signatures and correct other voting mistakes, is\nimportant to ensure all votes are counted,99 and it is already standard practice in much of the country for\nboth an election official and the voter to have the opportunity to review and correct any such issues.100 \n47", "6d5fe8b5-2369-47c1-9bd6-5aa5d716523a": "due to identity theft. Data and metadata generated by or about those who are not yet legal adults is also sensitive, even \nif not related to a sensitive domain. Such data includes, but is not limited to, numerical, text, image, audio, or video \ndata. \u201cSensitive domains\u201d are those in which activities being conducted can cause material harms, including signifi\u00ad\ncant adverse effects on human rights such as autonomy and dignity, as well as civil liberties and civil rights. Domains \nthat have historically been singled out as deserving of enhanced data protections or where such enhanced protections \nare reasonably expected by the public include, but are not limited to, health, family planning and care, employment, \neducation, criminal justice, and personal finance. In the context of this framework, such domains are considered \nsensitive whether or not the specifics of a system context would necessitate coverage under existing law, and domains", "3c0b8e22-5102-47e7-b4a0-8252e2df10e3": "should not be used in education, work, housing, or in other contexts where the \nuse of such surveillance technologies is likely to limit rights, opportunities, or \naccess. Whenever possible, you should have access to reporting that confirms \nyour data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or \naccess. \nDATA PRIVACY\n30", "da2d77db-b7d9-40d4-b923-2bb8dba6d444": "sex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. 
Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using", "868df082-6495-43dd-a3a9-a3989e022a35": "a reduced ability to use analytical reasoning.61 Documented patterns show that personal data is being aggregated by \ndata brokers to profile communities in harmful ways.62 The impact of all this data harvesting is corrosive, \nbreeding distrust, anxiety, and other mental health problems; chilling speech, protest, and worker organizing; and \nthreatening our democratic process.63 The American public should be protected from these growing risks. \nIncreasingly, some companies are taking these concerns seriously and integrating mechanisms to protect consumer \nprivacy into their products by design and by default, including by minimizing the data they collect, communicating \ncollection and use clearly, and improving security practices. Federal government surveillance and other collection and \nuse of data is governed by legal protections that help to protect civil liberties and provide for limits on data retention", "d40255dc-30f7-4d12-a137-356c9331d036": "counterfactual and low-context (e.g., \u201cleader,\u201d \u201cbad guys\u201d) prompts. For ML \npipelines or business processes with categorical or numeric outcomes that rely \non GAI, apply general fairness metrics (e.g., demographic parity, equalized odds, \nequal opportunity, statistical hypothesis tests), to the pipeline or business \noutcome where appropriate; Custom, context-speci\ufb01c metrics developed in \ncollaboration with domain experts and a\ufb00ected communities; Measurements of \nthe prevalence of denigration in generated content in deployment (e.g., sub-\nsampling a fraction of tra\ufb03c and manually annotating denigrating content). \nHarmful Bias and Homogenization; \nDangerous, Violent, or Hateful \nContent \nMS-2.11-003 \nIdentify the classes of individuals, groups, or environmental ecosystems which \nmight be impacted by GAI systems through direct engagement with potentially \nimpacted communities. \nEnvironmental; Harmful Bias and \nHomogenization \nMS-2.11-004", "dd4ef4a9-d623-4695-8648-6dc788ce570b": "could lead to negative psychological impacts. \nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Valid and Reliable \n2.8. Information Integrity \nInformation integrity describes the \u201cspectrum of information and associated patterns of its creation, \nexchange, and consumption in society.\u201d High-integrity information can be trusted; \u201cdistinguishes fact \nfrom \ufb01ction, opinion, and inference; acknowledges uncertainties; and is transparent about its level of \nvetting. This information can be linked to the original source(s) with appropriate evidence. High-integrity \ninformation is also accurate and reliable, can be veri\ufb01ed and authenticated, has a clear chain of custody, \nand creates reasonable expectations about when its validity may expire.\u201d11 \n \n \n11 This de\ufb01nition of information integrity is derived from the 2022 White House Roadmap for Researchers on", "6c472ef1-8115-4732-a5dc-a39c15d5eb94": "inputs and content delivered through these plugins is often distributed, with inconsistent or insu\ufb03cient \naccess control. 
\nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.", "f469924d-ea0b-4792-976b-4b057974feca": "assess validity or reliability risks. \nMeasurement gaps can arise from mismatches between laboratory and real-world settings. Current \ntesting approaches often remain focused on laboratory conditions or restricted to benchmark test \ndatasets and in silico techniques that may not extrapolate well to\u2014or directly assess GAI impacts in real-\nworld conditions. For example, current measurement gaps for GAI make it di\ufb03cult to precisely estimate \nits potential ecosystem-level or longitudinal risks and related political, social, and economic impacts. \nGaps between benchmarks and real-world use of GAI systems may likely be exacerbated due to prompt \nsensitivity and broad heterogeneity of contexts of use. \nA.1.5. Structured Public Feedback \nStructured public feedback can be used to evaluate whether GAI systems are performing as intended \nand to calibrate and verify traditional measurement methods. Examples of structured feedback include, \nbut are not limited to: \n\u2022", "e072ffd2-a697-41f5-ae4b-0ee596f98d0a": "Information Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization", "c02ee20e-7730-4a17-ae99-3ef97db1ac6d": "out with large groups of users, these tests can provide estimations of the likelihood of risks and impacts \nin real world interactions. \nOrganizations may also collect feedback on outcomes, harms, and user experience directly from users in \nthe production environment after a model has been released, in accordance with human subject \nstandards such as informed consent and compensation. Organizations should follow applicable human \nsubjects research requirements, and best practices such as informed consent and subject compensation, \nwhen implementing feedback activities. \nAI Red-teaming \nAI red-teaming is an evolving practice that references exercises often conducted in a controlled \nenvironment and in collaboration with AI developers building AI models to identify potential adverse \nbehavior or outcomes of a GAI model or system, how they could occur, and stress test safeguards\u201d. AI \nred-teaming can be performed before or after AI models or systems are made available to the broader", "4c667dca-e6ab-4029-9e45-9e9f70dee4c1": "those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. 
\n\u2022 Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems, \nprojections of student progress or outcomes, algorithms that determine access to resources or", "b8c15525-e535-4798-9d4a-7c596b7a91d3": "Terminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/\ufb01nal \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Pro\ufb01les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Pro\ufb01les/6-sec-pro\ufb01le \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product", "ddd2f334-6ef8-42e7-9807-2ca32d1cfa58": "18 \nGOVERN 3.2: Policies and procedures are in place to de\ufb01ne and di\ufb00erentiate roles and responsibilities for human-AI con\ufb01gurations \nand oversight of AI systems. \nAction ID \nSuggested Action \nGAI Risks \nGV-3.2-001 \nPolicies are in place to bolster oversight of GAI systems with independent \nevaluations or assessments of GAI models or systems where the type and \nrobustness of evaluations are proportional to the identi\ufb01ed risks. \nCBRN Information or Capabilities; \nHarmful Bias and Homogenization \nGV-3.2-002 \nConsider adjustment of organizational roles and components across lifecycle \nstages of large or complex GAI systems, including: Test and evaluation, validation, \nand red-teaming of GAI systems; GAI content moderation; GAI system \ndevelopment and engineering; Increased accessibility of GAI tools, interfaces, and \nsystems, Incident response and containment. \nHuman-AI Con\ufb01guration; \nInformation Security; Harmful Bias \nand Homogenization \nGV-3.2-003", "2df14ea9-2d95-4d2a-a898-51924df1d539": "7 \nunethical behavior. Text-to-image models also make it easy to create images that could be used to \npromote dangerous or violent messages. Similar concerns are present for other GAI media, including \nvideo and audio. GAI may also produce content that recommends self-harm or criminal/illegal activities. \nMany current systems restrict model outputs to limit certain content or in response to certain prompts, \nbut this approach may still produce harmful recommendations in response to other less-explicit, novel \nprompts (also relevant to CBRN Information or Capabilities, Data Privacy, Information Security, and \nObscene, Degrading and/or Abusive Content). Crafting such prompts deliberately is known as \n\u201cjailbreaking,\u201d or, manipulating prompts to circumvent output controls. Limitations of GAI systems can be \nharmful or dangerous in certain contexts. 
Studies have observed that users may disclose mental health", "9e6e5d45-3bc3-48da-b43d-f8038b9cde7b": "Suggested Action \nGAI Risks \nMG-3.1-001 \nApply organizational risk tolerances and controls (e.g., acquisition and \nprocurement processes; assessing personnel credentials and quali\ufb01cations, \nperforming background checks; \ufb01ltering GAI input and outputs, grounding, \ufb01ne \ntuning, retrieval-augmented generation) to third-party GAI resources: Apply \norganizational risk tolerance to the utilization of third-party datasets and other \nGAI resources; Apply organizational risk tolerances to \ufb01ne-tuned third-party \nmodels; Apply organizational risk tolerance to existing third-party models \nadapted to a new domain; Reassess risk measurements after \ufb01ne-tuning third-\nparty GAI models. \nValue Chain and Component \nIntegration; Intellectual Property \nMG-3.1-002 \nTest GAI system value chain risks (e.g., data poisoning, malware, other software \nand hardware vulnerabilities; labor practices; data privacy and localization \ncompliance; geopolitical alignment). \nData Privacy; Information Security; \nValue Chain and Component", "baca4be8-4a8b-4a79-9934-69c4047b87fb": "Priorities Related to Information Integrity Research and Development.", "39c0388e-6071-4e28-a76a-89954c7a76cd": "Capabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval (\u201cgo/\u201dno-go\u201d) policies, procedures, and processes, \nwith reviewed processes and approval thresholds re\ufb02ecting measurement of GAI \ncapabilities and risks. \nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or o\ufb00ensive cyber capabilities. \nCBRN Information or Capabilities; \nInformation Security", "6616086f-0d37-4a95-8d4e-56b9a0c20ae7": "while simultaneously enhancing the security effectiveness capabilities of the existing technology. \n\u2022\nThe National Disabled Law Students Association expressed concerns that individuals with disabilities were\nmore likely to be flagged as potentially suspicious by remote proctoring AI systems because of their disabili-\nty-specific access needs such as needing longer breaks or using screen readers or dictation software.45 \n\u2022\nAn algorithm designed to identify patients with high needs for healthcare systematically assigned lower\nscores (indicating that they were not as high need) to Black patients than to those of white patients, even\nwhen those patients had similar numbers of chronic conditions and other markers of health.46 In addition,\nhealthcare clinical algorithms that are used by physicians to guide clinical decisions may include\nsociodemographic variables that adjust or \u201ccorrect\u201d the algorithm\u2019s output on the basis of a patient\u2019s race or\nethnicity, which can lead to race-based health inequities.47\n25", "c7489ca0-62a7-4494-9c38-dc8c208a5226": "MANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. 
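GV-1.3-002, quoted a few chunks up, calls for minimum performance or assurance thresholds reviewed as part of go/no-go deployment approval. A sketch of such a gate, where the metric names and bounds are placeholders rather than values from the framework:

```python
# Minimal sketch of a "go/no-go" deployment gate in the spirit of GV-1.3-002:
# measured capabilities and risks are compared against minimum assurance
# thresholds before deployment is approved. Metric names and thresholds are
# hypothetical placeholders.
THRESHOLDS = {
    "groundedness":       ("min", 0.90),  # share of answers supported by retrieved context
    "refusal_on_unsafe":  ("min", 0.99),  # share of unsafe prompts correctly refused
    "confabulation_rate": ("max", 0.05),  # share of answers flagged as fabricated
}

def deployment_decision(measurements: dict) -> tuple[bool, list]:
    failures = []
    for metric, (kind, bound) in THRESHOLDS.items():
        value = measurements.get(metric)
        if value is None:
            failures.append(f"{metric}: not measured")
        elif kind == "min" and value < bound:
            failures.append(f"{metric}: {value:.3f} < {bound}")
        elif kind == "max" and value > bound:
            failures.append(f"{metric}: {value:.3f} > {bound}")
    return (not failures, failures)

go, reasons = deployment_decision(
    {"groundedness": 0.93, "refusal_on_unsafe": 0.97, "confabulation_rate": 0.04}
)
print("GO" if go else "NO-GO", reasons)
```

An unmeasured metric fails the gate here by design, matching the action's intent that approval reflect actual measurement of capabilities and risks.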
\nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a speci\ufb01c GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Con\ufb01guration", "80737253-4302-451d-a2f5-0b3f5dd957e8": "that resulted in sensitive data leaks; the number, type, and outcomes of ethical pre-reviews undertaken; a \ndescription of any data sold, shared, or made public, and how that data was assessed to determine it did not pres-\nent a sensitive data risk; and ongoing risk identification and management procedures, and any mitigation added \nbased on these procedures. Reporting should be provided in a clear and machine-readable manner. \n38", "0dbf8ebc-7f28-4e8b-b047-5c5399e261da": "help assess the authenticity, integrity, intellectual property rights, and potential manipulations in digital \ncontent. Some well-known techniques for provenance data tracking include digital watermarking, \nmetadata recording, digital \ufb01ngerprinting, and human authentication, among others. \nProvenance Data Tracking Approaches \nProvenance data tracking techniques for GAI systems can be used to track the history and origin of data \ninputs, metadata, and synthetic content. Provenance data tracking records the origin and history for \ndigital content, allowing its authenticity to be determined. It consists of techniques to record metadata \nas well as overt and covert digital watermarks on content. Data provenance refers to tracking the origin \nand history of input data through metadata and digital watermarking techniques. Provenance data \ntracking processes can include and assist AI Actors across the lifecycle who may not have full visibility or", "73ffeb66-1a26-4495-82ee-3a48226ba7f9": "60 \nZhang, Y. et al. (2023) Human favoritism, not AI aversion: People\u2019s perceptions (and bias) toward \ngenerative AI, human experts, and human\u2013GAI collaboration in persuasive content generation. Judgment \nand Decision Making. https://www.cambridge.org/core/journals/judgment-and-decision-\nmaking/article/human-favoritism-not-ai-aversion-peoples-perceptions-and-bias-toward-generative-ai-\nhuman-experts-and-humangai-collaboration-in-persuasive-content-\ngeneration/419C4BD9CE82673EAF1D8F6C350C4FA8 \nZhang, Y. et al. (2023) Siren\u2019s Song in the AI Ocean: A Survey on Hallucination in Large Language Models. \narXiv. https://arxiv.org/pdf/2309.01219 \nZhao, X. et al. (2023) Provable Robust Watermarking for AI-Generated Text. Semantic Scholar. \nhttps://www.semanticscholar.org/paper/Provable-Robust-Watermarking-for-AI-Generated-Text-Zhao-\nAnanth/75b68d0903af9d9f6e47ce3cf7e1a7d27ec811dc", "b85154a6-7baf-4d71-a85b-7d5874470889": "NIST Trustworthy and Responsible AI \nNIST AI 600-1 \nArtificial Intelligence Risk Management \nFramework: Generative Artificial \nIntelligence Profile \n \n \n \nThis publication is available free of charge from: \nhttps://doi.org/10.6028/NIST.AI.600-1", "7ff9ca89-d042-4d93-86ca-c819ce96f571": "2021. https://www.justice.gov/opa/pr/justice-department-announces-new-initiative-combat-redlining\n52. PAVE Interagency Task Force on Property Appraisal and Valuation Equity. Action Plan to Advance\nProperty Appraisal and Valuation Equity: Closing the Racial Wealth Gap by Addressing Mis-valuations for\nFamilies and Communities of Color. March 2022. 
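The provenance data tracking passage above names metadata recording and digital fingerprinting among the techniques for establishing the origin and history of digital content. A sketch combining the two, with illustrative field names (watermarking is omitted as out of scope for a few lines):

```python
# Minimal sketch of two provenance techniques named above: digital
# fingerprinting (a content hash) and metadata recording. Field names
# are illustrative only.
import hashlib
import json
from datetime import datetime, timezone

def provenance_record(content: bytes, source: str, generator: str) -> dict:
    """Fingerprint a piece of digital content and record origin metadata
    so its authenticity and history can be checked later."""
    return {
        "sha256": hashlib.sha256(content).hexdigest(),
        "source": source,
        "generator": generator,
        "recorded_at": datetime.now(timezone.utc).isoformat(),
    }

def verify(content: bytes, record: dict) -> bool:
    """Re-compute the fingerprint and compare against the stored record."""
    return hashlib.sha256(content).hexdigest() == record["sha256"]

record = provenance_record(b"generated caption ...", source="user-upload-42", generator="gai-model-v1")
print(json.dumps(record, indent=2))
print("authentic:", verify(b"generated caption ...", record))
print("tampered:", verify(b"edited caption ...", record))
```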
https://pave.hud.gov/sites/pave.hud.gov/files/\ndocuments/PAVEActionPlan.pdf\n53. U.S. Equal Employment Opportunity Commission. The Americans with Disabilities Act and the Use of\nSoftware, Algorithms, and Artificial Intelligence to Assess Job Applicants and Employees. EEOC\u00ad\nNVTA-2022-2. May 12, 2022. https://www.eeoc.gov/laws/guidance/americans-disabilities-act-and-use\u00ad\nsoftware-algorithms-and-artificial-intelligence; U.S. Department of Justice. Algorithms, Artificial\nIntelligence, and Disability Discrimination in Hiring. May 12, 2022. https://beta.ada.gov/resources/ai\u00ad\nguidance/", "9ce6023d-f47a-4508-b851-7bc92ec92e5d": "Public and Private Sector Uses of Biometric Technologies. Issued Oct. 8, 2021.\nhttps://www.federalregister.gov/documents/2021/10/08/2021-21975/notice-of-request-for\u00ad\ninformation-rfi-on-public-and-private-sector-uses-of-biometric-technologies\n114. National Artificial Intelligence Initiative Office. Public Input on Public and Private Sector Uses of\nBiometric Technologies. Accessed Apr. 19, 2022.\nhttps://www.ai.gov/86-fr-56300-responses/\n115. Thomas D. Olszewski, Lisa M. Van Pay, Javier F. Ortiz, Sarah E. Swiersz, and Laurie A. Dacus.\nSynopsis of Responses to OSTP\u2019s Request for Information on the Use and Governance of Biometric\nTechnologies in the Public and Private Sectors. Science and Technology Policy Institute. Mar. 2022.\nhttps://www.ida.org/-/media/feature/publications/s/sy/synopsis-of-responses-to-request-for\u00ad\ninformation-on-the-use-and-governance-of-biometric-technologies/ida-document-d-33070.ashx\n73", "09e1411f-8588-40fa-9a33-66cb9e9a8933": "WHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\u00ad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\u00ad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\u00ad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities.", "a1d58349-50d4-45c7-922b-3fb722db0852": "EQUITY: \u201cEquity\u201d means the consistent and systematic fair, just, and impartial treatment of all individuals. \nSystemic, fair, and just treatment must take into account the status of individuals who belong to underserved \ncommunities that have been denied such treatment, such as Black, Latino, and Indigenous and Native American \npersons, Asian Americans and Pacific Islanders and other persons of color; members of religious minorities; \nwomen, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and intersex (LGBTQI+) \npersons; older adults; persons with disabilities; persons who live in rural areas; and persons otherwise adversely \naffected by persistent poverty or inequality. \nRIGHTS, OPPORTUNITIES, OR ACCESS: \u201cRights, opportunities, or access\u201d is used to indicate the scoping \nof this framework. 
It describes the set of: civil rights, civil liberties, and privacy, including freedom of speech,", "9414a260-83d0-4bdc-8083-68512cb56f75": "14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. \nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organization\u2019s risk tolerance. \nAction ID \nSuggested Action \nGAI Risks", "e2a000b0-29a7-49a1-9c87-de674486a78a": "Implementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation\u00ad\nPathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for\u00ad\nthe-intelligence-community\n64", "5f733903-c4f4-41c6-8410-033c4e6b3390": "https://cdt.org/insights/how-automated-test-proctoring-software-discriminates-against-disabled\u00ad\nstudents/\n46. Ziad Obermeyer, et al., Dissecting racial bias in an algorithm used to manage the health of\npopulations, 366 Science (2019), https://www.science.org/doi/10.1126/science.aax2342.\n66", "9278423d-7d2c-4d91-8e74-2ff10c9952b1": "requirements and best practices, and re\ufb02ects risk management priorities. Consistent with other AI RMF \npro\ufb01les, this pro\ufb01le o\ufb00ers insights into how risk can be managed across various stages of the AI lifecycle \nand for GAI as a technology. \nAs GAI covers risks of models or applications that can be used across use cases or sectors, this document \nis an AI RMF cross-sectoral pro\ufb01le. Cross-sectoral pro\ufb01les can be used to govern, map, measure, and \nmanage risks associated with activities or business processes common across sectors, such as the use of \nlarge language models (LLMs), cloud-based services, or acquisition. \nThis document de\ufb01nes risks that are novel to or exacerbated by the use of GAI. After introducing and \ndescribing these risks, the document provides a set of suggested actions to help organizations govern, \nmap, measure, and manage these risks. \n \n \n1 EO 14110 de\ufb01nes Generative AI as \u201cthe class of AI models that emulate the structure and characteristics of input", "c8290368-e6cc-4c11-86e8-211b0f6ff429": "30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. 
\nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output \ufb01lters; Removing any personally \nidenti\ufb01able information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nCon\ufb01guration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their", "92689676-f99d-4f3a-aedb-82a851140cf7": "identified users, and impacted populations; the assessment of notice clarity and timeliness; the assessment of \nthe explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment \nof how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of \nrisk. Individualized profile information should be made readily available to the greatest extent possible that \nincludes explanations for any system impacts or inferences. Reporting should be provided in a clear plain \nlanguage and machine-readable manner. \n44", "1ef77bcd-1235-4349-a57f-76304c020694": "consortia have published principles and frameworks to guide the responsible use of automated systems; this \nframework provides a national values statement and toolkit that is sector-agnostic to inform building these \nprotections into policy, practice, or the technological design process. Where existing law or policy\u2014such as \nsector-specific privacy laws and oversight requirements\u2014do not already provide guidance, the Blueprint for an \nAI Bill of Rights should be used to inform policy decisions.\nLISTENING TO THE AMERICAN PUBLIC\nThe White House Office of Science and Technology Policy has led a year-long process to seek and distill input \nfrom people across the country\u2014from impacted communities and industry stakeholders to technology develop-\ners and other experts across fields and sectors, as well as policymakers throughout the Federal government\u2014on \nthe issue of algorithmic and data-driven harms and potential remedies. Through panel discussions, public listen-", "2d23ffb7-65db-44d5-bb6b-31fffc00e7bc": "RIGHTS, OPPORTUNITIES, OR ACCESS\nCivil rights, civil liberties, and privacy, including freedom of speech, voting, and protections from discrimi\u00ad\nnation, excessive punishment, unlawful surveillance, and violations of privacy and other freedoms in both \npublic and private sector contexts; \nEqual opportunities, including equitable access to education, housing, credit, employment, and other \nprograms; or, \nAccess to critical resources or services, such as healthcare, financial services, safety, social services, \nnon-deceptive information about goods and services, and government benefits. \nA list of examples of automated systems for which these principles should be considered is provided in the \nAppendix. The Technical Companion, which follows, offers supportive guidance for any person or entity that \ncreates, deploys, or oversees automated systems. \nConsidered together, the five principles and associated practices of the Blueprint for an AI Bill of", "47d45fac-167f-4abe-85ba-c592dfc54e76": "31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. 
\nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Con\ufb01guration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, \ufb01ne-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Con\ufb01guration", "ba676a38-8c2d-4a8d-9b84-d5dc34be9ad5": "control over the various trade-o\ufb00s and cascading impacts of early-stage model decisions on downstream \nperformance and synthetic outputs. For example, by selecting a watermarking model to prioritize \nrobustness (the durability of a watermark), an AI actor may inadvertently diminish computational \ncomplexity (the resources required to implement watermarking). Organizational risk management \ne\ufb00orts for enhancing content provenance include: \n\u2022 \nTracking provenance of training data and metadata for GAI systems; \n\u2022 \nDocumenting provenance data limitations within GAI systems;", "f5b7c80b-9987-4c1a-90b7-40725e445d68": "Shared Statement Of Civil Rights Concerns. Jul. 30, 2018. http://civilrightsdocs.info/pdf/criminal-justice/\nPretrial-Risk-Assessment-Short.pdf; https://civilrights.org/edfund/pretrial-risk-assessments/\n29. Idaho Legislature. House Bill 118. Jul. 1, 2019. https://legislature.idaho.gov/sessioninfo/2019/\nlegislation/H0118/\n30. See, e.g., Executive Office of the President. Big Data: A Report on Algorithmic Systems, Opportunity, and\nCivil Rights. May, 2016. https://obamawhitehouse.archives.gov/sites/default/files/microsites/\nostp/2016_0504_data_discrimination.pdf; Cathy O\u2019Neil. Weapons of Math Destruction. Penguin Books.\n2017. https://en.wikipedia.org/wiki/Weapons_of_Math_Destruction; Ruha Benjamin. Race After\nTechnology: Abolitionist Tools for the New Jim Code. Polity. 2019. https://www.ruhabenjamin.com/race\u00ad\nafter-technology\n31. See, e.g., Kashmir Hill. Another Arrest, and Jail Time, Due to a Bad Facial Recognition Match: A New", "1ad6e133-af0d-462b-8fcf-c037919602b6": "who have trouble with the automated system are able to use human consideration and fallback, with the under\u00ad\nstanding that it may be these users who are most likely to need the human assistance. Similarly, it should be \ntested to ensure that users with disabilities are able to find and use human consideration and fallback and also \nrequest reasonable accommodations or modifications. \nConvenient. Mechanisms for human consideration and fallback should not be unreasonably burdensome as \ncompared to the automated system\u2019s equivalent. \n49", "3c014cdb-93a6-4ced-a0b2-4da9ff5e1441": "Integrity; Information Security \nMS-2.6-006 \nVerify that systems properly handle queries that may give rise to inappropriate, \nmalicious, or illegal usage, including facilitating manipulation, extortion, targeted \nimpersonation, cyber-attacks, and weapons creation. 
\nCBRN Information or Capabilities; \nInformation Security \nMS-2.6-007 Regularly evaluate GAI system vulnerabilities to possible circumvention of safety \nmeasures. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV", "cf73a083-880a-4afc-86b6-5667ae73d419": "where possible, available before the harm occurs. Time-critical systems include, but are not limited to, \nvoting-related systems, automated building access and other access systems, systems that form a critical \ncomponent of healthcare, and systems that have the ability to withhold wages or otherwise cause \nimmediate financial penalties. \nEffective. The organizational structure surrounding processes for consideration and fallback should \nbe designed so that if the human decision-maker charged with reassessing a decision determines that it \nshould be overruled, the new decision will be effectively enacted. This includes ensuring that the new \ndecision is entered into the automated system throughout its components, any previous repercussions from \nthe old decision are also overturned, and safeguards are put in place to help ensure that future decisions do \nnot result in the same errors. \nMaintained. The human consideration and fallback process and any associated automated processes", "cb37af2e-5517-4a81-8049-07d3d61ad295": "Information Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-de\ufb01ned contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property", "2183508f-9a9e-41b1-a517-f2fc17931b63": "27 \nMP-4.1-010 \nConduct appropriate diligence on training data use to assess intellectual property, \nand privacy, risks, including to examine whether use of proprietary or sensitive \ntraining data is consistent with applicable laws. \nIntellectual Property; Data Privacy \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring, Procurement, Third-party entities \n \nMAP 5.1: Likelihood and magnitude of each identi\ufb01ed impact (both potentially bene\ufb01cial and harmful) based on expected use, past \nuses of AI systems in similar contexts, public incident reports, feedback from those external to the team that developed or deployed \nthe AI system, or other data are identi\ufb01ed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.1-001 Apply TEVV practices for content provenance (e.g., probing a system's synthetic \ndata generation capabilities for potential misuse or vulnerabilities). \nInformation Integrity; Information \nSecurity \nMP-5.1-002", "39d7af6a-7298-47bc-a608-55c4a9d09d31": "security, health, foreign relations, the environment, and the technological recovery and use of resources, among \nother topics. OSTP leads interagency science and technology policy coordination efforts, assists the Office of \nManagement and Budget (OMB) with an annual review and analysis of Federal research and development in \nbudgets, and serves as a source of scientific and technological analysis and judgment for the President with \nrespect to major policies, plans, and programs of the Federal Government. 
\nLegal Disclaimer \nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People is a white paper \npublished by the White House Office of Science and Technology Policy. It is intended to support the \ndevelopment of policies and practices that protect civil rights and promote democratic values in the building, \ndeployment, and governance of automated systems. \nThe Blueprint for an AI Bill of Rights is non-binding and does not constitute U.S. government policy. It", "7ee6f299-9e32-45b6-8a95-f63af7ab302d": "8 \nTrustworthy AI Characteristics: Accountable and Transparent, Privacy Enhanced, Safe, Secure and \nResilient \n2.5. Environmental Impacts \nTraining, maintaining, and operating (running inference on) GAI systems are resource-intensive activities, \nwith potentially large energy and environmental footprints. Energy and carbon emissions vary based on \nwhat is being done with the GAI model (i.e., pre-training, \ufb01ne-tuning, inference), the modality of the \ncontent, hardware used, and type of task or application. \nCurrent estimates suggest that training a single transformer LLM can emit as much carbon as 300 round-\ntrip \ufb02ights between San Francisco and New York. In a study comparing energy consumption and carbon \nemissions for LLM inference, generative tasks (e.g., text summarization) were found to be more energy- \nand carbon-intensive than discriminative or non-generative tasks (e.g., text classi\ufb01cation). \nMethods for creating smaller versions of trained models, such as model distillation or compression,", "56387bf7-3970-47f2-a78c-1d685ff6ff3c": "these technologies, various panelists emphasized that transparency is important but is not enough to achieve \naccountability. Some panelists discussed their individual views on additional system needs for validity, and \nagreed upon the importance of advisory boards and compensated community input early in the design process \n(before the technology is built and instituted). Various panelists also emphasized the importance of regulation \nthat includes limits to the type and cost of such technologies. \n56", "9d4dae2d-1304-42fd-853c-5e8a87dfd8ca": "MANAGE 4.1: Post-deployment AI system monitoring plans are implemented, including mechanisms for capturing and evaluating \ninput from users and other relevant AI Actors, appeal and override, decommissioning, incident response, recovery, and change \nmanagement. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.1-001 \nCollaborate with external researchers, industry experts, and community \nrepresentatives to maintain awareness of emerging best practices and \ntechnologies in measuring and managing identi\ufb01ed risks. \nInformation Integrity; Harmful Bias \nand Homogenization \nMG-4.1-002 \nEstablish, maintain, and evaluate e\ufb00ectiveness of organizational processes and \nprocedures for post-deployment monitoring of GAI systems, particularly for \npotential confabulation, CBRN, or cyber risks. \nCBRN Information or Capabilities; \nConfabulation; Information \nSecurity \nMG-4.1-003 \nEvaluate the use of sentiment analysis to gauge user sentiment regarding GAI", "225dc155-5855-48c7-8617-a254366cfdbd": "against Black patients, who generally have less access to medical care and therefore have generated less cost \nthan white patients with similar illness and need. 
A landmark study documented this pattern and proposed \npractical ways that were shown to reduce this bias, such as focusing specifically on active chronic health \nconditions or avoidable future costs related to emergency visits and hospitalization.54 \nLarge employers have developed best practices to scrutinize the data and models used \nfor hiring. An industry initiative has developed Algorithmic Bias Safeguards for the Workforce, a structured \nquestionnaire that businesses can use proactively when procuring software to evaluate workers. It covers \nspecific technical questions such as the training data used, model training process, biases identified, and \nmitigation steps employed.55 \nStandards organizations have developed guidelines to incorporate accessibility criteria", "4b9f6363-19fe-4e5d-b658-d8ffcacc03d5": "arXiv. https://arxiv.org/pdf/2308.14752 \nPartnership on AI (2023) Building a Glossary for Synthetic Media Transparency Methods, Part 1: Indirect \nDisclosure. https://partnershiponai.org/glossary-for-synthetic-media-transparency-methods-part-1-\nindirect-disclosure/ \nQu, Y. et al. (2023) Unsafe Di\ufb00usion: On the Generation of Unsafe Images and Hateful Memes From Text-\nTo-Image Models. arXiv. https://arxiv.org/pdf/2305.13873 \nRafat, K. et al. (2023) Mitigating carbon footprint for knowledge distillation based deep learning model \ncompression. PLOS One. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0285668 \nSaid, I. et al. (2022) Nonconsensual Distribution of Intimate Images: Exploring the Role of Legal Attitudes \nin Victimization and Perpetration. Sage. \nhttps://journals.sagepub.com/doi/full/10.1177/08862605221122834#bibr47-08862605221122834 \nSandbrink, J. (2023) Arti\ufb01cial intelligence and biological misuse: Di\ufb00erentiating risks of language models", "f3b2640d-2aa6-4a43-bed5-77c9f7e3d4a5": "narrowly defined contexts with clear benefits to the individual and/or society. \nTo this end, automated systems that collect, use, share, or store data related to these sensitive domains should meet \nadditional expectations. Data and metadata are sensitive if they pertain to an individual in a sensitive domain (defined \nbelow); are generated by technologies used in a sensitive domain; can be used to infer data from a sensitive domain or \nsensitive data about an individual (such as disability-related data, genomic data, biometric data, behavioral data, \ngeolocation data, data related to interaction with the criminal justice system, relationship history and legal status such \nas custody and divorce information, and home, work, or school environmental data); or have the reasonable potential \nto be used in ways that are likely to expose individuals to meaningful harm, such as a loss of privacy or financial harm", "1509d387-1fa9-4049-ade6-b412686b43fd": "39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Con\ufb01guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. 
\nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Con\ufb01guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, A\ufb00ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV", "66b30463-4011-43f5-8112-826208de6c25": "DATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n\u2022\nAn insurer might collect data from a person's social media presence as part of deciding what life\ninsurance rates they should be offered.64\n\u2022\nA data broker harvested large amounts of personal data and then suffered a breach, exposing hundreds of\nthousands of people to potential identity theft. 65\n\u2022\nA local public housing authority installed a facial recognition system at the entrance to housing complexes to\nassist law enforcement with identifying individuals viewed via camera when police reports are filed, leading\nthe community, both those living in the housing complex and not, to have videos of them sent to the local\npolice department and made available for scanning by its facial recognition software.66\n\u2022\nCompanies use surveillance software to track employee discussions about union activity and use the", "182ea1c4-9823-4498-8afb-5fed66f8afa4": "A.1.3. Third-Party Considerations \nOrganizations may seek to acquire, embed, incorporate, or use open-source or proprietary third-party \nGAI models, systems, or generated data for various applications across an enterprise. Use of these GAI \ntools and inputs has implications for all functions of the organization \u2013 including but not limited to \nacquisition, human resources, legal, compliance, and IT services \u2013 regardless of whether they are carried \nout by employees or third parties. Many of the actions cited above are relevant and options for \naddressing third-party considerations. \nThird party GAI integrations may give rise to increased intellectual property, data privacy, or information \nsecurity risks, pointing to the need for clear guidelines for transparency and risk management regarding \nthe collection and use of third-party data for model inputs. Organizations may consider varying risk \ncontrols for foundation models, \ufb01ne-tuned models, and embedded tools, enhanced processes for", "4fa78531-4c21-4163-9d75-1fbae52d1d1e": "was demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42", "dae39aa6-9cbc-4da4-8222-ceafa8a0a43d": "in\ufb02uence-humans-too/ \nEpstein, Z. et al. (2023). Art and the science of generative AI. Science. \nhttps://www.science.org/doi/10.1126/science.adh4451 \nFe\ufb00er, M. et al. (2024) Red-Teaming for Generative AI: Silver Bullet or Security Theater? arXiv. \nhttps://arxiv.org/pdf/2401.15897 \nGlazunov, S. et al. (2024) Project Naptime: Evaluating O\ufb00ensive Security Capabilities of Large Language \nModels. Project Zero. https://googleprojectzero.blogspot.com/2024/06/project-naptime.html \nGreshake, K. et al. (2023) Not what you've signed up for: Compromising Real-World LLM-Integrated \nApplications with Indirect Prompt Injection. arXiv. https://arxiv.org/abs/2302.12173 \nHagan, M. (2024) Good AI Legal Help, Bad AI Legal Help: Establishing quality standards for responses to \npeople\u2019s legal problem stories. SSRN. 
https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4696936 \nHaran, R. (2023) Securing LLM Systems Against Prompt Injection. NVIDIA. \nhttps://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection/", "33c2c28e-8966-4c61-8bd6-cc54ebe272fd": "45 \nMG-4.1-007 \nVerify that AI Actors responsible for monitoring reported issues can e\ufb00ectively \nevaluate GAI system performance including the application of content \nprovenance data tracking techniques, and promptly escalate issues for response. \nHuman-AI Con\ufb01guration; \nInformation Integrity \nAI Actor Tasks: AI Deployment, A\ufb00ected Individuals and Communities, Domain Experts, End-Users, Human Factors, Operation and \nMonitoring \n \nMANAGE 4.2: Measurable activities for continual improvements are integrated into AI system updates and include regular \nengagement with interested parties, including relevant AI Actors. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.2-001 Conduct regular monitoring of GAI systems and publish reports detailing the \nperformance, feedback received, and improvements made. \nHarmful Bias and Homogenization \nMG-4.2-002 \nPractice and follow incident response plans for addressing the generation of \ninappropriate or harmful content and adapt processes based on \ufb01ndings to", "f926ae48-8dda-479c-9348-124b494c0446": "21 \nGV-6.1-005 \nImplement a use-cased based supplier risk assessment framework to evaluate and \nmonitor third-party entities\u2019 performance and adherence to content provenance \nstandards and technologies to detect anomalies and unauthorized changes; \nservices acquisition and value chain risk management; and legal compliance. \nData Privacy; Information \nIntegrity; Information Security; \nIntellectual Property; Value Chain \nand Component Integration \nGV-6.1-006 Include clauses in contracts which allow an organization to evaluate third-party \nGAI processes and standards. \nInformation Integrity \nGV-6.1-007 Inventory all third-party entities with access to organizational content and \nestablish approved GAI technology and service provider lists. \nValue Chain and Component \nIntegration \nGV-6.1-008 Maintain records of changes to content made by third parties to promote content \nprovenance, including sources, timestamps, metadata. \nInformation Integrity; Value Chain \nand Component Integration; \nIntellectual Property", "ad86da1d-b377-4e66-ae6e-bf0697f3c5ca": "consent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nCon\ufb01guration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, di\ufb00erential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nCon\ufb01guration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for \ufb01ne tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. 
\nConfabulation; Information \nSecurity", "b10e392b-f29b-4aa9-a4d6-339ee55d6518": "Data Privacy; Intellectual \nProperty", "6f6d8ce3-01ef-4bfa-a9e1-20c5125a338a": "or on an existing credit account.\"90 In addition, under the risk-based pricing rule,91 lenders must either inform \nborrowers of their credit score, or else tell consumers when \"they are getting worse terms because of \ninformation in their credit report.\" The CFPB has also asserted that \"[t]he law gives every applicant the right to \na specific explanation if their application for credit was denied, and that right is not diminished simply because \na company uses a complex algorithm that it doesn't understand.\"92 Such explanations illustrate a shared value \nthat certain decisions need to be explained. \nA California law requires that warehouse employees are provided with notice and explanation about quotas, potentially facilitated by automated systems, that apply to them. Warehousing employers in California that use quota systems (often facilitated by algorithmic monitoring systems) are \nrequired to provide employees with a written description of each quota that applies to the employee, including", "0d87b088-8909-47ef-9705-c44f991c695e": "Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54", "948acbdd-82ea-43ec-b9f4-c20ea2ce234d": "find notices and explanations, read them quickly, and understand and act on them. This includes ensuring that \nnotices and explanations are accessible to users with disabilities and are available in the language(s) and reading level appropriate for the audience. Notices and explanations may need to be available in multiple forms, \n(e.g., on paper, on a physical sign, or online), in order to meet these expectations and to be accessible to the \nAmerican public. \nProvide explanations as to how and why a decision was made or an action was taken by an \nautomated system \nTailored to the purpose. Explanations should be tailored to the specific purpose for which the user is \nexpected to use the explanation, and should clearly state that purpose. An informational explanation might \ndiffer from an explanation provided to allow for the possibility of recourse, an appeal, or one provided in the \ncontext of a dispute or contestation process. For the purposes of this framework, 'explanation' should be", "07b0149e-d8de-4787-a581-784029c5ddd1": "ENDNOTES\n35. Carrie Johnson. Flaws plague a tool meant to help low-risk federal prisoners win early release. NPR.\nJan. 26, 2022. https://www.npr.org/2022/01/26/1075509175/flaws-plague-a-tool-meant-to-help-low-risk-federal-prisoners-win-early-release.; Carrie Johnson. Justice Department works to curb racial bias\nin deciding who's released from prison. NPR. Apr. 19, 2022. 
https://\nwww.npr.org/2022/04/19/1093538706/justice-department-works-to-curb-racial-bias-in-deciding-whos-released-from-pris; National Institute of Justice. 2021 Review and Revalidation of the First Step Act\nRisk Assessment Tool. National Institute of Justice NCJ 303859. Dec., 2021. https://www.ojp.gov/\npdffiles1/nij/303859.pdf\n36. Andrew Thompson. Google\u2019s Sentiment Analyzer Thinks Being Gay Is Bad. Vice. Oct. 25, 2017. https://\nwww.vice.com/en/article/j5jmj8/google-artificial-intelligence-bias\n37. Kaggle. Jigsaw Unintended Bias in Toxicity Classification: Detect toxicity across a diverse range of", "d1f75864-ad74-4188-a2f0-625b3b34dd62": "given a brief, clear notice that they are entitled to opt-out, along with clear instructions for how to opt-out. \nInstructions should be provided in an accessible form and should be easily findable by those impacted by the \nautomated system. The brevity, clarity, and accessibility of the notice and instructions should be assessed (e.g., \nvia user experience research). \nHuman alternatives provided when appropriate. In many scenarios, there is a reasonable expectation \nof human involvement in attaining rights, opportunities, or access. When automated systems make up part of \nthe attainment process, alternative timely human-driven processes should be provided. The use of a human \nalternative should be triggered by an opt-out process. \nTimely and not burdensome human alternative. Opting out should be timely and not unreasonably \nburdensome in both the process of requesting to opt-out and the human-driven alternative provided.", "d5430903-a32f-42c0-908e-1b1b0caa72eb": "Power. Public Affairs. 2019.\n64. Angela Chen. Why the Future of Life Insurance May Depend on Your Online Presence. The Verge. Feb.\n7, 2019.\nhttps://www.theverge.com/2019/2/7/18211890/social-media-life-insurance-new-york-algorithms-big-data-discrimination-online-records\n68", "8c7c6cd5-965a-408f-b1fc-cba59e93879d": "technical companion is intended to be used as a reference by people across many circumstances \u2013 anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. \nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS: \n\u2022 The expectations for automated systems are meant to serve as a blueprint for the development of additional technical\nstandards and practices that should be tailored for particular sectors and contexts.\n\u2022 This section outlines practical steps that can be implemented to realize the vision of the Blueprint for an AI Bill of Rights. The \nexpectations laid out often mirror existing practices for technology development, including pre-deployment testing, ongoing", "e1311e28-6431-4f7d-b026-99aa986c05f9": "capabilities, competencies, demographic groups, domain expertise, educational \nbackgrounds, lived experiences, professions, and skills across the enterprise to \ninform and conduct risk measurement and management functions. 
\nHuman-AI Con\ufb01guration; Harmful \nBias and Homogenization \nMP-1.2-002 \nVerify that data or benchmarks used in risk measurement, and users, \nparticipants, or subjects involved in structured GAI public feedback exercises \nare representative of diverse in-context user populations. \nHuman-AI Con\ufb01guration; Harmful \nBias and Homogenization \nAI Actor Tasks: AI Deployment", "22760b2b-f237-4c9b-8e4b-6c86d4043388": "evaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs). \nInformation Integrity", "714b25fc-f9c6-4d83-b067-6f0c22b4a6f0": "65. See, e.g., Scott Ikeda. Major Data Broker Exposes 235 Million Social Media Profiles in Data Leak: Info\nAppears to Have Been Scraped Without Permission. CPO Magazine. Aug. 28, 2020. https://\nwww.cpomagazine.com/cyber-security/major-data-broker-exposes-235-million-social-media-profiles-in-data-leak/; Lily Hay Newman. 1.2 Billion Records Found Exposed Online in a Single Server. WIRED,\nNov. 22, 2019. https://www.wired.com/story/billion-records-exposed-online/\n66. Lola Fadulu. Facial Recognition Technology in Public Housing Prompts Backlash. New York Times.\nSept. 24, 2019.\nhttps://www.nytimes.com/2019/09/24/us/politics/facial-recognition-technology-housing.html\n67. Jo Constantz. \u2018They Were Spying On Us\u2019: Amazon, Walmart, Use Surveillance Technology to Bust\nUnions. Newsweek. Dec. 13, 2021.\nhttps://www.newsweek.com/they-were-spying-us-amazon-walmart-use-surveillance-technology-bust-unions-1658603\n68. See, e.g., enforcement actions by the FTC against the photo storage app Everalbaum", "08545214-7d84-4218-a3c8-92b8a4f288d8": "organization\u2019s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,", "4318fa7a-1dda-469f-9fb2-e36092d0bb7b": "Integration \nMP-4.1-008 \nRe-evaluate risks when adapting GAI models to new domains. Additionally, \nestablish warning systems to determine if a GAI system is being used in a new \ndomain where previous assumptions (relating to context of use or mapped risks \nsuch as security, and safety) may no longer hold. \nCBRN Information or Capabilities; \nIntellectual Property; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-009 Leverage approaches to detect the presence of PII or sensitive data in generated \noutput text, image, video, or audio. \nData Privacy", "d282c21a-510c-4f1c-b774-28375bfd6162": "https://arxiv.org/pdf/2308.13387 \nWardle, C. et al. (2017) Information Disorder: Toward an interdisciplinary framework for research and \npolicy making. Council of Europe. 
https://rm.coe.int/information-disorder-toward-an-interdisciplinary-\nframework-for-researc/168076277c \nWeatherbed, J. (2024) Trolls have \ufb02ooded X with graphic Taylor Swift AI fakes. The Verge. \nhttps://www.theverge.com/2024/1/25/24050334/x-twitter-taylor-swift-ai-fake-images-trending \nWei, J. et al. (2024) Long Form Factuality in Large Language Models. arXiv. \nhttps://arxiv.org/pdf/2403.18802 \nWeidinger, L. et al. (2021) Ethical and social risks of harm from Language Models. arXiv. \nhttps://arxiv.org/pdf/2112.04359 \nWeidinger, L. et al. (2023) Sociotechnical Safety Evaluation of Generative AI Systems. arXiv. \nhttps://arxiv.org/pdf/2310.11986 \nWeidinger, L. et al. (2022) Taxonomy of Risks posed by Language Models. FAccT \u201922. \nhttps://dl.acm.org/doi/pdf/10.1145/3531146.3533088", "e24fe8bc-2648-4d4a-8e40-01161440f8fe": "35 \nMEASURE 2.9: The AI model is explained, validated, and documented, and AI system output is interpreted within its context \u2013 as \nidenti\ufb01ed in the MAP function \u2013 to inform responsible use and governance. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.9-001 \nApply and document ML explanation results such as: Analysis of embeddings, \nCounterfactual prompts, Gradient-based attributions, Model \ncompression/surrogate models, Occlusion/term reduction. \nConfabulation \nMS-2.9-002 \nDocument GAI model details including: Proposed use and organizational value; \nAssumptions and limitations, Data collection methodologies; Data provenance; \nData quality; Model architecture (e.g., convolutional neural network, \ntransformers, etc.); Optimization objectives; Training algorithms; RLHF \napproaches; Fine-tuning or retrieval-augmented generation approaches; \nEvaluation data; Ethical considerations; Legal and regulatory requirements. \nInformation Integrity; Harmful Bias \nand Homogenization", "a7a1ff30-7a4a-40c5-8ed2-58fc42094e5c": "SAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nWhile technologies are being deployed to solve problems across a wide array of issues, our reliance on technology can \nalso lead to its use in situations where it has not yet been proven to work\u2014either at all or within an acceptable range \nof error. In other cases, technologies do not work as intended or as promised, causing substantial and unjustified harm. \nAutomated systems sometimes rely on data from other systems, including historical data, allowing irrelevant information from past decisions to infect decision-making in unrelated situations. In some cases, technologies are purposefully designed to violate the safety of others, such as technologies designed to facilitate stalking; in other cases, intended \nor unintended uses lead to unintended harms.", "c4246db7-6c73-48a5-aba8-2216ba9a71d5": "moving principles into practice. \nThe expectations given in the Technical Companion are meant to serve as a blueprint for the development of \nadditional technical standards and practices that should be tailored for particular sectors and contexts. While \nexisting laws informed the development of the Blueprint for an AI Bill of Rights, this framework does not detail \nthose laws beyond providing them as examples, where appropriate, of existing protective measures. 
This \nframework instead shares a broad, forward-leaning vision of recommended principles for automated system \ndevelopment and use to inform private and public involvement with these systems where they have the potential to meaningfully impact rights, opportunities, or access. Additionally, this framework does not analyze or \ntake a position on legislative and regulatory proposals in municipal, state, and federal government, or those in \nother countries.", "d0d99450-83ba-48c6-8f9e-3935192d7a48": "monitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n\u2022 Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguarding individuals\u2019 rights. These reporting expectations are important for transparency, so the American people can have", "3836563b-34f7-43bb-be66-5bf8842759b9": "10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards speci\ufb01c demographics. Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic \u201cdeepfakes\u201d \u2013 that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities.", "0c4eee0b-95f6-4546-9277-8d41b2128ead": "Human-AI Con\ufb01guration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, A\ufb00ected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-party\u2019s intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize di\ufb00erent types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. 
\nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management e\ufb00orts with third parties (e.g., incidents detected and \nresponse times). \nInformation Integrity; Value Chain", "03c8bb9f-e7b9-4e4f-93d6-3a31ac3c4348": "access needs. \nReporting. When members of the public wish to know what data about them is being used in a system, the \nentity responsible for the development of the system should respond quickly with a report on the data it has \ncollected or stored about them. Such a report should be machine-readable, understandable by most users, and \ninclude, to the greatest extent allowable under law, any data and metadata about them or collected from them, \nwhen and how their data and metadata were collected, the specific ways that data or metadata are being used, \nwho has access to their data and metadata, and what time limitations apply to these data. In cases where a user \nlogin is not available, identity verification may need to be performed before providing such a report to ensure \nuser privacy. Additionally, summary reporting should be proactively made public with general information \nabout how peoples\u2019 data and metadata is used, accessed, and stored. Summary reporting should include the", "913ec1ef-ecb8-499f-bbd0-c79b8355fd1c": "Applying The Blueprint for an AI Bill of Rights \nDEFINITIONS\nALGORITHMIC DISCRIMINATION: \u201cAlgorithmic discrimination\u201d occurs when automated systems \ncontribute to unjustified different treatment or impacts disfavoring people based on their race, color, ethnicity, \nsex (including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classification protected by law. Depending on the specific circumstances, such algorithmic discrimination may violate \nlegal protections. Throughout this framework the term \u201calgorithmic discrimination\u201d takes this meaning (and \nnot a technical understanding of discrimination as distinguishing between items). \nAUTOMATED SYSTEM: An \"automated system\" is any system, software, or process that uses computation as \nwhole or part of a system to determine outcomes, make or aid decisions, inform policy implementation, collect", "81258dfa-876d-4eae-befc-9ecdeb596c38": "12 \nCSAM. Even when trained on \u201cclean\u201d data, increasingly capable GAI models can synthesize or produce \nsynthetic NCII and CSAM. Websites, mobile apps, and custom-built models that generate synthetic NCII \nhave moved from niche internet forums to mainstream, automated, and scaled online businesses. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Safe, Privacy Enhanced \n2.12. \nValue Chain and Component Integration \nGAI value chains involve many third-party components such as procured datasets, pre-trained models, \nand software libraries. These components might be improperly obtained or not properly vetted, leading \nto diminished transparency or accountability for downstream users. 
While this is a risk for traditional AI \nsystems and some other digital technologies, the risk is exacerbated for GAI due to the scale of the \ntraining data, which may be too large for humans to vet; the di\ufb03culty of training foundation models,", "db999b51-bd0e-46d4-9202-cff81afad964": "38 \nMEASURE 2.13: E\ufb00ectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric e\ufb00ectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are di\ufb03cult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001", "45268e73-21c7-4a9a-b026-a97f0f1e299b": "prevent future occurrences. Conduct post-mortem analyses of incidents with \nrelevant AI Actors, to understand the root causes and implement preventive \nmeasures. \nHuman-AI Con\ufb01guration; \nDangerous, Violent, or Hateful \nContent \nMG-4.2-003 Use visualizations or other methods to represent GAI model behavior to ease \nnon-technical stakeholders understanding of GAI system functionality. \nHuman-AI Con\ufb01guration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, A\ufb00ected Individuals and Communities, End-Users, Operation and \nMonitoring, TEVV \n \nMANAGE 4.3: Incidents and errors are communicated to relevant AI Actors, including a\ufb00ected communities. Processes for tracking, \nresponding to, and recovering from incidents and errors are followed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.3-001 \nConduct after-action assessments for GAI system incidents to verify incident \nresponse and recovery processes are followed and e\ufb00ective, including to follow", "c8d3ea4b-63fa-4698-b7c2-63895c628511": "in whole or in part may not be appropriate given the intended use of automated systems to achieve government \nagency missions. Future sector-specific guidance will likely be necessary and important for guiding the use of \nautomated systems in certain settings such as AI systems used as part of school building security or automated \nhealth diagnostic systems. \nThe Blueprint for an AI Bill of Rights recognizes that law enforcement activities require a balancing of \nequities, for example, between the protection of sensitive law enforcement information and the principle of \nnotice; as such, notice may not be appropriate, or may need to be adjusted to protect sources, methods, and \nother law enforcement equities. Even in contexts where these principles may not apply in whole or in part, \nfederal departments and agencies remain subject to judicial, privacy, and civil liberties oversight as well as \nexisting policies and safeguards that govern automated systems, including, for example, Executive Order 13960,", "51e515fd-fd17-447c-a07f-ff68b43cc751": "https://www.mandiant.com/resources/blog/securing-ai-pipeline \nBurgess, M. 
(2024) Generative AI\u2019s Biggest Security Flaw Is Not Easy to Fix. WIRED. \nhttps://www.wired.com/story/generative-ai-prompt-injection-hacking/ \nBurtell, M. et al. (2024) The Surprising Power of Next Word Prediction: Large Language Models \nExplained, Part 1. Georgetown Center for Security and Emerging Technology. \nhttps://cset.georgetown.edu/article/the-surprising-power-of-next-word-prediction-large-language-\nmodels-explained-part-1/ \nCanadian Centre for Cyber Security (2023) Generative arti\ufb01cial intelligence (AI) - ITSAP.00.041. \nhttps://www.cyber.gc.ca/en/guidance/generative-arti\ufb01cial-intelligence-ai-itsap00041 \nCarlini, N., et al. (2021) Extracting Training Data from Large Language Models. Usenix. \nhttps://www.usenix.org/conference/usenixsecurity21/presentation/carlini-extracting \nCarlini, N. et al. (2023) Quantifying Memorization Across Neural Language Models. ICLR 2023. \nhttps://arxiv.org/pdf/2202.07646", "89295981-fe45-4788-b206-98c5e10f6bcf": "Advocate and Floor Captain, Atlantic Plaza Towers Tenants Association\nThe individual panelists described the ways in which AI systems and other technologies are increasingly being \nused to limit access to equal opportunities in education, housing, and employment. Education-related \nconcerning uses included the increased use of remote proctoring systems, student location and facial \nrecognition tracking, teacher evaluation systems, robot teachers, and more. Housing-related concerning uses \nincluding automated tenant background screening and facial recognition-based controls to enter or exit \nhousing complexes. Employment-related concerning uses included discrimination in automated hiring \nscreening and workplace surveillance. Various panelists raised the limitations of existing privacy law as a key \nconcern, pointing out that students should be able to reinvent themselves and require privacy of their student", "d91a4739-1860-4e2f-a88d-921afc9e58ad": "22 \nGV-6.2-003 \nEstablish incident response plans for third-party GAI technologies: Align incident \nresponse plans with impacts enumerated in MAP 5.1; Communicate third-party \nGAI incident response plans to all relevant AI Actors; De\ufb01ne ownership of GAI \nincident response functions; Rehearse third-party GAI incident response plans at \na regular cadence; Improve incident response plans based on retrospective \nlearning; Review incident response plans for alignment with relevant breach \nreporting, data protection, data privacy, or other laws. \nData Privacy; Human-AI \nCon\ufb01guration; Information \nSecurity; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization \nGV-6.2-004 \nEstablish policies and procedures for continuous monitoring of third-party GAI \nsystems in deployment. \nValue Chain and Component \nIntegration \nGV-6.2-005 \nEstablish policies and procedures that address GAI data redundancy, including \nmodel weights and other system artifacts. \nHarmful Bias and Homogenization \nGV-6.2-006", "0c855fa7-f69e-4584-8664-653671650fc2": "answer itself is incorrect. Similarly, an LLM could falsely assert that it is human or has human traits, \npotentially deceiving humans into believing they are speaking with another human. \nThe extent to which humans can be deceived by LLMs, the mechanisms by which this may occur, and the \npotential risks from adversarial prompting of such behavior are emerging areas of study. 
Given the wide \nrange of downstream impacts of GAI, it is difficult to estimate the downstream scale and impact of \nconfabulations. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Safe, Valid and Reliable, Explainable \nand Interpretable \n2.3. Dangerous, Violent, or Hateful Content \nGAI systems can produce content that is inciting, radicalizing, or threatening, or that glorifies violence, \nwith greater ease and scale than other technologies. LLMs have been reported to generate dangerous or \nviolent recommendations, and some models have generated actionable instructions for dangerous or", "60b017ce-e6fe-4811-834f-795d4a12c364": "results of any surveillance pre-deployment assessment, including disparity assessment in the real-world \ndeployment context, the specific identified goals of any data collection, and the assessment done to ensure \nonly the minimum required data is collected. It should also include documentation about the scope limit \nassessments, including data retention timelines and associated justification, and an assessment of the \nimpact of surveillance or data collection on rights, opportunities, and access. Where possible, this \nassessment of the impact of surveillance should be done by an independent party. Reporting should be \nprovided in a clear and machine-readable manner. \n35", "3d60073b-d973-4f20-8886-8710331b996e": "appropriately and meaningfully given. Any consent requests should be brief, \nbe understandable in plain language, and give you agency over data collection \nand the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. Enhanced \nprotections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and \nfor data pertaining to youth should put you first. In sensitive domains, your \ndata and related inferences should only be used for necessary functions, and \nyou should be protected by ethical review and use prohibitions. You and your \ncommunities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least \npre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring", "fee5c143-dc56-4cc6-ba07-552465ae2b72": "You should be able to opt out, where appropriate, and \nhave access to a person who can quickly consider and \nremedy problems you encounter. You should be able to opt \nout from automated systems in favor of a human alternative, where \nappropriate. Appropriateness should be determined based on reasonable expectations in a given context and with a focus on ensuring \nbroad accessibility and protecting the public from especially harmful impacts. In some cases, a human or other alternative may be required by law. You should have access to timely human consideration and remedy by a fallback and escalation process if an automated system fails, it produces an error, or you would like to appeal or \ncontest its impacts on you. Human consideration and fallback \nshould be accessible, equitable, effective, maintained, accompanied \nby appropriate operator training, and should not impose an unreasonable burden on the public. Automated systems with an intended", "e1dc1672-fbcf-4551-aa8d-4896cc302aef": "and biological design tools. arXiv. 
https://arxiv.org/pdf/2306.13952", "eb4a5bf4-cad4-46d7-9ca0-a93e194b65eb": "standards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algorithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimination when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via", "44763663-ed9a-43f0-84a5-ada3a7da3066": "9 \nand reduced content diversity). Overly homogenized outputs can themselves be incorrect, or they may \nlead to unreliable decision-making or amplify harmful biases. These phenomena can flow from \nfoundation models to downstream models and systems, with the foundation models acting as \n\u201cbottlenecks,\u201d or single points of failure. \nOverly homogenized content can contribute to \u201cmodel collapse.\u201d Model collapse can occur when model \ntraining over-relies on synthetic data, resulting in data points disappearing from the distribution of the \nnew model\u2019s outputs. In addition to threatening the robustness of the model overall, model collapse \ncould lead to homogenized outputs, including by amplifying any homogenization from the model used to \ngenerate the synthetic training data. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Valid and Reliable \n2.7. Human-AI Configuration \nGAI system use can involve varying risks of misconfigurations and poor interactions between a system", "dcfe160e-c7b2-4341-8d39-762060034476": "SAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nSome U.S. government agencies have developed specific frameworks for ethical use of AI \nsystems. The Department of Energy (DOE) has activated the AI Advancement Council that oversees coordination and advises on implementation of the DOE AI Strategy and addresses issues and/or escalations on the \nethical use and development of AI systems.20 The Department of Defense has adopted Artificial Intelligence \nEthical Principles, and tenets for Responsible Artificial Intelligence specifically tailored to its national \nsecurity and defense activities.21 Similarly, the U.S. Intelligence Community (IC) has developed the Principles", "fabb9ed6-5dfa-413e-bcf5-2c9e1edb6d58": "the AI lifecycle; or other issues that diminish transparency or accountability for downstream \nusers. \n2.1. 
CBRN Information or Capabilities \nIn the future, GAI may enable malicious actors to more easily access CBRN weapons and/or relevant \nknowledge, information, materials, tools, or technologies that could be misused to assist in the design, \ndevelopment, production, or use of CBRN weapons or other dangerous materials or agents. While \nrelevant biological and chemical threat knowledge and information is often publicly accessible, LLMs \ncould facilitate its analysis or synthesis, particularly by individuals without formal scientific training or \nexpertise. \nRecent research on this topic found that LLM outputs regarding biological threat creation and attack \nplanning provided minimal assistance beyond traditional search engine queries, suggesting that state-of-the-art LLMs at the time these studies were conducted do not substantially increase the operational", "63006b69-c2c6-4bab-9d11-2b7976bea75b": "SECTION TITLE\n \n \n \n \n \n \nApplying The Blueprint for an AI Bill of Rights \nRELATIONSHIP TO EXISTING LAW AND POLICY\nThere are regulatory safety requirements for medical devices, as well as sector-, population-, or technology-specific privacy and security protections. Ensuring some of the additional protections proposed in this framework \nwould require new laws to be enacted or new policies and practices to be adopted. In some cases, exceptions to \nthe principles described in the Blueprint for an AI Bill of Rights may be necessary to comply with existing law, \nconform to the practicalities of a specific use case, or balance competing public interests. In particular, law \nenforcement, and other regulatory contexts may require government actors to protect civil rights, civil liberties, \nand privacy in a manner consistent with, but using alternate mechanisms to, the specific principles discussed in \nthis framework. The Blueprint for an AI Bill of Rights is meant to assist governments and the private sector in", "0f4c5c5a-f8dc-43af-85f2-45a85fba36ca": "APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, electrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54", "76064db3-f7c4-4ded-bee2-739ed588ce64": "purpose specification. For example, most model developers do not disclose specific data sources on \nwhich models were trained, limiting user awareness of whether personally identifiable information (PII) \nwas trained on and, if so, how it was collected. \nModels may leak, generate, or correctly infer sensitive information about individuals. For example, \nduring adversarial attacks, LLMs have revealed sensitive information (from the public domain) that was \nincluded in their training data. This problem has been referred to as data memorization, and may pose \nexacerbated privacy risks even for data present only in a small number of training samples. 
\nIn addition to revealing sensitive information in GAI training data, GAI models may be able to correctly \ninfer PII or sensitive data that was not in their training data nor disclosed by the user by stitching \ntogether information from disparate sources. These inferences can have negative impact on an individual", "cfd1017b-b416-4a90-a926-b0851ad24488": "52 \n\u2022 \nMonitoring system capabilities and limitations in deployment through rigorous TEVV processes; \n\u2022 \nEvaluating how humans engage, interact with, or adapt to GAI content (especially in decision \nmaking tasks informed by GAI content), and how they react to applied provenance techniques \nsuch as overt disclosures. \nOrganizations can document and delineate GAI system objectives and limitations to identify gaps where \nprovenance data may be most useful. For instance, GAI systems used for content creation may require \nrobust watermarking techniques and corresponding detectors to identify the source of content or \nmetadata recording techniques and metadata management tools and repositories to trace content \norigins and modifications. Further narrowing of GAI task definitions to include provenance data can \nenable organizations to maximize the utility of provenance data and risk management efforts. \nA.1.7. Enhancing Content Provenance through Structured Public Feedback", "0a829510-5812-4f88-bd78-9bd5eb4f0632": "2 \nThis work was informed by public feedback and consultations with diverse stakeholder groups as part of NIST\u2019s \nGenerative AI Public Working Group (GAI PWG). The GAI PWG was an open, transparent, and collaborative \nprocess, facilitated via a virtual workspace, to obtain multistakeholder input on GAI risk management and to \ninform NIST\u2019s approach. \nThe focus of the GAI PWG was limited to four primary considerations relevant to GAI: Governance, Content \nProvenance, Pre-deployment Testing, and Incident Disclosure (further described in Appendix A). As such, the \nsuggested actions in this document primarily address these considerations. \nFuture revisions of this profile will include additional AI RMF subcategories, risks, and suggested actions based \non additional considerations of GAI as the space evolves and empirical evidence indicates additional risks. A \nglossary of terms pertinent to GAI risk management will be developed and hosted on NIST\u2019s Trustworthy &", "1233b7f3-d931-4995-86d7-fcdf13a0ea42": "Information Technology Industry Council (2024) Authenticating AI-Generated Content. \nhttps://www.itic.org/policy/ITI_AIContentAuthorizationPolicy_122123.pdf \nJain, S. et al. (2023) Algorithmic Pluralism: A Structural Approach To Equal Opportunity. arXiv. \nhttps://arxiv.org/pdf/2305.08157 \nJi, Z. et al (2023) Survey of Hallucination in Natural Language Generation. ACM Comput. Surv. 55, 12, \nArticle 248. https://doi.org/10.1145/3571730 \nJones-Jang, S. et al. (2022) How do people react to AI failure? Automation bias, algorithmic aversion, and \nperceived controllability. Oxford. https://academic.oup.com/jcmc/article/28/1/zmac029/6827859 \nJussupow, E. et al. (2020) Why Are We Averse Towards Algorithms? A Comprehensive Literature Review \non Algorithm Aversion. ECIS 2020. https://aisel.aisnet.org/ecis2020_rp/168/ \nKalai, A., et al. (2024) Calibrated Language Models Must Hallucinate. arXiv. \nhttps://arxiv.org/pdf/2311.14648", "6adeab1c-726d-44b7-a091-4c8f57d78efe": "technologies and data, and contractors, consultants, and other third-party \npersonnel. 
\nIntellectual Property; Value Chain \nand Component Integration \nAI Actor Tasks: Operation and Monitoring, Procurement, Third-party entities \n \nGOVERN 6.2: Contingency processes are in place to handle failures or incidents in third-party data or AI systems deemed to be \nhigh-risk. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.2-001 \nDocument GAI risks associated with system value chain to identify over-reliance \non third-party data and to identify fallbacks. \nValue Chain and Component \nIntegration \nGV-6.2-002 \nDocument incidents involving third-party GAI data and systems, including open-data and open-source software. \nIntellectual Property; Value Chain \nand Component Integration", "d4dbc93f-1c23-4106-a8a8-aea5eab30df5": "any automated component is used to determine an action or decision. It should also include expectations about \nreporting described throughout this framework, such as the algorithmic impact assessments described as \npart of Algorithmic Discrimination Protections. \nAccountable. Notices should clearly identify the entity responsible for designing each component of the \nsystem and the entity using it. \nTimely and up-to-date. Users should receive notice of the use of automated systems in advance of using or \nwhile being impacted by the technology. An explanation should be available with the decision itself, or soon \nthereafter. Notice should be kept up-to-date and people impacted by the system should be notified of use case \nor key functionality changes. \nBrief and clear. Notices and explanations should be assessed, such as by research on users\u2019 experiences, \nincluding user testing, to ensure that the people using or impacted by the automated system are able to easily", "37e1195e-1f2b-4a57-9c67-44f8fd137203": "While some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems\u2019 connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable", "8f766e05-5499-4c8d-b6f1-e64c9bc1b6fe": "and critical services. The American public deserves the assurance that, when rights, opportunities, or access are \nmeaningfully at stake and there is a reasonable expectation of an alternative to an automated system, they can conveniently opt out of an automated system and will not be disadvantaged for that choice. In some cases, such a human or \nother alternative may be required by law, for example it could be required as \u201creasonable accommodations\u201d for people \nwith disabilities. \nIn addition to being able to opt out and use a human alternative, the American public deserves a human fallback \nsystem in the event that an automated system fails or causes harm. No matter how rigorously an automated system is \ntested, there will always be situations for which the system fails. The American public deserves protection via human \nreview against these outlying or unexpected scenarios. In the case of time-critical systems, the public should not have", "4c2995c2-8cf4-4666-8cae-2cddab5c0aa1": "About this Document \nThe Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People was \npublished by the White House Office of Science and Technology Policy in October 2022. 
This framework was \nreleased one year after OSTP announced the launch of a process to develop \u201ca bill of rights for an AI-powered \nworld.\u201d Its release follows a year of public engagement to inform this initiative. The framework is available \nonline at: https://www.whitehouse.gov/ostp/ai-bill-of-rights \nAbout the Office of Science and Technology Policy \nThe Office of Science and Technology Policy (OSTP) was established by the National Science and Technology \nPolicy, Organization, and Priorities Act of 1976 to provide the President and others within the Executive Office \nof the President with advice on the scientific, engineering, and technological aspects of the economy, national", "ed293c39-a2c7-4e96-bdb7-43ea102ba174": "29 \nMS-1.1-006 \nImplement continuous monitoring of GAI system impacts to identify whether GAI \noutputs are equitable across various sub-populations. Seek active and direct \nfeedback from affected communities via structured feedback mechanisms or red-teaming to monitor and improve outputs. \nHarmful Bias and Homogenization \nMS-1.1-007 \nEvaluate the quality and integrity of data used in training and the provenance of \nAI-generated content, for example by employing techniques like chaos \nengineering and seeking stakeholder feedback. \nInformation Integrity \nMS-1.1-008 \nDefine use cases, contexts of use, capabilities, and negative impacts where \nstructured human feedback exercises, e.g., GAI red-teaming, would be most \nbeneficial for GAI risk measurement and management based on the context of \nuse. \nHarmful Bias and \nHomogenization; CBRN \nInformation or Capabilities \nMS-1.1-009 \nTrack and document risks or opportunities related to all GAI risks that cannot be", "9feea339-e568-4440-96ce-ba269c40347f": "issues in conversations with chatbots \u2013 and that users exhibit negative reactions to unhelpful responses \nfrom these chatbots during situations of distress. \nThis risk encompasses difficulty controlling creation of and public exposure to offensive or hateful \nlanguage, and denigrating or stereotypical content generated by AI. This kind of speech may contribute \nto downstream harm such as fueling dangerous or violent behaviors. The spread of denigrating or \nstereotypical content can also further exacerbate representational harms (see Harmful Bias and \nHomogenization below). \nTrustworthy AI Characteristics: Safe, Secure and Resilient \n2.4. Data Privacy \nGAI systems raise several risks to privacy. GAI system training requires large volumes of data, which in \nsome cases may include personal data. The use of personal data for GAI training raises risks to widely \naccepted privacy principles, including to transparency, individual participation (including consent), and", "0dae095f-3616-47ba-9b4c-f495fbe42c2f": "necessary functions should be optional, i.e., should not be required, incentivized, or coerced in order to \nreceive opportunities or access to services. In cases where data is provided to an entity (e.g., health insurance \ncompany) in order to facilitate payment for such a need, that data should only be used for that purpose. \nEthical review and use prohibitions. Any use of sensitive data or decision process based in part on sensitive data that might limit rights, opportunities, or access, whether the decision is automated or not, should go \nthrough a thorough ethical review and monitoring, both in advance and by periodic review (e.g., via an independent ethics committee or similarly robust process). 
In some cases, this ethical review may determine that data \nshould not be used or shared for specific uses even with consent. Some novel uses of automated systems in this \ncontext, where the algorithm is dynamically developing and where the science behind the use case is not well", "3061ca42-f18e-4a76-9b9b-e378dc162984": "Bias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-002 \nEngage in internal and external evaluations, GAI red-teaming, impact \nassessments, or other structured human feedback exercises in consultation \nwith representative AI Actors with expertise and familiarity in the context of \nuse, and/or who are representative of the populations associated with the \ncontext of use. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.3-003 \nVerify those conducting structured human feedback exercises are not directly \ninvolved in system development tasks for the same GAI model. \nHuman-AI Configuration; Data \nPrivacy \nAI Actor Tasks: AI Deployment, AI Development, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, \nEnd-Users, Operation and Monitoring, TEVV", "c8465ca6-885a-48c9-9b62-5449f084b0ce": "into technology design processes. The most prevalent in the United States is the Access Board\u2019s Section \n508 regulations,56 which are the technical standards for federal information communication technology (software, \nhardware, and web). Other standards include those issued by the International Organization for \nStandardization,57 and the World Wide Web Consortium Web Content Accessibility Guidelines,58 a globally \nrecognized voluntary consensus standard for web content and other information and communications \ntechnology. \nNIST has released Special Publication 1270, Towards a Standard for Identifying and Managing Bias \nin Artificial Intelligence.59 The special publication: describes the stakes and challenges of bias in artificial \nintelligence and provides examples of how and why it can chip away at public trust; identifies three categories \nof bias in AI \u2013 systemic, statistical, and human \u2013 and describes how and where they contribute to harms; and", "0c1c5568-2fbb-4c5b-b79a-6cbe6c17f209": "history of content. Provenance data tracking and synthetic content detection can help facilitate greater \ninformation access about both authentic and synthetic content to users, enabling better knowledge of \ntrustworthiness in AI systems. When combined with other organizational accountability mechanisms, \ndigital content transparency approaches can enable processes to trace negative outcomes back to their \nsource, improve information integrity, and uphold public trust. Provenance data tracking and synthetic \ncontent detection mechanisms provide information about the origin and history of content to assist in \nGAI risk management efforts. \nProvenance metadata can include information about GAI model developers or creators of GAI content, \ndate/time of creation, location, modifications, and sources. Metadata can be tracked for text, images, \nvideos, audio, and underlying datasets. The implementation of provenance data tracking techniques can", "980b42a4-0ff2-437f-bc3f-e021380c4904": "that may potentially arise in more advanced, future GAI systems are not considered. Future updates may \nincorporate additional risks or provide further details on the risks identified below. 
\nTo guide organizations in identifying and managing GAI risks, a set of risks unique to or exacerbated by \nthe development and use of GAI are defined below.5 Each risk is labeled according to the outcome, \nobject, or source of the risk (i.e., some are risks \u201cto\u201d a subject or domain and others are risks \u201cof\u201d or \n\u201cfrom\u201d an issue or theme). These risks provide a lens through which organizations can frame and execute \nrisk management efforts. To help streamline risk management efforts, each risk is mapped in Section 3 \n(as well as in tables in Appendix B) to relevant Trustworthy AI Characteristics identified in the AI RMF. \n \n \n5 These risks can be further categorized by organizations depending on their unique approaches to risk definition", "1f602c98-0f78-4112-bd8d-1948ae89882d": "opportunities\u2026, or benefits.\u201d \nNIST\u2019s Privacy Framework provides a comprehensive, detailed and actionable approach for \norganizations to manage privacy risks. The NIST Framework gives organizations ways to identify and \ncommunicate their privacy risks and goals to support ethical decision-making in system, product, and service \ndesign or deployment, as well as the measures they are taking to demonstrate compliance with applicable laws \nor regulations. It has been voluntarily adopted by organizations across many different sectors around the world.78\nA school board\u2019s attempt to surveil public school students\u2014undertaken without \nadequate community input\u2014sparked a state-wide biometrics moratorium.79 Reacting to a plan in \nthe city of Lockport, New York, the state\u2019s legislature banned the use of facial recognition systems and other \n\u201cbiometric identifying technology\u201d in schools until July 1, 2022.80 The law additionally requires that a report on", "d0a26208-636c-4214-90a7-aa53131b8919": "does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government\u2019s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles", "ee563e5c-3618-435e-8a28-e9c8655c8d6e": "these systems and use associated human-driven mechanisms instead, ensuring timeliness of benefit payments, \nand providing clear notice about the use of these systems and clear explanations of how and what the \ntechnologies are doing. Some panelists suggested that technology should be used to help people receive \nbenefits, e.g., by pushing benefits to those in need and ensuring automated decision-making systems are only \nused to provide a positive outcome; technology shouldn't be used to take supports away from people who need \nthem. \nPanel 6: The Healthcare System. 
This event explored current and emerging uses of technology in the \nhealthcare system and consumer products related to health. \nWelcome:\n\u2022\nAlondra Nelson, Deputy Director for Science and Society, White House Office of Science and Technology\nPolicy\n\u2022\nPatrick Gaspard, President and CEO, Center for American Progress\nModerator: Micky Tripathi, National Coordinator for Health Information Technology, U.S. Department of \nHealth and Human Services.", "73123dea-1304-4967-8f32-c8c9484ff477": "SAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDerived data sources tracked and reviewed carefully. Data that is derived from other data through \nthe use of algorithms, such as data derived or inferred from prior model outputs, should be identified and \ntracked, e.g., via a specialized type in a data schema. Derived data should be viewed as potentially high-risk \ninputs that may lead to feedback loops, compounded harm, or inaccurate results. Such sources should be carefully validated against the risk of collateral consequences. \nData reuse limits in sensitive domains. Data reuse, and especially data reuse in a new context, can result \nin the spreading and scaling of harms. Data from some domains, including criminal justice data and data indi-", "41c0457c-0232-44b5-852a-d481ca70ce21": "16 \nGOVERN 1.5: Ongoing monitoring and periodic review of the risk management process and its outcomes are planned, and \norganizational roles and responsibilities are clearly defined, including determining the frequency of periodic review. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.5-001 Define organizational responsibilities for periodic review of content provenance \nand incident monitoring for GAI systems. \nInformation Integrity \nGV-1.5-002 \nEstablish organizational policies and procedures for after action reviews of GAI \nsystem incident response and incident disclosures, to identify gaps; Update \nincident response and incident disclosure processes as required. \nHuman-AI Configuration; \nInformation Security \nGV-1.5-003 \nMaintain a document retention policy to keep history for test, evaluation, \nvalidation, and verification (TEVV), and digital content transparency methods for \nGAI. \nInformation Integrity; Intellectual \nProperty \nAI Actor Tasks: Governance and Oversight, Operation and Monitoring", "96a6d0c5-2e52-417f-a2db-56555912e00c": "Based Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-driven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. 
The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy,", "3118d718-c7c5-4e30-a48f-6da5e0f06528": "or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consultation processes that may be applied when considering the use of new automated systems, and existing product development and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno-", "56e73190-3953-4683-bd63-8c163f90136b": "SAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n\u2022\nAI-enabled \u201cnudification\u201d technology that creates images where people appear to be nude\u2014including apps that\nenable non-technical users to create or alter images of individuals without their consent\u2014has proliferated at an\nalarming rate. Such technology is becoming a common form of image-based abuse that disproportionately\nimpacts women. As these tools become more sophisticated, they are producing altered images that are increasingly realistic and are difficult for both humans and AI to detect as inauthentic. Regardless of authenticity, the experience of harm to victims of non-consensual intimate images can be devastatingly real\u2014affecting their personal\nand professional lives, and impacting their mental and physical health.10\n\u2022", "c3309604-8e53-406d-871b-1186ac575409": "Virginia Doellgast and Sean O'Brady. Making Call Center Jobs Better: The Relationship between\nManagement Practices and Worker Stress. A Report for the CWA. June 2020. https://\nhdl.handle.net/1813/74307\n62. See, e.g., Federal Trade Commission. Data Brokers: A Call for Transparency and Accountability. May\n2014.\nhttps://www.ftc.gov/system/files/documents/reports/data-brokers-call-transparency-accountability-report-federal-trade-commission-may-2014/140527databrokerreport.pdf; Cathy O\u2019Neil.\nWeapons of Math Destruction. Penguin Books. 2017.\nhttps://en.wikipedia.org/wiki/Weapons_of_Math_Destruction\n63. See, e.g., Rachel Levinson-Waldman, Harsha Pandurnga, and Faiza Patel. Social Media Surveillance by\nthe U.S. Government. Brennan Center for Justice. Jan. 7, 2022.\nhttps://www.brennancenter.org/our-work/research-reports/social-media-surveillance-us-government;\nShoshana Zuboff. The Age of Surveillance Capitalism: The Fight for a Human Future at the New Frontier of\nPower. Public Affairs. 2019.", "0fdf9b30-0b9a-4ac0-b691-44357de98eb0": "roughly in the order in which they would be instituted. 
\nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-", "52a90a9b-df34-446c-99d7-913e6f4bffb5": "privacy, peaceful assembly, speech, or association, in a way that limits the exercise of civil rights or civil liberties. Information about or algorithmically-determined assumptions related to identity should be carefully \nlimited if used to target or guide surveillance systems in order to avoid algorithmic discrimination; such identity-related information includes group characteristics or affiliations, geographic designations, location-based \nand association-based inferences, social networks, and biometrics. Continuous surveillance and monitoring \nsystems should not be used in physical or digital workplaces (regardless of employment status), public educational institutions, and public accommodations. Continuous surveillance and monitoring systems should not \nbe used in a way that has the effect of limiting access to critical resources or services or suppressing the exercise of rights, even where the organization is not under a particular duty to protect those rights.", "050a041d-1482-4d8c-afc2-0475b0eafa28": "validity and reasonable use of automated systems. \n\u2022\nA lawyer representing an older client with disabilities who had been cut off from Medicaid-funded home\nhealth-care assistance couldn't determine why, especially since the decision went against historical access\npractices. In a court hearing, the lawyer learned from a witness that the state in which the older client\nlived had recently adopted a new algorithm to determine eligibility.83 The lack of a timely explanation made it\nharder to understand and contest the decision.\n\u2022\nA formal child welfare investigation is opened against a parent based on an algorithm and without the parent\never being notified that data was being collected and used as part of an algorithmic child maltreatment\nrisk assessment.84 The lack of notice or an explanation makes it harder for those performing child\nmaltreatment assessments to validate the risk assessment and denies parents knowledge that could help them\ncontest a decision.\n41", "2b47e851-40e8-454f-b9d4-bcc6db94d988": "Rights form an overlapping set of backstops against potential harms. This purposefully overlapping \nframework, when taken as a whole, forms a blueprint to help protect the public from harm. \nThe measures taken to realize the vision set forward in this framework should be proportionate \nwith the extent and nature of the harm, or risk of harm, to people's rights, opportunities, and \naccess. 
\nRELATIONSHIP TO EXISTING LAW AND POLICY\nThe Blueprint for an AI Bill of Rights is an exercise in envisioning a future where the American public is \nprotected from the potential harms, and can fully enjoy the benefits, of automated systems. It describes principles that can help ensure these protections. Some of these protections are already required by the U.S. Constitution or implemented under existing U.S. laws. For example, government surveillance, and data search and \nseizure are subject to legal requirements and judicial oversight. There are Constitutional requirements for", "4b3441f0-0254-424b-ba91-cba271cc448a": "34 \nMS-2.7-009 Regularly assess and verify that security measures remain effective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability \u2013 as identified in the MAP function \u2013 are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Configuration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modified, or shared to provide a tamper-", "fe0a25b0-d293-453b-82a9-e3b1fa922e0a": "Formal Methods in the Field26 program supports research on rigorous formal verification and analysis of \nautomated systems and machine learning, and the Designing Accountable Software Systems27 program supports \nresearch on rigorous and reproducible methodologies for developing software systems with legal and regulatory \ncompliance in mind. \nSome state legislatures have placed strong transparency and validity requirements on \nthe use of pretrial risk assessments. The use of algorithmic pretrial risk assessments has been a \ncause of concern for civil rights groups.28 Idaho Code Section 19-1910, enacted in 2019,29 requires that any \npretrial risk assessment, before use in the state, first be \"shown to be free of bias against any class of \nindividuals protected from discrimination by state or federal law\", that any locality using a pretrial risk \nassessment must first formally validate the claim of its being free of bias, that \"all documents, records, and", "15634080-f043-4a89-8422-a66280a2b0e6": "technical standards and certifications \u2013 are defined, assessed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certification programs that test proficiency in managing GAI risks and \ninterpreting content provenance, relevant to specific industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proficiency tests from tests of GAI capabilities. 
\nHuman-AI Configuration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconfigurations for future refinement and improvements. \nHuman-AI Configuration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping", "5c9c0bbc-8122-4c8c-986a-cdf7c4d63e34": "exam using biometric markers.73 These systems have the potential to limit student freedom to express a range\nof emotions at school and may inappropriately flag students with disabilities who need accommodations or\nuse screen readers or dictation software as cheating.74\n\u2022\nLocation data, acquired from a data broker, can be used to identify people who visit abortion clinics.75\n\u2022\nCompanies collect student data such as demographic information, free or reduced lunch status, whether\nthey've used drugs, or whether they've expressed interest in LGBTQI+ groups, and then use that data to \nforecast student success.76 Parents and education experts have expressed concern about collection of such\nsensitive data without express parental consent, the lack of transparency in how such data is being used, and\nthe potential for resulting discriminatory impacts.\n\u2022 Many employers transfer employee data to third party job verification services. This information is then used", "00048035-8497-42d2-80a6-8e8be91e9e20": "Human-AI Configuration \nAI Actor Tasks: AI Development, AI Deployment, AI Impact Assessment, Operation and Monitoring \n \nMANAGE 2.2: Mechanisms are in place and applied to sustain the value of deployed AI systems. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.2-001 \nCompare GAI system outputs against pre-defined organization risk tolerance, \nguidelines, and principles, and review and test AI-generated content against \nthese guidelines. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content \nMG-2.2-002 \nDocument training data sources to trace the origin and provenance of AI-generated content. \nInformation Integrity \nMG-2.2-003 \nEvaluate feedback loops between GAI system content provenance and human \nreviewers, and update where needed. Implement real-time monitoring systems \nto affirm that content provenance protocols remain effective. \nInformation Integrity \nMG-2.2-004", "78698c18-fad9-4e5a-8247-59113eab63c6": "Model Cards for Model Reporting. In Proceedings of the Conference on Fairness, Accountability, and\nTransparency (FAT* '19). Association for Computing Machinery, New York, NY, USA, 220\u2013229. https://\ndl.acm.org/doi/10.1145/3287560.3287596\n90. Sarah Ammermann. Adverse Action Notice Requirements Under the ECOA and the FCRA. Consumer\nCompliance Outlook. Second Quarter 2013.\nhttps://consumercomplianceoutlook.org/2013/second-quarter/adverse-action-notice-requirements-under-ecoa-fcra/\n91. Federal Trade Commission. Using Consumer Reports for Credit Decisions: What to Know About\nAdverse Action and Risk-Based Pricing Notices. Accessed May 2, 2022.\nhttps://www.ftc.gov/business-guidance/resources/using-consumer-reports-credit-decisions-what-know-about-adverse-action-risk-based-pricing-notices#risk\n92. Consumer Financial Protection Bureau. CFPB Acts to Protect the Public from Black-Box Credit\nModels Using Complex Algorithms. 
May 26, 2022.", "4b917f4a-d442-4e69-a9a6-1ddff3471c4b": "\u2022 \nStage of the AI lifecycle: Risks can arise during design, development, deployment, operation, \nand/or decommissioning. \n\u2022 \nScope: Risks may exist at individual model or system levels, at the application or implementation \nlevels (i.e., for a specific use case), or at the ecosystem level \u2013 that is, beyond a single system or \norganizational context. Examples of the latter include the expansion of \u201calgorithmic \nmonocultures,3\u201d resulting from repeated use of the same model, or impacts on access to \nopportunity, labor markets, and the creative economies.4 \n\u2022 \nSource of risk: Risks may emerge from factors related to the design, training, or operation of the \nGAI model itself, stemming in some cases from GAI model or system inputs, and in other cases, \nfrom GAI system outputs. Many GAI risks, however, originate from human behavior, including \n \n \n3 \u201cAlgorithmic monocultures\u201d refers to the phenomenon in which repeated use of the same model or algorithm in", "8d05f043-5126-4ba3-841b-52609cbb7433": "interacting with external GAI technologies or service providers. Organizations can apply standard or \nexisting risk controls and processes to proprietary or open-source GAI technologies, data, and third-party \nservice providers, including acquisition and procurement due diligence, requests for software bills of \nmaterials (SBOMs), application of service level agreements (SLAs), and statement on standards for \nattestation engagement (SSAE) reports to help with third-party transparency and risk management for \nGAI systems. \nA.1.4. Pre-Deployment Testing \nOverview \nThe diverse ways and contexts in which GAI systems may be developed, used, and repurposed \ncomplicates risk mapping and pre-deployment measurement efforts. Robust test, evaluation, validation, \nand verification (TEVV) processes can be iteratively applied \u2013 and documented \u2013 in early stages of the AI \nlifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous", "76c4d456-13da-43be-890b-72a103ef2db3": "1 \n1. \nIntroduction \nThis document is a cross-sectoral profile of and companion resource for the AI Risk Management \nFramework (AI RMF 1.0) for Generative AI,1 pursuant to President Biden\u2019s Executive Order (EO) 14110 on \nSafe, Secure, and Trustworthy Artificial Intelligence.2 The AI RMF was released in January 2023, and is \nintended for voluntary use and to improve the ability of organizations to incorporate trustworthiness \nconsiderations into the design, development, use, and evaluation of AI products, services, and systems. \nA profile is an implementation of the AI RMF functions, categories, and subcategories for a specific \nsetting, application, or technology \u2013 in this case, Generative AI (GAI) \u2013 based on the requirements, risk \ntolerance, and resources of the Framework user. AI RMF profiles assist organizations in deciding how to \nbest manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory", "3db4d91b-029a-4782-8c7b-acecc3a4a75a": "Algorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. 
\nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination", "29854fee-d0ae-47b0-9612-fceb12868283": "HUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nHealthcare \u201cnavigators\u201d help people find their way through online signup forms to choose \nand obtain healthcare. A Navigator is \u201can individual or organization that's trained and able to help \nconsumers, small businesses, and their employees as they look for health coverage options through the \nMarketplace (a government web site), including completing eligibility and enrollment forms.\u201d106 For \nthe 2022 plan year, the Biden-Harris Administration increased funding so that grantee organizations could \n\u201ctrain and certify more than 1,500 Navigators to help uninsured consumers find affordable and comprehensive \nhealth coverage.\u201d107", "b83ad31a-ed48-4afc-9e7e-8561388c0a01": "of Artificial Intelligence Ethics for the Intelligence Community to guide personnel on whether and how to \ndevelop and use AI in furtherance of the IC's mission, as well as an AI Ethics Framework to help implement \nthese principles.22\nThe National Science Foundation (NSF) funds extensive research to help foster the \ndevelopment of automated systems that adhere to and advance their safety, security and \neffectiveness. Multiple NSF programs support research that directly addresses many of these principles: \nthe National AI Research Institutes23 support research on all aspects of safe, trustworthy, fair, and explainable \nAI algorithms and systems; the Cyber Physical Systems24 program supports research on developing safe \nautonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace25 \nprogram supports research on cybersecurity and privacy enhancing technologies in automated systems; the", "de752b0d-82c7-4ad6-b01d-86406280913d": "SAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nOngoing monitoring. Automated systems should have ongoing monitoring procedures, including recalibration procedures, in place to ensure that their performance does not fall below an acceptable level over time, \nbased on changing real-world conditions or deployment contexts, post-deployment modification, or unexpected conditions. 
This ongoing monitoring should include continuous evaluation of performance metrics and \nharm assessments, updates of any systems, and retraining of any machine learning models as necessary, as well \nas ensuring that fallback mechanisms are in place to allow reversion to a previously working system. Monitor-", "ebca47da-e839-4e2f-bea2-317ac8d5564c": "groups and have difficulty producing non-stereotyped content even when the prompt specifically \nrequests image features that are inconsistent with the stereotypes. Harmful bias in GAI models, which \nmay stem from their training data, can also cause representational harms or perpetuate or exacerbate \nbias based on race, gender, disability, or other protected classes. \nHarmful bias in GAI systems can also lead to harms via disparities between how a model performs for \ndifferent subgroups or languages (e.g., an LLM may perform less well for non-English languages or \ncertain dialects). Such disparities can contribute to discriminatory decision-making or amplification of \nexisting societal biases. In addition, GAI systems may be inappropriately trusted to perform similarly \nacross all subgroups, which could leave the groups facing underperformance with worse outcomes than \nif no GAI system were used. Disparate or reduced performance for lower-resource languages also", "de9ab1ca-89c7-42ee-b32e-4c1c51b99ef7": "101. Andrew Kenney. 'I'm shocked that they need to have a smartphone': System for unemployment\nbenefits exposes digital divide. USA Today. May 2, 2021.\nhttps://www.usatoday.com/story/tech/news/2021/05/02/unemployment-benefits-system-leaving-people-behind/4915248001/\n102. Allie Gross. UIA lawsuit shows how the state criminalizes the unemployed. Detroit Metro-Times.\nSep. 18, 2015.\nhttps://www.metrotimes.com/news/uia-lawsuit-shows-how-the-state-criminalizes-the-unemployed-2369412\n103. Maia Szalavitz. The Pain Was Unbearable. So Why Did Doctors Turn Her Away? Wired. Aug. 11,\n2021. https://www.wired.com/story/opioid-drug-addiction-algorithm-chronic-pain/\n104. Spencer Soper. Fired by Bot at Amazon: \"It's You Against the Machine\". Bloomberg, Jun. 28, 2021.\nhttps://www.bloomberg.com/news/features/2021-06-28/fired-by-bot-amazon-turns-to-machine-managers-and-workers-are-losing-out\n105. Definitions of \u2018equity\u2019 and \u2018underserved communities\u2019 can be found in the Definitions section of", "fc720adf-5fb7-4872-b139-9068225b9ec9": "42 \nMG-2.4-002 \nEstablish and maintain procedures for escalating GAI system incidents to the \norganizational risk management authority when specific criteria for deactivation \nor disengagement is met for a particular context of use or for the GAI system as a \nwhole. \nInformation Security \nMG-2.4-003 \nEstablish and maintain procedures for the remediation of issues which trigger \nincident response processes for the use of a GAI system, and provide stakeholders \ntimelines associated with the remediation plan. \nInformation Security \n \nMG-2.4-004 Establish and regularly review specific criteria that warrants the deactivation of \nGAI systems in accordance with set risk tolerances and appetites. \nInformation Security \n \nAI Actor Tasks: AI Deployment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 3.1: AI risks and benefits from third-party resources are regularly monitored, and risk controls are applied and \ndocumented. 
\nAction ID \nSuggested Action \nGAI Risks \nMG-3.1-001", "10868542-61ec-47eb-ab65-71788898bc00": "Responsible AI Resource Center (AIRC), and added to The Language of Trustworthy AI: An In-Depth Glossary of \nTerms. \nThis document was also informed by public comments and consultations from several Requests for Information. \n \n2. \nOverview of Risks Unique to or Exacerbated by GAI \nIn the context of the AI RMF, risk refers to the composite measure of an event\u2019s probability (or \nlikelihood) of occurring and the magnitude or degree of the consequences of the corresponding event. \nSome risks can be assessed as likely to materialize in a given context, particularly those that have been \nempirically demonstrated in similar contexts. Other risks may be unlikely to materialize in a given \ncontext, or may be more speculative and therefore uncertain. \nAI risks can differ from or intensify traditional software risks. Likewise, GAI can exacerbate existing AI \nrisks, and creates unique risks. GAI risks can vary along many dimensions: \n\u2022", "b99b4a4b-8d84-4830-86d5-d78ebfff7f8a": "teenager-2022-03-30/\n42. Miranda Bogen. All the Ways Hiring Algorithms Can Introduce Bias. Harvard Business Review. May\n6, 2019. https://hbr.org/2019/05/all-the-ways-hiring-algorithms-can-introduce-bias\n43. Arli Christian. Four Ways the TSA Is Making Flying Easier for Transgender People. American Civil\nLiberties Union. Apr. 5, 2022. https://www.aclu.org/news/lgbtq-rights/four-ways-the-tsa-is-making-flying-easier-for-transgender-people\n44. U.S. Transportation Security Administration. Transgender/ Non Binary / Gender Nonconforming\nPassengers. TSA. Accessed Apr. 21, 2022. https://www.tsa.gov/transgender-passengers\n45. See, e.g., National Disabled Law Students Association. Report on Concerns Regarding Online\nAdministration of Bar Exams. Jul. 29, 2020. https://ndlsa.org/wp-content/uploads/2020/08/\nNDLSA_Online-Exam-Concerns-Report1.pdf; Lydia X. Z. Brown. How Automated Test Proctoring\nSoftware Discriminates Against Disabled Students. Center for Democracy and Technology. Nov. 16, 2020.", "4f4414a0-d193-43d9-9f8d-d7d21770b1db": "apply to every suggested action in the subcategory (i.e., some apply to AI development and \nothers apply to AI deployment). \nThe tables below begin with the AI RMF subcategory, shaded in blue, followed by suggested actions. \n \nGOVERN 1.1: Legal and regulatory requirements involving AI are understood, managed, and documented. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.1-001 Align GAI development and use with applicable laws and regulations, including \nthose related to data privacy, copyright and intellectual property law. \nData Privacy; Harmful Bias and \nHomogenization; Intellectual \nProperty \nAI Actor Tasks: Governance and Oversight \n \n \n \n14 AI Actors are defined by the OECD as \u201cthose who play an active role in the AI system lifecycle, including \norganizations and individuals that deploy or operate AI.\u201d See Appendix A of the AI RMF for additional descriptions \nof AI Actors and AI Actor Tasks.", "39990922-0c0a-4008-a84a-ab498e27827f": "West, D. (2023) AI poses disproportionate risks to women. Brookings. \nhttps://www.brookings.edu/articles/ai-poses-disproportionate-risks-to-women/ \nWu, K. et al. (2024) How well do LLMs cite relevant medical references? An evaluation framework and \nanalyses. arXiv. https://arxiv.org/pdf/2402.02008 \nYin, L. et al. (2024) OpenAI\u2019s GPT Is A Recruiter\u2019s Dream Tool. 
Tests Show There\u2019s Racial Bias. Bloomberg. \nhttps://www.bloomberg.com/graphics/2024-openai-gpt-hiring-racial-discrimination/ \nYu, Z. et al. (March 2024) Don\u2019t Listen To Me: Understanding and Exploring Jailbreak Prompts of Large \nLanguage Models. arXiv. https://arxiv.org/html/2403.17336v1 \nZaugg, I. et al. (2022) Digitally-disadvantaged languages. Policy Review. \nhttps://policyreview.info/pdf/policyreview-2022-2-1654.pdf", "0159af88-d877-4a4a-8cd2-c1341841cb58": "but are not limited to: \n\u2022 \nParticipatory Engagement Methods: Methods used to solicit feedback from civil society groups, \na\ufb00ected communities, and users, including focus groups, small user studies, and surveys. \n\u2022 \nField Testing: Methods used to determine how people interact with, consume, use, and make \nsense of AI-generated information, and subsequent actions and e\ufb00ects, including UX, usability, \nand other structured, randomized experiments. \n\u2022 \nAI Red-teaming: A structured testing exercise used to probe an AI system to \ufb01nd \ufb02aws and \nvulnerabilities such as inaccurate, harmful, or discriminatory outputs, often in a controlled \nenvironment and in collaboration with system developers. \nInformation gathered from structured public feedback can inform design, implementation, deployment \napproval, maintenance, or decommissioning decisions. Results and insights gleaned from these exercises \ncan serve multiple purposes, including improving data quality and preprocessing, bolstering governance", "62055a37-ec25-45b3-ba6c-327cd1a68fb4": "for individuals to be able to access any of their individual information stored in a federal system of records, if not \nincluded under one of the systems of records exempted pursuant to the Privacy Act. In these cases, federal agen\u00ad\ncies must provide a method for an individual to determine if their personal information is stored in a particular \nsystem of records, and must provide procedures for an individual to contest the contents of a record about them. \nFurther, the Privacy Act allows for a cause of action for an individual to seek legal relief if a federal agency does not \ncomply with the Privacy Act\u2019s requirements. Among other things, a court may order a federal agency to amend or \ncorrect an individual\u2019s information in its records or award monetary damages if an inaccurate, irrelevant, untimely, \nor incomplete record results in an adverse determination about an individual\u2019s \u201cqualifications, character, rights, \u2026 \nopportunities\u2026, or benefits.\u201d", "f2534337-c301-44f5-b42c-5472d130b0b2": "presents challenges to model adoption, inclusion, and accessibility, and may make preservation of \nendangered languages more di\ufb03cult if GAI systems become embedded in everyday processes that would \notherwise have been opportunities to use these languages. \nBias is mutually reinforcing with the problem of undesired homogenization, in which GAI systems \nproduce skewed distributions of outputs that are overly uniform (for example, repetitive aesthetic styles", "fbee7ad0-9a74-472c-9305-bef84494cbac": "should be kept up-to-date and people impacted by the system should be notified of significant use case or key \nfunctionality changes. You should know how and why an outcome impacting you was determined by an \nautomated system, including when the automated system is not the sole input determining the outcome. 
\nAutomated systems should provide explanations that are technically valid, meaningful and useful to you and to \nany operators or others who need to understand the system, and calibrated to the level of risk based on the \ncontext. Reporting that includes summary information about these automated systems in plain language and \nassessments of the clarity and quality of the notice and explanations should be made public whenever possible. \n6", "ef45de44-4305-4bd2-9488-5cfefcb0e8f0": "for an AI Bill of Rights is fully consistent with these principles and with the direction in Executive Order 13985 \non Advancing Racial Equity and Support for Underserved Communities Through the Federal Government. \nThese principles find kinship in the Fair Information Practice Principles (FIPPs), derived from the 1973 report \nof an advisory committee to the U.S. Department of Health, Education, and Welfare, Records, Computers, \nand the Rights of Citizens.4 While there is no single, universal articulation of the FIPPs, these core \nprinciples for managing information about individuals have been incorporated into data privacy laws and \npolicies across the globe.5 The Blueprint for an AI Bill of Rights embraces elements of the FIPPs that are \nparticularly relevant to automated systems, without articulating a specific set of FIPPs or scoping \napplicability or the interests served to a single particular domain, like privacy, civil rights and civil liberties,", "9137db7a-37b4-4e69-bcdb-cb521f287b3e": "Reporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections", "f1df5b88-dbcd-43e5-9438-c511d20b3a85": "HUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n\u2022\nAn unemployment benefits system in Colorado required, as a condition of accessing benefits, that applicants\nhave a smartphone in order to verify their identity. 
No alternative human option was readily available,\nwhich denied many people access to benefits.101\n\u2022\nA fraud detection system for unemployment insurance distribution incorrectly flagged entries as fraudulent,\nleading to people with slight discrepancies or complexities in their files having their wages withheld and tax\nreturns seized without any chance to explain themselves or receive a review by a person.102\n\u2022 A patient was wrongly denied access to pain medication when the hospital\u2019s software confused her medica-", "70eba8ef-45d4-4747-9f08-4658cbede511": "even if the inferences are not accurate (e.g., confabulations), and especially if they reveal information \nthat the individual considers sensitive or that is used to disadvantage or harm them. \nBeyond harms from information exposure (such as extortion or dignitary harm), wrong or inappropriate \ninferences of PII can contribute to downstream or secondary harmful impacts. For example, predictive \ninferences made by GAI models based on PII or protected attributes can contribute to adverse decisions, \nleading to representational or allocative harms to individuals or groups (see Harmful Bias and \nHomogenization below).", "45015a61-5cbb-4192-a277-099739b8931d": "NOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nAutomated systems now determine opportunities, from employment to credit, and directly shape the American \npublic\u2019s experiences, from the courtroom to online classrooms, in ways that profoundly impact people\u2019s lives. But this \nexpansive impact is not always visible. An applicant might not know whether a person rejected their resume or a \nhiring algorithm moved them to the bottom of the list. A defendant in the courtroom might not know if a judge denying their bail is informed by an automated system that labeled them \u201chigh risk.\u201d From correcting errors to contesting \ndecisions, people are often denied the knowledge they need to address the impact of automated systems on their lives.", "0614bcc7-9305-496b-bd3e-a1802f15a833": "and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-specific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily \u201caverse\u201d to GAI systems, and thus \ndeprive themselves or others of GAI\u2019s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.", "3ba1fc2a-6637-4894-9ed8-e02f197b2e39": "and mitigation proportionate with those impacts. 
Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unintended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessitate rollback or significant modification to a launched automated system. \n18", "a0ca0e56-c1af-4533-a94f-456e90ab52f4": "SECTION TITLE\nDATA PRIVACY\nYou should be protected from abusive data practices via built-in protections and you \nshould have agency over how data about you is used. You should be protected from violations of \nprivacy through design choices that ensure such protections are included by default, including ensuring that \ndata collection conforms to reasonable expectations and that only data strictly necessary for the specific \ncontext is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate \nways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be \nused. Systems should not employ user experience and design decisions that obfuscate user choice or burden \nusers with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases", "f2565463-e689-404b-8348-06ab3fef9462": "Information Integrity \nMG-2.2-004 \nEvaluate GAI content and data for representational biases and employ \ntechniques such as re-sampling, re-ranking, or adversarial training to mitigate \nbiases in the generated content. \nInformation Security; Harmful Bias \nand Homogenization \nMG-2.2-005 \nEngage in due diligence to analyze GAI output for harmful content, potential \nmisinformation, and CBRN-related or NCII content. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content", "e24cd6c2-55a9-4c13-994c-7aa012bae660": "requests should be used so that users understand for what use contexts, time span, and entities they are \nproviding data and metadata consent. User experience research should be performed to ensure these consent \nrequests meet performance standards for readability and comprehension. This includes ensuring that consent \nrequests are accessible to users with disabilities and are available in the language(s) and reading level appropriate for the audience. User experience design choices that intentionally obfuscate or manipulate user \nchoice (i.e., \u201cdark patterns\u201d) should be not be used. \n34", "695adda2-bf04-4a04-9e8d-646121be4bbc": "3 \nthe abuse, misuse, and unsafe repurposing by humans (adversarial or not), and others result \nfrom interactions between a human and an AI system. \n\u2022 \nTime scale: GAI risks may materialize abruptly or across extended periods. Examples include \nimmediate (and/or prolonged) emotional harm and potential risks to physical safety due to the \ndistribution of harmful deepfake images, or the long-term effect of disinformation on societal \ntrust in public institutions. \nThe presence of risks and where they fall along the dimensions above will vary depending on the \ncharacteristics of the GAI model, system, or use case at hand. 
These characteristics include but are not \nlimited to GAI model or system architecture, training mechanisms and libraries, data types used for \ntraining or fine-tuning, levels of model access or availability of model weights, and application or use \ncase context. \nOrganizations may choose to tailor how they measure GAI risks based on these characteristics. They may", "fbac8185-c770-4b0d-bb3b-cdb1deafa299": "Promoting the Use of Trustworthy Artificial Intelligence in the Federal Government (December 2020). \nThis white paper recognizes that national security (which includes certain law enforcement and \nhomeland security activities) and defense activities are of increased sensitivity and interest to our nation\u2019s \nadversaries and are often subject to special requirements, such as those governing classified information and \nother protected data. Such activities require alternative, compatible safeguards through existing policies that \ngovern automated systems and AI, such as the Department of Defense (DOD) AI Ethical Principles and \nResponsible AI Implementation Pathway and the Intelligence Community (IC) AI Ethics Principles and \nFramework. The implementation of these policies to national security and defense activities can be informed by \nthe Blueprint for an AI Bill of Rights where feasible. \nThe Blueprint for an AI Bill of Rights is not intended to, and does not, create any legal right, benefit, or", "6f2b4b9f-4cac-46f9-99ef-3bc4cb1aaf37": "taking place regarding the use or emulation of personal identity, likeness, or voice without permission. \nTrustworthy AI Characteristics: Accountable and Transparent, Fair with Harmful Bias Managed, Privacy \nEnhanced \n2.11. \nObscene, Degrading, and/or Abusive Content \nGAI can ease the production of and access to illegal non-consensual intimate imagery (NCII) of adults, \nand/or child sexual abuse material (CSAM). GAI-generated obscene, abusive or degrading content can \ncreate privacy, psychological and emotional, and even physical harms, and in some cases may be illegal. \nGenerated explicit or obscene AI content may include highly realistic \u201cdeepfakes\u201d of real individuals, \nincluding children. The spread of this kind of material can have downstream negative consequences: in \nthe context of CSAM, even if the generated images do not resemble specific individuals, the prevalence \nof such images can divert time and resources from efforts to find real-world victims. Outside of CSAM,", "54c7a44a-c1ee-4ef5-b189-1ef365323d0b": "records and education-related data in order to do so. The overarching concerns of surveillance in these \ndomains included concerns about the chilling effects of surveillance on student expression, inappropriate \ncontrol of tenants via surveillance, and the way that surveillance of workers blurs the boundary between work \nand life and exerts extreme and potentially damaging control over workers' lives. Additionally, some panelists \npointed out ways that data from one situation was misapplied in another in a way that limited people's \nopportunities, for example data from criminal justice settings or previous evictions being used to block further \naccess to housing. 
Throughout, various panelists emphasized that these technologies are being used to shift the \nburden of oversight and efficiency from employers to workers, schools to students, and landlords to tenants, in \nways that diminish and encroach on equality of opportunity; assessment of these technologies should include", "1742ef3f-905c-4f93-b448-e55fe12fd9df": "human review of criminal investigative matters and statutory requirements for judicial review. Civil rights laws \nprotect the American people against discrimination. \n8", "ed0d3bdd-4d2e-4e05-acd1-0bbb37d3c500": "and testing activities. Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring", "a4951da4-180d-4dde-b64b-6b70ab5c71e7": "detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public\u2019s rights, opportunities, \nor access to critical needs. \n3", "573cc872-ae49-416d-8065-83f04ba76d18": "72. Jack Gillum and Jeff Kao. Aggression Detectors: The Unproven, Invasive Surveillance Technology\nSchools are Using to Monitor Students. ProPublica. Jun. 25, 2019.\nhttps://features.propublica.org/aggression-detector/the-unproven-invasive-surveillance-technology-schools-are-using-to-monitor-students/\n73. Drew Harwell. Cheating-detection companies made millions during the pandemic. Now students are\nfighting back. Washington Post. Nov. 12, 2020.\nhttps://www.washingtonpost.com/technology/2020/11/12/test-monitoring-student-revolt/\n74. See, e.g., Heather Morrison. Virtual Testing Puts Disabled Students at a Disadvantage. Government\nTechnology. May 24, 2022.\nhttps://www.govtech.com/education/k-12/virtual-testing-puts-disabled-students-at-a-disadvantage;\nLydia X. Z. Brown, Ridhi Shetty, Matt Scherer, and Andrew Crawford. Ableism And Disability\nDiscrimination In New Surveillance Technologies: How new surveillance technologies in education,", "9322efea-331d-4996-9474-cccaa4df697d": "APPENDIX\n\u2022 OSTP conducted meetings with a variety of stakeholders in the private sector and civil society. Some of these\nmeetings were specifically focused on providing ideas related to the development of the Blueprint for an AI\nBill of Rights while others provided useful general context on the positive use cases, potential harms, and/or\noversight possibilities for these technologies. Participants in these conversations from the private sector and\ncivil society included:\nAdobe \nAmerican Civil Liberties Union \n(ACLU) \nThe Aspen Commission on \nInformation Disorder \nThe Awood Center \nThe Australian Human Rights \nCommission \nBiometrics Institute \nThe Brookings Institute \nBSA | The Software Alliance \nCantellus Group \nCenter for American Progress \nCenter for Democracy and \nTechnology \nCenter on Privacy and Technology \nat Georgetown Law \nChristiana Care \nColor of Change \nCoworker \nData Robot \nData Trust Alliance \nData and Society Research Institute \nDeepmind \nEdSAFE AI Alliance \nElectronic Privacy Information", "b4e35127-be78-433a-b6aa-b7e222864503": "ed by default. 
Privacy risks should be assessed throughout the development life cycle, including privacy risks \nfrom reidentification, and appropriate technical and policy mitigation measures should be implemented. This \nincludes potential harms to those who are not users of the automated system, but who may be harmed by \ninferred data, purposeful privacy violations, or community surveillance or other community harms. Data \ncollection should be minimized and clearly communicated to the people whose data is collected. Data should \nonly be collected or used for the purposes of training or testing machine learning models if such collection and \nuse is legal and consistent with the expectations of the people whose data is collected. User experience \nresearch should be conducted to confirm that people understand what data is being collected about them and \nhow it will be used, and that this collection matches their expectations and desires.", "20f0980c-bd47-44c4-b835-82832c82da3b": "construed broadly. An explanation need not be a plain-language statement about causality but could consist of \nany mechanism that allows the recipient to build the necessary understanding and intuitions to achieve the \nstated purpose. Tailoring should be assessed (e.g., via user experience research). \nTailored to the target of the explanation. Explanations should be targeted to specific audiences and \nclearly state that audience. An explanation provided to the subject of a decision might differ from one provided \nto an advocate, or to a domain expert or decision maker. Tailoring should be assessed (e.g., via user experience \nresearch). \n43", "ab80b8f9-d569-40f7-a6e3-8d305b06893a": "intended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-pro\ufb01t, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.", "410d1dad-982f-4e6c-acfc-8c4393c47fb4": "and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 2.10: Privacy risk of the AI system \u2013 as identi\ufb01ed in the MAP function \u2013 is examined and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.10-001 \nConduct AI red-teaming to assess issues such as: Outputting of training data \nsamples, and subsequent reverse engineering, model extraction, and \nmembership inference risks; Revealing biometric, con\ufb01dential, copyrighted, \nlicensed, patented, personal, proprietary, sensitive, or trade-marked information; \nTracking or revealing location information of users or members of training \ndatasets. \nHuman-AI Con\ufb01guration; \nInformation Integrity; Intellectual \nProperty \nMS-2.10-002 \nEngage directly with end-users and other stakeholders to understand their \nexpectations and concerns regarding content provenance. Use this feedback to \nguide the design of provenance data-tracking techniques. \nHuman-AI Con\ufb01guration;", "31f4ccdf-832e-4dfa-96e7-a006d2b27a92": "DATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. 
\nTraditional terms of service\u2014the block of text that the public is accustomed to clicking through when using a web\u00ad\nsite or digital app\u2014are not an adequate mechanism for protecting privacy. The American public should be protect\u00ad\ned via built-in privacy protections, data minimization, use and collection limitations, and transparency, in addition \nto being entitled to clear mechanisms to control access to and use of their data\u2014including their metadata\u2014in a \nproactive, informed, and ongoing way. Any automated system collecting, using, sharing, or storing personal data \nshould meet these expectations. \nProtect privacy by design and by default \nPrivacy by design and by default. Automated systems should be designed and built with privacy protect\u00ad", "2542b1d9-a513-4744-a0e8-2073dd6969bf": "Human-AI Con\ufb01guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV", "97dc3796-6c1d-4071-a96e-444a6e76f758": "other countries. \nWe have seen modest progress in recent years, with some state and local governments responding to these prob\u00ad\nlems with legislation, and some courts extending longstanding statutory protections to new and emerging tech\u00ad\nnologies. There are companies working to incorporate additional protections in their design and use of auto\u00ad\nmated systems, and researchers developing innovative guardrails. Advocates, researchers, and government \norganizations have proposed principles for the ethical use of AI and other automated systems. These include \nthe Organization for Economic Co-operation and Development\u2019s (OECD\u2019s) 2019 Recommendation on Artificial \nIntelligence, which includes principles for responsible stewardship of trustworthy AI and which the United \nStates adopted, and Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government, which sets out principles that govern the federal government\u2019s use of AI. The Blueprint", "a15c2582-15b2-4587-b765-f5f0457f9f91": "automated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impacts\u2014basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n\u2022\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan", "93c33081-e2b6-4d06-afb4-8419e8ac265e": "25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at di\ufb00erent stages of AI life cycle. 
\nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. \nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner pro\ufb01ciency with AI system performance and trustworthiness \u2013 and relevant", "07584cae-264c-4ad9-abdf-caa426b1af9d": "Meaningful access to examine the system. Designers, developers, and deployers of automated \nsystems should consider limited waivers of confidentiality (including those related to trade secrets) where \nnecessary in order to provide meaningful oversight of systems used in sensitive domains, incorporating mea\u00ad\nsures to protect intellectual property and trade secrets from unwarranted disclosure as appropriate. This \nincludes (potentially private and protected) meaningful access to source code, documentation, and related \ndata during any associated legal discovery, subject to effective confidentiality or court orders. Such meaning\u00ad\nful access should include (but is not limited to) adhering to the principle on Notice and Explanation using the \nhighest level of risk so the system is designed with built-in explanations; such systems should use fully-trans\u00ad\nparent models where the model itself can be understood by people needing to directly examine it.", "354bd225-ca88-41ce-8470-abeebfc7da3b": "orientation), religion, age, national origin, disability, veteran status, genetic information, or any other \nclassification protected by law. Depending on the specific circumstances, such algorithmic discrimination \nmay violate legal protections. Designers, developers, and deployers of automated systems should take \nproactive \nand \ncontinuous \nmeasures \nto \nprotect \nindividuals \nand \ncommunities \nfrom algorithmic \ndiscrimination and to use and design systems in an equitable way. This protection should include proactive \nequity assessments as part of the system design, use of representative data and protection against proxies \nfor demographic features, ensuring accessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent \nevaluation and plain language reporting in the form of an algorithmic impact assessment, including", "9cd96ad0-9958-440d-ac65-a2b4614d7539": "courts have found that such cure procedures are constitutionally required.110 \nBallot \ncuring processes vary among states, and include direct phone calls, emails, or mail contact by election \nofficials.111 Voters are asked to provide alternative information or a new signature to verify the validity of their \nballot. \n52", "73903c5d-1dba-43d0-b85d-443cf758da6c": "a system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. 
In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely", "9a9a9238-61cf-4a36-afef-3d7c4632f4ae": "awareness of their limitations in the context(s) of use; Standard measurement \nand structured human feedback approaches; Anticipated human-AI \ncon\ufb01gurations. \nHuman-AI Con\ufb01guration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent \nMP-1.1-004 \nIdentify and document foreseeable illegal uses or applications of the GAI system \nthat surpass organizational risk tolerances. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Obscene, Degrading, \nand/or Abusive Content \nAI Actor Tasks: AI Deployment \n \nMAP 1.2: Interdisciplinary AI Actors, competencies, skills, and capacities for establishing context re\ufb02ect demographic diversity and \nbroad domain and user experience expertise, and their participation is documented. Opportunities for interdisciplinary \ncollaboration are prioritized. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.2-001 \nEstablish and empower interdisciplinary teams that re\ufb02ect a wide range of", "3ab19783-41cc-4d8b-9dd5-65034b1c3360": "on democratic processes, unknown long-term performance characteristics of GAI. \nInformation Integrity; Dangerous, \nViolent, or Hateful Content; CBRN \nInformation or Capabilities \nGV-1.3-007 Devise a plan to halt development or deployment of a GAI system that poses \nunacceptable negative risk. \nCBRN Information and Capability; \nInformation Security; Information \nIntegrity \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.4: The risk management process and its outcomes are established through transparent policies, procedures, and other \ncontrols based on organizational risk priorities. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.4-001 \nEstablish policies and mechanisms to prevent GAI systems from generating \nCSAM, NCII or content that violates the law. \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias \nand Homogenization; \nDangerous, Violent, or Hateful \nContent \nGV-1.4-002 \nEstablish transparent acceptable use policies for GAI that address illegal use or \napplications of GAI.", "eadf745e-b710-4384-9248-a774a534b3da": "GOVERN 2.1: Roles and responsibilities and lines of communication related to mapping, measuring, and managing AI risks are \ndocumented and are clear to individuals and teams throughout the organization. 
\nAction ID \nSuggested Action \nGAI Risks \nGV-2.1-001 \nEstablish organizational roles, policies, and procedures for communicating GAI \nincidents and performance to AI Actors and downstream stakeholders (including \nthose potentially impacted), via community or official resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor). \nHuman-AI Configuration; Value \nChain and Component Integration \nGV-2.1-002 Establish procedures to engage teams for GAI system incident response with \ndiverse composition and responsibilities based on the particular incident type. \nHarmful Bias and Homogenization \nGV-2.1-003 Establish processes to verify the AI Actors conducting GAI incident response tasks \ndemonstrate and maintain the appropriate skills and training. \nHuman-AI Configuration", "95c4db5f-ccbc-4cb6-937b-2e6690f89320": "ately to identified risks. Appropriate responses include determining not to process data when the privacy risks \noutweigh the benefits or implementing measures to mitigate acceptable risks. Appropriate responses do not \ninclude sharing or transferring the privacy risks to users via notice or consent requests where users could not \nreasonably be expected to understand the risks without further support. \nPrivacy-preserving security. Entities creating, using, or governing automated systems should follow \nprivacy and security best practices designed to ensure data and metadata do not leak beyond the specific \nconsented use case. Best practices could include using privacy-enhancing cryptography or other types of \nprivacy-enhancing technologies or fine-grained permissions and access control mechanisms, along with \nconventional system security protocols. \n33", "28b833ba-4e82-47fc-9206-2b9e593d4569": "Summaries of Panel Discussions: \nPanel 1: Consumer Rights and Protections. This event explored the opportunities and challenges for \nindividual consumers and communities in the context of a growing ecosystem of AI-enabled consumer \nproducts, advanced platforms and services, \u201cInternet of Things\u201d (IoT) devices, and smart city products and \nservices. \nWelcome:\n\u2022\nRashida Richardson, Senior Policy Advisor for Data and Democracy, White House Office of Science and\nTechnology Policy\n\u2022\nKaren Kornbluh, Senior Fellow and Director of the Digital Innovation and Democracy Initiative, German\nMarshall Fund\nModerator: \nDevin E. Willis, Attorney, Division of Privacy and Identity Protection, Bureau of Consumer Protection, Federal \nTrade Commission \nPanelists: \n\u2022\nTamika L. Butler, Principal, Tamika L. Butler Consulting\n\u2022\nJennifer Clark, Professor and Head of City and Regional Planning, Knowlton School of Engineering, Ohio\nState University\n\u2022\nCarl Holshouser, Senior Vice President for Operations and Strategic Initiatives, TechNet", "38bddc3e-d00f-4b32-8f06-51bf7ee089bd": "ENDNOTES\n96. National Science Foundation. NSF Program on Fairness in Artificial Intelligence in Collaboration\nwith Amazon (FAI). Accessed July 20, 2022.\nhttps://www.nsf.gov/pubs/2021/nsf21585/nsf21585.htm\n97. Kyle Wiggers. Automatic signature verification software threatens to disenfranchise U.S. voters.\nVentureBeat. Oct. 25, 2020.\nhttps://venturebeat.com/2020/10/25/automatic-signature-verification-software-threatens-to-disenfranchise-u-s-voters/\n98. Ballotpedia. Cure period for absentee and mail-in ballots. Article retrieved Apr 18, 2022.\nhttps://ballotpedia.org/Cure_period_for_absentee_and_mail-in_ballots\n99. 
Larry Buchanan and Alicia Parlapiano. Two of these Mail Ballot Signatures are by the Same Person.\nWhich Ones? New York Times. Oct. 7, 2020.\nhttps://www.nytimes.com/interactive/2020/10/07/upshot/mail-voting-ballots-signature-matching.html\n100. Rachel Orey and Owen Bacskai. The Low Down on Ballot Curing. Nov. 04, 2020.\nhttps://bipartisanpolicy.org/blog/the-low-down-on-ballot-curing/", "2524a0ed-cf1b-48e3-a937-7968cf441df0": "Confabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Configuration", "30631e3d-b2e2-4cd1-a586-710dc192aae0": "Administration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. The US Department of Transportation has publicly described the health and other benefits of these\n\u201ctraffic calming\u201d measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow-Vehicle-Speeds\n17. Karen Hao. Worried about your firm\u2019s AI ethics? These startups are here to help.\nA growing ecosystem of \u201cresponsible AI\u201d ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan 15., 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://\nwww.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for-", "4495ecb5-1247-453e-aa68-1f4395d7fa9b": "Lee, H. et al. (2024) Deepfakes, Phrenology, Surveillance, and More! A Taxonomy of AI Privacy Risks. \narXiv. https://arxiv.org/pdf/2310.07879 \nLenaerts-Bergmans, B. (2024) Data Poisoning: The Exploitation of Generative AI. Crowdstrike. \nhttps://www.crowdstrike.com/cybersecurity-101/cyberattacks/data-poisoning/ \nLiang, W. et al. (2023) GPT detectors are biased against non-native English writers. arXiv. \nhttps://arxiv.org/abs/2304.02819 \nLuccioni, A. et al. (2023) Power Hungry Processing: Watts Driving the Cost of AI Deployment? arXiv. \nhttps://arxiv.org/pdf/2311.16863 \nMouton, C. et al. (2024) The Operational Risks of AI in Large-Scale Biological Attacks. RAND. \nhttps://www.rand.org/pubs/research_reports/RRA2977-2.html. \nNicoletti, L. et al. (2023) Humans Are Biased. Generative Ai Is Even Worse. Bloomberg. \nhttps://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and", "6c879ac1-2edb-4dc7-aa39-4b70b06464e0": "rograms, and surveillance of classes (whether online or in-person); \nHousing-related systems such as tenant screening algorithms, automated valuation systems that \n estimate the value of homes used in mortgage underwriting or home insurance, and automated \n valuations from online aggregator websites; and \nEmployment-related systems such as workplace algorithms that inform all aspects of the terms \n and conditions of employment including, but not limited to, pay or promotion, hiring or termination algorithms, virtual or augmented reality workplace training programs, and electronic workplace surveillance and management systems. 
\n\u2022 Access to critical resources and services, including but not limited to:\nHealth and health insurance technologies such as medical AI systems and devices, AI-assisted \n diagnostic tools, algorithms or predictive models used to support clinical decision making, medical", "35c66776-cca0-4456-a747-9298ba25ad21": "information used to build or validate the risk assessment shall be open to public inspection,\" and that assertions \nof trade secrets cannot be used \"to quash discovery in a criminal matter by a party to a criminal case.\" \n22", "7ce689a3-f85d-494d-895c-ec39ee9cf4c1": "Data collection and use-case scope limits. Data collection should be limited in scope, with specific, \nnarrow identified goals, to avoid \"mission creep.\" Anticipated data collection should be determined to be \nstrictly necessary to the identified goals and should be minimized as much as possible. Data collected based on \nthese identified goals and for a specific context should not be used in a different context without assessing for \nnew privacy risks and implementing appropriate mitigation measures, which may include express consent. \nClear timelines for data retention should be established, with data deleted as soon as possible in accordance \nwith legal or policy-based limitations. Determined data retention timelines should be documented and justified. \nRisk identification and mitigation. Entities that collect, use, share, or store sensitive data should \nattempt to proactively identify harms and seek to manage them so as to avoid, mitigate, and respond appropri-", "4094bae5-4a0f-4a9a-9b12-570c472171b2": "13 \n\u2022 \nNot every suggested action applies to every AI Actor14 or is relevant to every AI Actor Task. For \nexample, suggested actions relevant to GAI developers may not be relevant to GAI deployers. \nThe applicability of suggested actions to relevant AI actors should be determined based on \norganizational considerations and their unique uses of GAI systems. \nEach table of suggested actions includes: \n\u2022 \nAction ID: Each Action ID corresponds to the relevant AI RMF function and subcategory (e.g., GV-\n1.1-001 corresponds to the first suggested action for Govern 1.1, GV-1.1-002 corresponds to the \nsecond suggested action for Govern 1.1). AI RMF functions are tagged as follows: GV = Govern; \nMP = Map; MS = Measure; MG = Manage. \n\u2022 \nSuggested Action: Steps an organization or AI actor can take to manage GAI risks. \n\u2022 \nGAI Risks: Tags linking suggested actions with relevant GAI risks. \n\u2022 \nAI Actor Tasks: Pertinent AI Actor Tasks for each subcategory. Not every AI Actor Task listed will", "55a2ceec-a531-4aac-946c-eabc0b541811": "Suggested Action \nGAI Risks \nMP-2.2-001 \nIdentify and document how the system relies on upstream data sources, \nincluding for content provenance, and if it serves as an upstream dependency for \nother systems. \nInformation Integrity; Value Chain \nand Component Integration \nMP-2.2-002 \nObserve and analyze how the GAI system interacts with external networks, and \nidentify any potential for negative externalities, particularly where content \nprovenance might be compromised. 
\nInformation Integrity \nAI Actor Tasks: End Users \n \nMAP 2.3: Scientific integrity and TEVV considerations are identified and documented, including those related to experimental \ndesign, data collection and selection (e.g., availability, representativeness, suitability), system trustworthiness, and construct \nvalidation \nAction ID \nSuggested Action \nGAI Risks \nMP-2.3-001 \nAssess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of", "c9d0711a-004e-4887-bef2-e4af6d5b3361": "in some cases. Many states have also enacted consumer data privacy protection regimes to address some of these \nharms. \nHowever, these are not yet standard practices, and the United States lacks a comprehensive statutory or regulatory \nframework governing the rights of the public when it comes to personal data. While a patchwork of laws exists to \nguide the collection and use of personal data in specific contexts, including health, employment, education, and credit, \nit can be unclear how these laws apply in other contexts and in an increasingly automated society. Additional protections would assure the American public that the automated systems they use are not monitoring their activities, \ncollecting information on their lives, or otherwise surveilling them without context-specific consent or legal authority. \n31", "a09d22ae-57cd-4d06-a965-34970ccc4ea9": "11 \nvalue chain (e.g., data inputs, processing, GAI training, or deployment environments), conventional \ncybersecurity practices may need to adapt or evolve. \nFor instance, prompt injection involves modifying what input is provided to a GAI system so that it \nbehaves in unintended ways. In direct prompt injections, attackers might craft malicious prompts and \ninput them directly to a GAI system, with a variety of downstream negative consequences to \ninterconnected systems. Indirect prompt injection attacks occur when adversaries remotely (i.e., without \na direct interface) exploit LLM-integrated applications by injecting prompts into data likely to be \nretrieved. Security researchers have already demonstrated how indirect prompt injections can exploit \nvulnerabilities by stealing proprietary data or running malicious code remotely on a machine. Merely \nquerying a closed production model can elicit previously undisclosed information about that model.", "d2d7fc43-f0e0-4838-86a3-f9dcbd56064c": "assessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23", "86c2b8cb-0b7b-4629-a4bd-6c66a3f84cd7": "APPENDIX\nExamples of Automated Systems \nThe below examples are meant to illustrate the breadth of automated systems that, insofar as they have the \npotential to meaningfully impact rights, opportunities, or access to critical resources or services, should \nbe covered by the Blueprint for an AI Bill of Rights. 
These examples should not be construed to limit that \nscope, which includes automated systems that may not yet exist, but which fall under these criteria. \nExamples of automated systems for which the Blueprint for an AI Bill of Rights should be considered include \nthose that have the potential to meaningfully impact: \n\u2022 Civil rights, civil liberties, or privacy, including but not limited to:\nSpeech-related systems such as automated content moderation tools; \nSurveillance and criminal justice system algorithms such as risk assessments, predictive \n policing, automated license plate readers, real-time facial recognition systems (especially", "4c0a384a-4c92-42cf-a63d-a48998f437d3": "most intimate sphere, including political opinions, sex life, or criminal convictions. \n8 The notion of harm presumes some baseline scenario that the harmful factor (e.g., a GAI model) makes worse. \nWhen the mechanism for potential harm is a disparity between groups, it can be difficult to establish what the \nmost appropriate baseline is to compare against, which can result in divergent views on when a disparity between \nAI behaviors for different subgroups constitutes a harm. In discussing harms from disparities such as biased \nbehavior, this document highlights examples where someone\u2019s situation is worsened relative to what it would have \nbeen in the absence of any AI system, making the outcome unambiguously a harm of the system.", "1a1445dc-a483-4689-a196-3fe2e2191059": "Disparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these", "7e2c9d15-c47a-4453-961d-8f3eaca062a8": "Homogenization \nMS-2.11-004 \nReview, document, and measure sources of bias in GAI training and TEVV data: \nDifferences in distributions of outcomes across and within groups, including \nintersecting groups; Completeness, representativeness, and balance of data \nsources; demographic group and subgroup coverage in GAI system training \ndata; Forms of latent systemic bias in images, text, audio, embeddings, or other \ncomplex or unstructured data; Input data features that may serve as proxies for \ndemographic group membership (i.e., image metadata, language dialect) or \notherwise give rise to emergent bias within GAI systems; The extent to which \nthe digital divide may negatively impact representativeness in GAI system \ntraining and TEVV data; Filtering of hate speech or content in GAI system \ntraining data; Prevalence of GAI-generated data in GAI system training data. 
\nHarmful Bias and Homogenization \n \n \n15 Winogender Schemas is a sample set of paired sentences which differ only by gender of the pronouns used,", "c1da5b2f-e1bd-424d-ab63-6f4b46cf6b46": "and management. One possible way to further categorize these risks, derived in part from the UK\u2019s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.", "73710384-8ad5-4f95-a5aa-6db21daa851a": "for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections", "44a8cf7b-bf66-4022-ac2c-84893ff96c86": "ENDNOTES\n85. Mick Dumke and Frank Main. A look inside the watch list Chicago police fought to keep secret. The\nChicago Sun Times. May 18, 2017.\nhttps://chicago.suntimes.com/2017/5/18/18386116/a-look-inside-the-watch-list-chicago-police-fought-to-keep-secret\n86. Jay Stanley. Pitfalls of Artificial Intelligence Decisionmaking Highlighted In Idaho ACLU Case.\nACLU. Jun. 2, 2017.\nhttps://www.aclu.org/blog/privacy-technology/pitfalls-artificial-intelligence-decisionmaking-highlighted-idaho-aclu-case\n87. Illinois General Assembly. Biometric Information Privacy Act. Effective Oct. 3, 2008.\nhttps://www.ilga.gov/legislation/ilcs/ilcs3.asp?ActID=3004&ChapterID=57\n88. Partnership on AI. ABOUT ML Reference Document. Accessed May 2, 2022.\nhttps://partnershiponai.org/paper/about-ml-reference-document/1/\n89. See, e.g., the model cards framework: Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker\nBarnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru.", "3bc0010a-dd64-4488-aa13-f4356b278088": "NOTICE & \nEXPLANATION \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nPeople in Illinois are given written notice by the private sector if their biometric information is used. The Biometric Information Privacy Act enacted by the state contains a number of provisions \nconcerning the use of individual biometric data and identifiers. Included among them is a provision that no private \nentity may \"collect, capture, purchase, receive through trade, or otherwise obtain\" such information about an \nindividual, unless written notice is provided to that individual or their legally appointed representative. 87\nMajor technology companies are piloting new ways to communicate with the public about", "9b458a26-9b4a-4d70-917c-6720e9cdbac8": "health coverage.\u201d107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. 
These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. Some federal", "4275e423-4206-4cdc-8dcf-31803c65173d": "by potential future employers, banks, or landlords. In one case, a former employee alleged that a\ncompany supplied false data about her job title which resulted in a job offer being revoked.77\n37", "6cd901b5-7e7c-473d-8147-818cf0bc5701": "generation of artificially intelligent partners.95 The National Science Foundation\u2019s program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45", "c4e8bd7d-0329-4e14-9a73-3b0d73e40ed5": "cating adverse outcomes in domains such as finance, employment, and housing, is especially sensitive, and in \nsome cases its reuse is limited by law. Accordingly, such data should be subject to extra oversight to ensure \nsafety and efficacy. Data reuse of sensitive domain data in other contexts (e.g., criminal data reuse for civil legal \nmatters or private sector use) should only occur where use of such data is legally authorized and, after examination, has benefits for those impacted by the system that outweigh identified risks and, as appropriate, reasonable measures have been implemented to mitigate the identified risks. Such data should be clearly labeled to \nidentify contexts for limited reuse based on sensitivity. Where possible, aggregated datasets may be useful for \nreplacing individual-level sensitive data. \nDemonstrate the safety and effectiveness of the system \nIndependent evaluation. Automated systems should be designed to allow for independent evaluation (e.g.,", "c0a4891f-9c78-444c-b309-abba336abc39": "ENDNOTES\n23. National Science Foundation. National Artificial Intelligence Research Institutes. Accessed Sept. 12,\n2022. https://beta.nsf.gov/funding/opportunities/national-artificial-intelligence-research-institutes\n24. National Science Foundation. Cyber-Physical Systems. Accessed Sept. 12, 2022. https://beta.nsf.gov/\nfunding/opportunities/cyber-physical-systems-cps\n25. National Science Foundation. Secure and Trustworthy Cyberspace. Accessed Sept. 12, 2022. https://\nbeta.nsf.gov/funding/opportunities/secure-and-trustworthy-cyberspace-satc\n26. National Science Foundation. Formal Methods in the Field. Accessed Sept. 12, 2022. https://\nbeta.nsf.gov/funding/opportunities/formal-methods-field-fmitf\n27. National Science Foundation. Designing Accountable Software Systems. Accessed Sept. 12, 2022.\nhttps://beta.nsf.gov/funding/opportunities/designing-accountable-software-systems-dass\n28. The Leadership Conference Education Fund. The Use Of Pretrial \u201cRisk Assessment\u201d Instruments: A", "2f83cb98-b4f1-4c32-85c6-9372b78cdd82": "tion history with that of her dog\u2019s. 
Even after she tracked down an explanation for the problem, doctors\nwere afraid to override the system, and she was forced to go without pain relief due to the system\u2019s error.103\n\u2022 A large corporation automated performance evaluation and other HR functions, leading to workers being\nfired by an automated system without the possibility of human review, appeal or other form of recourse.104 \n48", "b83971f2-8769-4a69-b89a-4eb71d1a867b": "informed by representative AI Actors. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.1-005 \nEvaluate novel methods and technologies for the measurement of GAI-related \nrisks including in content provenance, offensive cyber, and CBRN, while \nmaintaining the models\u2019 ability to produce valid, reliable, and factually accurate \noutputs. \nInformation Integrity; CBRN \nInformation or Capabilities; \nObscene, Degrading, and/or \nAbusive Content", "93135040-675d-4b43-855e-a1606a30e1a7": "5 \noperations, or other cyberattacks; increased attack surface for targeted cyberattacks, which may \ncompromise a system\u2019s availability or the confidentiality or integrity of training data, code, or \nmodel weights. \n10. Intellectual Property: Eased production or replication of alleged copyrighted, trademarked, or \nlicensed content without authorization (possibly in situations which do not fall under fair use); \neased exposure of trade secrets; or plagiarism or illegal replication. \n11. Obscene, Degrading, and/or Abusive Content: Eased production of and access to obscene, \ndegrading, and/or abusive imagery which can cause harm, including synthetic child sexual abuse \nmaterial (CSAM), and nonconsensual intimate images (NCII) of adults. \n12. Value Chain and Component Integration: Non-transparent or untraceable integration of \nupstream third-party components, including data that has been improperly obtained or not \nprocessed and cleaned due to increased automation from GAI; improper supplier vetting across", "0e9f5217-a16b-4485-b733-ab139fed5db0": "or insurance health risk assessments, drug addiction risk assessments and associated access algorithms, wearable technologies, wellness apps, insurance care allocation algorithms, and health\ninsurance cost and underwriting algorithms;\nFinancial system algorithms such as loan allocation algorithms, financial system access determination algorithms, credit scoring systems, insurance algorithms including risk assessments, automated interest rate determinations, and financial algorithms that apply penalties (e.g., that can\ngarnish wages or withhold tax returns);\n53", "f4d5686d-1caf-40d9-9401-c84e128b0a30": "SAFE AND EFFECTIVE SYSTEMS \nYou should be protected from unsafe or ineffective systems. Automated systems should be developed with consultation \nfrom diverse communities, stakeholders, and domain experts to identify concerns, risks, and potential impacts of the system. Systems \nshould undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring that demonstrate they are safe and \neffective based on their intended use, mitigation of unsafe outcomes \nincluding those beyond the intended use, and adherence to domain-specific standards. Outcomes of these protective measures \nshould include the possibility of not deploying the system or removing a system from use. 
Automated systems should not be designed \nwith an intent or reasonably foreseeable possibility of endangering \nyour safety or the safety of your community. They should be designed \nto proactively protect you from harms stemming from unintended,", "46776766-3bdc-4e9c-a580-130ca26e7504": "ENDNOTES\n47. Darshali A. Vyas et al., Hidden in Plain Sight \u2013 Reconsidering the Use of Race Correction in Clinical\nAlgorithms, 383 N. Engl. J. Med.874, 876-78 (Aug. 27, 2020), https://www.nejm.org/doi/full/10.1056/\nNEJMms2004740.\n48. The definitions of 'equity' and 'underserved communities' can be found in the Definitions section of\nthis framework as well as in Section 2 of The Executive Order On Advancing Racial Equity and Support\nfor Underserved Communities Through the Federal Government. https://www.whitehouse.gov/\nbriefing-room/presidential-actions/2021/01/20/executive-order-advancing-racial-equity-and-support-for-underserved-communities-through-the-federal-government/\n49. Id.\n50. Various organizations have offered proposals for how such assessments might be designed. See, e.g.,\nEmanuel Moss, Elizabeth Anne Watkins, Ranjit Singh, Madeleine Clare Elish, and Jacob Metcalf.\nAssembling Accountability: Algorithmic Impact Assessment for the Public Interest. Data & Society", "36392958-09c9-4300-9271-e4655423509a": "SECTION TITLE\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people\u2019s \nopportunities, undermine their privacy, or pervasively track their activity\u2014often without their knowledge or \nconsent. \nThese outcomes are deeply harmful\u2014but they are not inevitable. Automated systems have brought about extraor-", "b6584983-93a2-4fe2-bdca-aa01408352d1": "data in order to generate derived synthetic content. This can include images, videos, audio, text, and other digital \ncontent.\u201d While not all GAI is derived from foundation models, for purposes of this document, GAI generally refers \nto generative foundation models. The foundation model subcategory of \u201cdual-use foundation models\u201d is defined by \nEO 14110 as \u201can AI model that is trained on broad data; generally uses self-supervision; contains at least tens of \nbillions of parameters; is applicable across a wide range of contexts.\u201d \n2 This profile was developed per Section 4.1(a)(i)(A) of EO 14110, which directs the Secretary of Commerce, acting \nthrough the Director of the National Institute of Standards and Technology (NIST), to develop a companion \nresource to the AI RMF, NIST AI 100\u20131, for generative AI.", "dd97552e-cc7d-4c14-a4cc-1c23c836536c": "decision making, and enhancing system documentation and debugging practices. 
When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.", "3380d726-bb80-484e-ae21-fb3eac80028a": "Software & Information Industry \nAssociation \nStephanie Dinkins and the Future \nHistories Studio at Stony Brook \nUniversity \nTechNet \nThe Alliance for Media Arts and \nCulture, MIT Open Documentary \nLab and Co-Creation Studio, and \nImmerse \nThe International Brotherhood of \nTeamsters \nThe Leadership Conference on \nCivil and Human Rights \nThorn \nU.S. Chamber of Commerce\u2019s \nTechnology Engagement Center \nUber Technologies \nUniversity of Pittsburgh \nUndergraduate Student \nCollaborative \nUpturn \nUS Technology Policy Committee \nof the Association of Computing \nMachinery \nVirginia Puccio \nVisar Berisha and Julie Liss \nXR Association \nXR Safety Initiative \n\u2022 As an additional effort to reach out to stakeholders regarding the RFI, OSTP conducted two listening sessions\nfor members of the public. The listening sessions together drew upwards of 300 participants. The Science and\nTechnology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61", "9fff3b99-08a8-4b76-bff6-66d7574d47c2": "19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identi\ufb01cation process. \nHuman-AI Con\ufb01guration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party", "8e850453-d994-4bbd-9857-bff1ebc1c2f0": "DATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nData privacy is a foundational and cross-cutting principle required for achieving all others in this framework. Surveil\u00ad\nlance and data collection, sharing, use, and reuse now sit at the foundation of business models across many industries, \nwith more and more companies tracking the behavior of the American public, building individual profiles based on \nthis data, and using this granular-level information as input into automated systems that further track, profile, and \nimpact the American public. 
Government agencies, particularly law enforcement agencies, also use and help develop \na variety of technologies that enhance and expand surveillance capabilities, which similarly collect data used as input", "3a4199c4-2b01-412a-b7c5-d9e83cddf2f9": "consequential decision-making settings like employment and lending can result in increased susceptibility by \nsystems to correlated failures (like unexpected shocks), due to multiple actors relying on the same algorithm. \n4 Many studies have projected the impact of AI on the workforce and labor markets. Fewer studies have examined \nthe impact of GAI on the labor market, though some industry surveys indicate that that both employees and \nemployers are pondering this disruption.", "3732bb56-dad5-418a-bc2f-796ccdd31a0e": "5. Environmental Impacts: Impacts due to high compute resource utilization in training or \noperating GAI models, and related outcomes that may adversely impact ecosystems. \n6. Harmful Bias or Homogenization: Ampli\ufb01cation and exacerbation of historical, societal, and \nsystemic biases; performance disparities8 between sub-groups or languages, possibly due to \nnon-representative training data, that result in discrimination, ampli\ufb01cation of biases, or \nincorrect presumptions about performance; undesired homogeneity that skews system or model \noutputs, which may be erroneous, lead to ill-founded decision-making, or amplify harmful \nbiases. \n7. Human-AI Con\ufb01guration: Arrangements of or interactions between a human and an AI system \nwhich can result in the human inappropriately anthropomorphizing GAI systems or experiencing \nalgorithmic aversion, automation bias, over-reliance, or emotional entanglement with GAI \nsystems.", "8ef39111-e7ab-41c7-894a-f4b19f7e92fd": "and user perceptions of content authenticity. Analyze user feedback to identify \nconcerns and/or current literacy levels related to content provenance and \nunderstanding of labels on content. \nHuman-AI Con\ufb01guration; \nInformation Integrity \nMS-2.7-004 \nIdentify metrics that re\ufb02ect the e\ufb00ectiveness of security measures, such as data \nprovenance, the number of unauthorized access attempts, inference, bypass, \nextraction, penetrations, or provenance veri\ufb01cation. \nInformation Integrity; Information \nSecurity \nMS-2.7-005 \nMeasure reliability of content authentication methods, such as watermarking, \ncryptographic signatures, digital \ufb01ngerprints, as well as access controls, \nconformity assessment, and model integrity veri\ufb01cation, which can help support \nthe e\ufb00ective implementation of content provenance techniques. Evaluate the \nrate of false positives and false negatives in content provenance, as well as true \npositives and true negatives for veri\ufb01cation. \nInformation Integrity \nMS-2.7-006", "b4dfe088-8bb2-4a98-b320-22a7dd07c65c": "You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\u00ad\ned systems should provide generally accessible plain language docu\u00ad\nmentation including clear descriptions of the overall system func\u00ad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\u00ad\nplanations of outcomes that are clear, timely, and accessible. 
Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\u00ad\nes. You should know how and why an outcome impacting you was de\u00ad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who", "a0be6da2-7bf3-45f6-81b9-30d724b4dd3f": "WHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below", "e1358306-678d-4e98-9775-74065ea4ad8e": "HUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere are many reasons people may prefer not to use an automated system: the system can be flawed and can lead to \nunintended outcomes; it may reinforce bias or be inaccessible; it may simply be inconvenient or unavailable; or it may \nreplace a paper or manual process to which people had grown accustomed. Yet members of the public are often \npresented with no alternative, or are forced to endure a cumbersome process to reach a human decision-maker once \nthey decide they no longer want to deal exclusively with the automated system or be impacted by its results. As a result \nof this lack of human reconsideration, many receive delayed access, or lose access, to rights, opportunities, benefits,", "702acaa9-737f-44b6-a74c-929662b0fa66": "Suggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Con\ufb01guration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might a\ufb00ect \ndi\ufb00erent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. 
\nHuman-AI Con\ufb01guration; \nInformation Integrity \nMS-3.3-003", "657cf38d-9798-4c12-83e4-12695c9d7a54": "plugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identi\ufb01cation of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the e\ufb00ectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. \nInformation Security"}} \ No newline at end of file diff --git a/Tasks/Task 4/training_questions.json b/Tasks/Task 4/training_questions.json new file mode 100644 index 0000000000000000000000000000000000000000..00acfac5f18d6a00573b0a265723b4d0b32ca06a --- /dev/null +++ b/Tasks/Task 4/training_questions.json @@ -0,0 +1 @@ +{"1d8a0cfc-df53-467f-88e8-cd378990da4b": "What are the key steps to obtain input from stakeholder communities to identify unacceptable use in AI systems?", "9ec9d981-9115-4672-bd74-23035ecc2e7f": "How can organizations maintain an updated hierarchy of identified and expected GAI risks?", "0605df28-3443-4e71-b065-c5e957b1a3be": "What are some examples of unacceptable uses of AI as identified by stakeholder communities?", "bf3d5106-092b-4dcd-84c8-a6f876550060": "How do harmful bias and homogenization impact AI systems?", "84fa4ea9-be18-4dd5-9e29-a6a5c02feb54": "What is the significance of reevaluating organizational risk tolerances in the context of AI and GAI?", "bb5dcf48-cb19-47b4-b313-265f9f7cb3c8": "What are the potential risks associated with model collapse and algorithmic monoculture in GAI systems?", "bc5f183f-8819-4d54-9853-b8f799e02906": "How can organizations address issues related to obscene, degrading, and/or abusive content in AI systems?", "8d9f5ceb-fce8-4d14-9811-6140c0e69900": "What strategies can be employed to mitigate dangerous, violent, or hateful content in AI applications?", "2f174ff7-4261-4c60-a49e-51bc536fd900": "How do immature safety or risk cultures affect the design, development, and deployment of AI and GAI systems?", "1cef392e-71ff-400b-a4bb-39f4c8d78dab": "What are the public information integrity risks associated with AI and GAI, and how can they be managed?", "b2fdeb09-6112-4cbd-a860-6b770d65c9ef": "What are the short, mid, and long-term impacts of AI in cybersecurity according to De Angelo (2024)?", "50a10f19-e2b0-4f53-bf39-c09e6b2adedb": "How do chatbots and generative AI affect mental health, based on the insights from De Freitas et al (2023)?", "41d1c796-9d40-450d-bb90-58541f4dc294": "What is algorithm aversion and why do people avoid algorithms after seeing them err, as discussed by Dietvorst et al (2014)?", "8ea4ce00-241b-4a28-9f10-127da59b19a3": "How do companies learn consumer secrets according to Duhigg (2012)?", "ee13b775-a27a-4b83-b019-8da1bfe33cd4": "How can images altered to trick machine vision influence humans, as explored by Elsayed et al (2024)?", "0835eb5b-ff9b-4c95-98a1-c1aa96b695d1": "What are the key findings of the Harvard Business School study on the safety of 
generative AI in mental health?", "a25fa3f1-5fad-4e5f-8635-3c5d7564bc9d": "How does the New York Times article by Duhigg (2012) explain the methods companies use to track shopping habits?", "cad3f6f1-e1cf-428f-b6c0-986a72e27923": "What are the implications of the research by Google DeepMind on altered images and machine vision?", "7795698f-b343-44b2-90b7-333d3e874947": "How does the study by Dietvorst et al (2014) contribute to our understanding of human interaction with algorithms?", "0563fa4d-b516-4410-ba89-1f0315067735": "What are the potential risks and benefits of AI in cybersecurity as outlined by Palo Alto Networks?", "32e11143-ad57-48c7-b0ce-e4922c8b1cc7": "What are the different risk response options mentioned in MANAGE 13 for high-priority AI risks?", "94e3e1d2-3500-48e6-97a6-8c83517b41e0": "How should organizations document trade-offs and decision processes for AI risks that do not surpass risk tolerance?", "abb3f98f-480d-4f2d-bf70-0bb22379dbae": "What is a staged release approach in the context of model release for AI systems?", "2a61da93-9e17-4be7-862f-ff9f941284a1": "How can organizations mitigate, transfer, or avoid AI risks that surpass their risk tolerances?", "6b6dc282-ed05-4387-ae3c-28fea9f6aa32": "What methods can be used to monitor the robustness and effectiveness of AI risk controls and mitigation plans?", "a435043d-540e-4134-9d59-e921db6866dd": "What is the role of red-teaming in assessing AI risk controls?", "c64396a8-0017-4d46-aea9-58f81191f6b9": "How can participatory engagements help in monitoring AI risk mitigation plans?", "90341a67-3948-47fe-acaa-32e9896acdd5": "Why is it important to consider the projected use cases of a model when planning its release?", "a93e9f47-5572-4f65-810c-e65f0c0cbc35": "What are some examples of performance assessments for AI risk controls?", "6b14b109-2e7e-4d40-b149-1eb05bc882a0": "How can user feedback mechanisms contribute to the effectiveness of AI risk mitigation plans?", "cde52e0d-45eb-41b7-89ac-0e8e28ac0457": "What methods can be used to trace the origin and modifications of digital content?", "0d6f47cc-2f11-4d1e-9d54-90b10f6bf2b6": "How can tools designed to analyze content provenance help in detecting data anomalies?", "ed8c6ae4-9a32-44a5-b820-0681f03fd325": "What are the best practices for verifying the authenticity of digital signatures?", "cf07e760-9b9f-4be0-aaf2-9ce158ee1c98": "How can patterns associated with misinformation or manipulation be identified?", "7a9b270d-4022-4dee-8044-c221948c7d25": "Why is it important to disaggregate evaluation metrics by demographic factors?", "93f3824c-e2a6-424d-a45b-f7a6e593c5d7": "What are the potential risks of harmful bias and homogenization in AI systems?", "c0441d12-0139-47e0-a780-13868a3b0469": "How can discrepancies in content provenance mechanisms across diverse populations be identified?", "7a65af06-b620-44fe-90b4-53bdcb7c8559": "What metrics can be used to evaluate structured public feedback exercises?", "3cc7ff9c-22a7-4d2c-a02f-353ba7ef9fe1": "How should risks or trustworthiness characteristics that cannot be measured be documented?", "daf97136-c510-4453-a3f3-dfdd41c8a7eb": "What are the most significant AI risks that should be prioritized for measurement?", "4f8ef4f7-9616-44aa-abfe-8cdaf1756b90": "What are the key points of the Executive Order on the Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence issued by the White House in 2023?", "6f46f00e-c286-4eb0-815e-32faf4967e59": "How does the 2022 Roadmap for Researchers on Priorities Related to 
Information Integrity Research and Development address misinformation?", "bf6d72d3-a20e-4589-8cbd-9e4fcf8ff4f9": "What were the findings of the Stanford Cyber Policy Center's investigation into AI image generation models trained on child abuse?", "1c142ec8-b4a3-456e-ba1c-a485944b6ca4": "How does the White House's 2023 Executive Order aim to ensure the ethical development of AI technologies?", "b39288aa-0a9d-4d94-82e2-35409659c7d1": "What are the main objectives outlined in the White House's 2022 Roadmap for Information Integrity Research and Development?", "21eaa61b-6208-40e7-b9a3-5a37038f9d48": "What measures are being proposed to secure AI development according to the 2023 Executive Order by the White House?", "1233bc76-0ce0-4740-bad7-5de774ca0fd7": "How does the 2022 Roadmap for Researchers contribute to combating disinformation and ensuring information integrity?", "988d7c42-6b16-42d7-8c3b-eca27423ec98": "What are the implications of the findings from the Stanford Cyber Policy Center regarding AI image generation models and child abuse?", "d0ee89a9-21ce-4521-a946-de628b905143": "How does the White House plan to address the challenges of AI safety and security as per the 2023 Executive Order?", "23394fb9-945c-4c8e-a361-a1cfd375abe0": "What steps are being taken to ensure trustworthy AI development according to the latest executive order from the White House?", "37aabec8-0539-4438-8489-8a52afbff96e": "What are the best practices for implementing real-time monitoring processes for content integrity?", "a8ce087b-ada7-4cca-8488-e90b57a28b9f": "How can machine learning models be used to flag illegal or violent content in GAI applications?", "607fa183-8c10-4e68-860f-41798c764cc8": "What are the challenges in detecting CSAM and NCII content using automated filters?", "f9c3b55d-7f98-4fca-a8b3-2276e0cf58d9": "How can rule-based filters be effectively combined with machine learning models to identify harmful content?", "f8fcd7dd-4077-4dd8-8a19-cea03ba0bda5": "What measures can be taken to ensure information integrity in generated content?", "21f56f77-f14f-4eb0-ae84-a15ddf44c478": "How can real-time monitoring help in identifying deviations from content standards?", "c1c10c23-aba8-48a1-a47f-0ea7b9ee4058": "What are the key characteristics to monitor for ensuring content trustworthiness?", "bb1d1592-7123-4163-a963-913da8006be9": "How can human intervention be effectively triggered by automated alerts in content monitoring systems?", "07ea05d4-6b90-4936-9dae-b67d0e70640d": "What are the potential biases that can arise in machine learning models used for content filtering?", "2f84ffbd-1a21-475b-a55e-ed7b70ad40b5": "How can the homogenization of content be prevented in GAI applications?", "9e4c34f4-3d3e-45f8-b44c-86725048d92b": "What are the key findings of the study \"Dissecting racial bias in an algorithm used to manage the health of populations\" by Ziad Obermeyer and colleagues?", "64f82a67-4768-4e6b-a43d-f04086818e85": "How does the Data & Trust Alliance propose to safeguard against algorithmic bias in the workforce?", "7f45b7a7-9fcf-4391-89e9-8cc77c095f2a": "What are the main IT accessibility laws and policies outlined on Section 508gov?", "78c93a0c-adfa-4c7e-a126-d47d3e13352b": "Can you summarize the 2019 Science article on racial bias in health management algorithms?", "f9d25a5e-23c8-45ee-aef2-d7db0c12b767": "What measures are recommended by the Data & Trust Alliance to ensure fairness in workforce algorithms?", "86604d73-f71a-46b4-b444-0f78f434d3a8": "How does Section 508gov help 
organizations comply with IT accessibility requirements?", "511742cc-dc64-43a3-a033-bd545ce33e57": "What are the implications of the findings by Obermeyer et al on racial bias in health algorithms for healthcare providers?", "04a0b08c-92b8-44bb-886c-252f86746d2c": "What is the purpose of the Algorithmic Bias Safeguards for Workforce document by the Data & Trust Alliance?", "3e71278b-e893-4a19-955e-1505554c8686": "How can organizations implement the guidelines provided by Section 508gov for IT accessibility?", "04516eef-32ce-4fcc-8b77-263ca9de6e10": "What are the potential impacts of algorithmic bias in workforce management as discussed by the Data & Trust Alliance?", "a0c0459c-054c-42ad-bffd-3e69e7a03bbc": "What are the potential impacts of automated systems on data privacy?", "b159acfb-35e3-4cbb-8158-d4d89ccc5870": "How can we ensure that automated systems do not use data inappropriately?", "391ca451-b0e7-43cc-afcc-32b7d1fa8da7": "What measures can be taken to protect against the compounded harm of data reuse in automated systems?", "b15c0ca5-79fe-4cb6-971d-891ab63966bb": "Why is independent evaluation important for the safety and effectiveness of automated systems?", "f8ed278b-212e-45c9-ae9f-5fe98bbd00fb": "What steps should be taken to mitigate potential harms of automated systems?", "c034102c-8cc6-4f9f-aa97-54c4123b05b8": "How can the results of independent evaluations of automated systems be made public?", "19a80a1d-4c42-4b4a-a9f1-8c58f96c948b": "What are the foreseeable uses of automated systems in various industries?", "84b94c19-44d8-4b0e-a9ea-21d1b4e745a9": "How can we ensure transparency in the development and deployment of automated systems?", "08b5ee06-991e-4f06-9629-71d3500295ab": "What are the best practices for reporting the safety and effectiveness of automated systems?", "4328f04f-39f0-472e-a479-26bcd1915133": "How can we protect individuals from the misuse of data by automated systems?", "03a9fe91-d78e-473a-a2f8-3106648d7314": "What are black-box credit models and why is the CFPB acting to protect the public from them?", "c5f06a48-24b4-4bf0-85f8-836e9d96099d": "What does California's AB 701 law entail for warehouse employers regarding quotas?", "2335610a-fe1a-4ccd-b194-0d8b2734a4d1": "How does the National Institute of Standards and Technology (NIST) contribute to AI explainability research?", "fe0c5f11-3d4c-44d7-8f27-6a2da2f8536e": "What is the significance of explainable artificial intelligence (XAI) according to DARPA?", "5342b516-b61d-4ea4-a2da-9a05638d3918": "How might complex algorithms in credit models impact consumer finance?", "f43fcf7f-345c-4942-b43d-e75917a9d53a": "What are the potential benefits and challenges of implementing AB 701 in California warehouses?", "4a15e20a-7144-40e1-93e3-114864d97085": "Why is explainability important in artificial intelligence research?", "32cfcd73-0b8c-4469-91c1-5f5209f15d84": "What are some examples of black-box models in the context of credit scoring?", "85538c5c-826d-45bc-8de8-297408b1897a": "How does DARPA's XAI program aim to improve the transparency of AI systems?", "08be8570-a2e0-4e56-af80-157c2427baf9": "What steps can employers take to comply with California's AB 701 law on warehouse quotas?", "24c87258-bf5b-4144-b1cf-149daa5a5c29": "What is the NIST framework for AI technologies and systems?", "1f1e3f39-8772-4eb1-b7f0-12ff89060503": "How does the NIST framework address robustness in AI systems?", "69e2d6b3-12b3-47d1-a249-65657ba34d64": "What measures does the NIST framework propose for ensuring the safety of AI 
technologies?", "f139859c-a3ce-4700-a971-5548dc026253": "How does the NIST framework aim to enhance the security and resilience of AI systems?", "04bebb3a-8ecf-4543-8736-671e1a4585be": "What strategies does the NIST framework suggest for mitigating unintended and harmful biases in AI?", "df7b1cdc-bb0a-43b5-a10c-7189eb63373e": "How does the NIST framework promote transparency in the development and deployment of AI technologies?", "7cb4296e-4abe-4bd0-a8ff-edaf4a243ffd": "What accountability mechanisms are included in the NIST framework for AI systems?", "12b8555f-380e-4b4a-adc7-418d59a61297": "How does the NIST framework ensure fairness during the pre-design and design phases of AI development?", "3aef7413-5f60-4e9b-a6ac-191b6972651c": "What are the key principles of the NIST framework for AI testing and evaluation?", "65303757-3ce8-4416-96eb-99d843d040d1": "When is the NIST framework for AI technologies expected to be released?", "a7a1d3dc-f420-48c3-a3cb-1e0a676e9e7b": "What are the key considerations for using AI in sensitive domains like criminal justice and health?", "24566fed-3d75-47f8-80b2-03f3d0e9e7d3": "How can meaningful access for oversight be ensured in AI systems used in employment and education?", "1cc3e51d-3cba-4b58-b525-f86babb21bfb": "What kind of training should be provided to people interacting with AI systems in sensitive areas?", "e0f9ad8a-c33a-4717-88ac-d8559f6c974a": "Why is it important to incorporate human consideration in high-risk AI decisions?", "d4342127-5df0-405f-bbe8-dc24711944be": "How can organizations report on the human governance processes of their AI systems?", "65bf43cf-81c8-4872-ae3b-81bf21751ba3": "What are the best practices for assessing the timeliness and effectiveness of AI systems in sensitive domains?", "2f3abec1-74b5-405f-b734-843ecc211a3a": "How can transparency be maintained when using AI in areas like criminal justice and health?", "a4768a2a-62c0-471c-ab81-e9e0d81d83c0": "What are the potential risks of not including human oversight in AI systems used in employment?", "d68eeb41-7bcf-4744-80e9-7614c92438a8": "How can adverse decisions made by AI systems be mitigated in sensitive fields?", "5d143734-c64d-4ee5-9600-544de3dc380b": "What role does public reporting play in the governance of AI systems in education and health?", "2aac2f6a-920b-4393-b61e-98aa36d35f27": "What is the role of NIST in the research on explainable AI systems?", "6de62d16-19cb-45d2-949b-92609186983f": "What are the core tenets of explainable AI that NIST aims to support?", "8d9912fd-4615-4118-8134-a893b80920c1": "How does the Defense Advanced Research Projects Agency contribute to explainable AI?", "8e48c274-2976-4927-b9ab-6bef0546c6e1": "What are the goals of the Defense Advanced Research Projects Agency's program on Explainable Artificial Intelligence?", "7c6ecd5b-5475-43a4-8259-af3a5f014d39": "What is the importance of explainable AI in machine learning models?", "cfab0915-6e9f-412a-a432-bc0a06148923": "How do explainable AI systems help human users?", "eaa0dca7-f7a9-4bac-a8f3-dc0e410b9a94": "What are the potential adverse employment actions for failing to meet a defined quota?", "67312adb-b7a3-4c16-84b9-4e4697e002e8": "What kind of research is being conducted across federal agencies on explainable AI?", "d1c669d5-4d91-4300-8e70-5a49b9b7b517": "What are the best practices for implementing explainable AI according to NIST?", "a44278d2-1d80-4211-93b9-785f091ee871": "How does explainable AI maintain a high level of learning performance while being understandable to users?", 
"670b6be9-bff2-4e3f-9ddb-48503a59925b": "What are the key components of effective governance procedures for automated business processes?", "966409cb-250a-4100-9cb5-4cc606c16452": "How can organizations ensure that stakeholders are adequately involved in the governance of automated systems?", "42ce5b4d-fe04-4d95-87e8-bd0a8f2a1cf3": "What level of organizational responsibility is necessary for prompt decision-making in resource allocation and incident response?", "5a9cd608-2ac4-4bfa-8cfa-ca357e9841ac": "Why is it important to consider risk mitigation objectives against competing concerns in automated systems?", "a18b2932-8fa5-4635-9302-7ee0cbaf194a": "How should organizations handle use cases that have a significant impact on people's rights and opportunities?", "379549a5-d7e5-47a9-bafb-a7408746aca0": "When is it appropriate to conduct an independent ethics review before deploying an automated system?", "03540efe-c7b8-4bd6-8835-c6f7bb4e30b5": "What are the risks associated with the use of inappropriate or low-quality data in automated systems?", "09f74024-8869-4f32-b626-2138e3564a74": "How can organizations prevent the compounded harm of reusing low-quality or irrelevant data?", "d8dc0a3b-18dc-42ae-93f3-7614e9b55cfa": "What procedures should be in place for risk identification in automated business processes?", "ecdf8fe5-f149-44ac-b465-3fbacb40865e": "How can organizations balance the need for automation with the potential ethical implications for stakeholders?", "f5606d22-27dc-4b36-9ace-72d6cc48660e": "What are the best practices for engaging diverse communities when introducing a new automated system?", "ae491447-4cf4-4991-b43e-08454411a5a6": "How can we ensure that the concerns of disproportionately impacted communities are adequately addressed?", "7ee78a3e-1ea7-4da5-875a-f65aa6f20054": "What types of experts should be consulted during the development phase of an automated system?", "2078e8f7-9bea-4153-bc28-2944699c7a6e": "How can privacy and civil liberties be protected when implementing large changes in automated systems?", "7b087c9c-e491-4d32-b692-81313a1e6afe": "What are the challenges of maintaining confidentiality in private sector consultations before product launch?", "208c1297-47eb-4aca-9561-f53fdb900d7b": "How should government applications balance the need for confidentiality with stakeholder engagement?", "44b1a320-c340-4a37-bcf5-c93a6963e2e0": "What are the specific considerations for law enforcement applications of automated systems?", "c223bce2-8a81-489c-9230-31400072a3fc": "How do preexisting oversight laws impact the consultation process for new automated systems?", "8b96696a-d368-4159-b644-3e82a3269da4": "What strategies can be used to engage sector-specific experts effectively in the consultation process?", "e0ca9dfa-fbe1-41a0-b064-9e4655a76477": "How can the risks unique to certain communities be identified and mitigated during system development?", "ee3ed9e9-e13a-43ae-8468-ceb8ffcf49b6": "What are some current uses of technology that impact equity of opportunity in employment?", "56c869aa-9f69-476f-bb2c-ff1cb0080445": "How is technology influencing equal opportunities in education?", "ecc2c5eb-78a6-4d19-b33b-19b08c0fdbfe": "What role does the White House Office of Science and Technology Policy play in promoting civil justice?", "db262b36-e3f4-48a0-bddc-dc36a289a41e": "How can technology be leveraged to ensure fair housing opportunities?", "6d61f78a-7ebf-45f4-a5e6-4881a583f4c8": "What insights did Christo Wilson provide regarding technology and employment equity?", 
"d1b8703a-3114-4088-9309-0c02821f5a37": "How does Pymetrics, led by Frida Polli, use technology to promote equal opportunities?", "80cdf547-f019-49d5-b954-fc44fb501eef": "What are some emerging technologies that could impact civil justice, according to Karen Levy?", "074222ac-fda2-47de-b57c-5ddc1063b7ba": "How does Upturn, directed by Natasha Duarte, address issues of technology and equity?", "55189c9a-9917-4b08-9b39-7f0a6c48256e": "What legal perspectives did Elana Zeide offer on technology's role in civil justice?", "50e8150e-4538-45b0-a17d-be191d1fa248": "How does Fabian Rogers' work with NY State Senator Jabari Brisport's office relate to technology and community advocacy?", "eb5cb844-864a-468b-8c58-527d3531343d": "What are the best practices for involving national security professionals in AI system risk management?", "62d2b5a7-b986-4048-bb97-766d7b98ce5e": "How can organizations effectively map and measure national security risks associated with AI systems?", "f72b3652-6c80-4c83-8aeb-9eee8ef96c3e": "What mechanisms can be implemented to protect whistleblowers in AI governance?", "afb866f7-aa0b-4116-9378-f258a4bf12ad": "How should organizations handle CBRN information or capabilities in AI systems?", "a1ddee9f-3320-4745-8c67-624c95b262a7": "What steps can be taken to manage dangerous, violent, or hateful content in AI applications?", "099e3b3a-218d-4c72-8460-0c899484ecd1": "What are the key components of an effective whistleblower protection mechanism in AI governance?", "b923df73-4a58-4d22-8b55-1a01145c5ac7": "How can AI systems be designed to comply with information security requirements?", "1464f6bf-4914-46ea-a181-13cbb9e7058b": "What role do national security professionals play in the oversight of AI systems?", "4e333af2-62cb-4a18-9737-5e42d27891af": "How can organizations ensure that they are not violating laws related to public safety with their AI systems?", "44cf4f5a-ee5c-4173-8c24-e6a9084636d9": "What are the challenges in providing protections for whistleblowers in the context of AI governance?", "732e3e58-5725-48ab-a40c-961aea398141": "What are some methods to evaluate gender bias in coreference resolution systems?", "c0e18063-83e5-462a-b99d-ca04e73f72f1": "How can gender bias in NLP coreference resolution systems be detected?", "ab81895e-d428-4fd5-bed3-030440a61e22": "What datasets are available for assessing gender bias in coreference resolution?", "33ec5775-e26d-4365-a8e5-69cfded28cf2": "Are there specific metrics to measure gender bias in coreference resolution systems?", "595f4527-0e21-40a7-b9ec-6a757d80f511": "How does gender bias impact the performance of coreference resolution systems?", "1bc870b7-1589-44a0-ae78-e41f637ce93b": "Can you recommend tools for evaluating gender bias in NLP coreference resolution?", "25ab951c-e59e-43dc-9315-79f04297d81b": "What are the challenges in evaluating gender bias in coreference resolution systems?", "c8004ebf-4187-4dab-b1fe-1c725fa9d4f4": "How can we mitigate gender bias in coreference resolution models?", "99062576-8040-45bb-ba14-33e7d06843c4": "What role do pronouns play in gender bias within coreference resolution systems?", "f631fe4b-8511-4605-b562-6b32a5fc4ce3": "Are there any case studies on gender bias in coreference resolution systems?", "e909abea-82ea-425e-9eb4-e7fc5cfd1afa": "What are the key technical and governance interventions needed to protect against the harms of emerging technologies?", "8f2b141e-7e25-48c4-8d6e-c7649407098e": "How can transparency and data collection help in mitigating the negative impacts of new 
technologies?", "2504edde-ca0f-4ab0-b316-072c94ed4fd7": "Why is flexible and reactive policy development important in the context of technological advancements?", "ee944244-c6e4-4f82-81c9-f965ee4f8f7c": "What role do clear guidelines play in fostering a consistent environment for innovation in technology companies?", "fddee110-d5c1-4e71-a392-a18885c4d603": "How can principles and guardrails contribute to responsible innovation in the tech industry?", "0a8ddcee-d0d1-41e1-a6c9-246be5b5ae5f": "What are the current and emergent uses of technology in the criminal justice system?", "c51a2c1a-9f87-488a-9d42-41968bf4cab1": "How do technological advancements in the criminal justice system impact public safety?", "95c6ba82-ae2a-411b-8572-31a3d48cc6a7": "In what ways can technology undermine justice and democratic values within the criminal justice system?", "0c97280b-063a-46a8-bbd5-f47e888354e5": "What are the potential benefits of using technology in the criminal justice system?", "fa8d6dc4-7eba-45d5-a66a-eb6b8ab2ecad": "Who is Suresh Venkatasubramanian and what is his role in the White House Office of Science and Technology Policy?", "21ceca0c-ce44-43e3-8f77-a9dc1f5fbcc7": "How has facial recognition technology led to wrongful arrests?", "d3a9c980-e575-44e6-bab5-07aaca0e2886": "What are the implications of AI bias in law enforcement?", "7f116d0d-2c08-40d1-817d-6f4c1826f047": "How many Black men have been wrongfully arrested due to facial recognition errors?", "380fee90-ebb8-4757-8fa4-7bb4a3b50dfd": "What steps are being taken to prevent wrongful arrests based on AI?", "f0b6fb64-5d64-4512-97bb-0a674ad3b42f": "How did the wrongful arrest affect the Jersey man accused of shoplifting?", "e2b27dbd-6b32-4fba-8260-c9b54e2ac8de": "What are the consequences of AI bias in recruitment, as seen with Amazon's tool?", "dbe0bf87-a716-4127-9f80-f9e325332d95": "How can AI systems be improved to avoid racial and gender biases?", "b8b6fbea-dea8-487e-bf33-6235d3a607e2": "What is educational redlining, and how does it affect student borrowers?", "a0a96c91-6c5a-4fd0-8127-b7c974303838": "How did the wrongful arrests based on AI impact the lives of the three men mentioned?", "dbd41bb0-0c09-4495-b286-dc9622de9e86": "What measures are being implemented to address AI bias in various sectors?", "0e1301c3-0b83-4603-9d39-8c374b37340d": "What are the benefits of documenting and reporting GAI incidents?", "c00fe054-52c2-4561-80e1-4b27a535ac03": "How can standardization of GAI incident reporting improve risk management?", "fd45462e-12d0-40d0-a428-90041a7dff99": "What roles do AI Actors play in reporting AI incidents?", "a531e2af-3989-43fb-8d5a-5808c6ec17bc": "Why is it important for organizations to develop guidelines for incident reporting?", "69db45ba-aef9-4c24-b15a-824ead34c0cf": "How can guidelines help AI system operators identify GAI incidents?", "c8828f01-879b-4d90-bc2c-f97dde5bf765": "What should be included in the guidelines for publicly available incident reporting?", "ce0d270a-d64e-4526-97cf-96ef519cf24a": "Why is the documentation of third-party inputs and plugins crucial for GAI systems?", "b8747453-8022-467b-b1b5-5145d44ff203": "How can greater awareness of GAI incident reporting promote transparency?", "ddd8c6c0-1296-4762-b557-374a51ff3add": "What measures can organizations implement to prevent future AI incidents?", "14fc80ec-eef8-48bb-9d01-33069ee9b8fe": "How can AI Actors trace the impacts of GAI incidents to their source?", "df8094ac-8e28-4a78-838d-606526447758": "What are the different applications of GAI 
systems in organizational settings?", "4b8eb074-1b7f-448a-ac6d-be4d1ddaed05": "How can organizations restrict AI applications that exceed risk tolerances?", "3710b3fa-3a13-4381-b514-9fc7f8dea69b": "What governance tools can be applied to GAI systems?", "8aa6ab37-2653-45b2-a725-a7735747f754": "How is content moderation handled by AI technology?", "4c0876f0-776a-4fac-a0e2-a3148db5b6f9": "What are the qualifications required for AI actors interacting with GAI systems?", "288d8ca3-7d48-4d33-8b24-d9e7bc4d94a8": "How can AI technology be used for code generation and review?", "fe80034d-0f1d-4bd0-8551-8f0d79001f23": "What protocols are involved in auditing and assessing GAI systems?", "23dc8c4c-f982-4955-b9bd-af9b54a6b57e": "How do organizations ensure AI applications align with their values?", "90900ded-0b53-44a3-9a22-13f8a5988204": "What are the change-management controls for GAI systems?", "0ca2f759-ebbe-4a9b-817b-1c16c7d1e376": "How can AI technology assist in data labeling and preparation?", "40801caf-7abe-479e-9818-e654ea44c389": "What are the key differences between AI risks and traditional software risks according to the National Institute of Standards and Technology (NIST)?", "0943156a-41b4-4d6c-8add-0bb5010e1936": "How does the NIST AI Risk Management Framework (AI RMF) address AI-specific risks?", "10a26c62-5c72-487a-b041-044cc2f6e7e7": "What resources are available in the NIST AI RMF Playbook for managing AI risks?", "66155b2d-caf6-421b-86c6-ca18b2579570": "How does NIST suggest framing risk in the context of AI?", "120752de-fe52-4f54-9e4a-49a9641d77ff": "What terms are included in NIST's glossary for trustworthy AI?", "28130de3-a5d1-4a75-aef6-60681684793e": "What steps does NIST recommend for identifying and managing bias in artificial intelligence?", "a74132c1-3fa1-462f-88d0-d418f51aaebf": "How can the NIST AI RMF Playbook be utilized by organizations to improve AI risk management?", "66be8191-b960-4fed-b0ba-c92018e56838": "What foundational information does NIST provide for understanding AI risks?", "edbb2f46-b7fe-4483-98f0-ebc5eef57da8": "How does NIST's 2022 publication contribute to the standardization of bias management in AI?", "cb87b719-4b5f-41e2-af94-c5ee3c301e18": "What are the main components of the NIST AI Risk Management Framework?", "02e280c0-6258-47f4-bb62-1324b012cd32": "What are the key challenges in ensuring automated systems work as intended?", "57a1c358-afc7-4547-8390-0545a91d0b6f": "How can automated systems be protected from unintended harmful outcomes?", "9ec63ba3-4a78-4919-9641-06406a26d3df": "What are the consequences of 'alert fatigue' in medical settings?", "30f03404-8efd-4a10-8b15-4563ba490e24": "How can the accuracy of predictive models in healthcare be improved?", "5911ed64-f3fb-4bbc-a354-710e9eb4a6c8": "What measures can be taken to prevent automated moderation systems from silencing counter speech?", "0a2e4e30-4e96-4313-8701-66fb74fbaa6e": "How do automated systems impact the safety and well-being of the public?", "754bdb70-fd0d-42db-b514-ed3e792c9c66": "What are the ethical considerations in designing automated systems for social media moderation?", "2b123ed9-5e26-4f5b-8bac-71dde9f51ca1": "How can hospitals mitigate the negative effects of false alerts in predictive models?", "a72510fb-3627-482b-bc59-e8a9d2d9c7fe": "What role do independent studies play in validating the performance of automated systems?", "77f146ae-3822-4a25-b535-3110c58c0759": "How can automated systems be designed to better distinguish between harmful and non-harmful content?", 
"b2d56c7c-fa94-47b2-9b32-0c4729fb2f12": "What are the primary information security risks associated with Generative AI (GAI)?", "31cf8128-b127-4673-88f6-875bd9d671be": "How can GAI lower the barriers for offensive cybersecurity capabilities?", "4c7d4d1b-256a-408a-8d84-d8a691961a20": "What types of attacks are GAI systems vulnerable to?", "b5d4dcd0-59de-49a8-97e5-1bc4c4de0c99": "How might GAI expand the available attack surface in cybersecurity?", "43f7aac1-4168-4700-b4c2-ba575419b046": "In what ways can GAI augment traditional cybersecurity attacks like hacking, malware, and phishing?", "b5e47640-3a12-4f25-80db-01036d1872b5": "Are there reports indicating that Large Language Models (LLMs) can discover system vulnerabilities?", "032a7d8e-cbd4-4005-96ab-461828a13663": "How can sophisticated threat actors use GAI-powered security co-pilots in cyber attacks?", "7ab5047e-51ca-40c0-8cc4-48fd68fcb7df": "What role can GAI play in helping attackers evade threat detection?", "0338aa3a-47c1-4a51-acce-90bfd9a2dcf2": "How might GAI assist attackers in escalating privileges after gaining system access?", "5001a4f7-f1ac-4bf1-9152-b893c1f45412": "What measures are necessary to maintain the availability of GAI systems in the context of information security?", "06e02bbe-ec13-46b6-a82a-c3eedbe9df94": "What is the significance of a waiver of sovereign immunity in legal terms?", "c5af22e4-d127-4a04-a563-47eaad15d6e0": "How does sovereign immunity protect the United States and its entities?", "d073d233-f4cd-4e5a-a3af-b8fa829b6a27": "What is the difference between substantive and procedural defenses?", "f810f097-05b7-43e9-b7e2-fa7d21b5c53b": "Can a work of the United States Government be copyrighted?", "b56876d6-1fb9-40ab-86b9-a78a889bbbec": "What does 17 USC \u00a7105 state about government works and public domain?", "73e2f0c1-f0cf-49da-906d-fc8510921e3f": "How can one enforce a legal defense against a US government agency?", "d8c94105-67dc-479c-a469-65e8104efca1": "What are the implications of a document being in the public domain?", "1a7e6b55-7f76-4439-9567-12cc8d835c96": "What legal protections do officers, employees, or agents of the US government have?", "c0aa67d6-de29-4bf6-8b18-04bbaf49240a": "How does the concept of equity differ from law in legal proceedings?", "88df5351-74b0-465c-9de9-b5807548c0dd": "What are the limitations of enforcing legal claims against the United States government?", "168b2e48-4e05-44a6-b944-aa2feb95f9e4": "What are the potential impacts of surveillance technologies on individual rights and opportunities?", "b72701b8-aef7-496e-8747-79854e65fcda": "How can one ensure that their data decisions are respected in automated systems?", "68af651a-6b8b-41bb-8f3e-14604272284d": "Why is it important to have access to reporting on the use of surveillance technologies?", "7faa544a-97f1-4d9c-804c-36a978652159": "What kind of documentation should designers and developers of automated systems provide?", "3d27b603-05bb-4a64-bc55-e9add2c9f314": "How can automated systems limit access to education, work, and housing?", "63d1952d-d789-4fd2-b18a-c7ddf8ce8cc1": "What should be included in the notice provided by automated systems?", "44065a66-2c8a-4d0f-9ebb-8dcfbdefe195": "Why is plain language documentation important for automated systems?", "fa9aff28-f81d-4034-acb6-19b295cae5fa": "Who is responsible for providing explanations of outcomes in automated systems?", "7e79602f-7d17-47d4-8874-50337e61fdc9": "How can individuals understand the role of automation in systems that impact them?", 
"eba5ba0f-95a3-4429-9437-7d0be9b5e0b8": "What measures can be taken to ensure transparency in the use of automated systems?", "b3486f90-735d-49df-86bc-739ba6403a31": "What are some top progressive companies building ethical AI in 2021?", "d5195aba-7870-4c55-961d-ed0ffe276376": "What methods are being used to assess equity according to the Office of Management and Budget's 2021 report?", "f0365668-2d3e-4a22-aac1-d25a64683ef8": "What is the AI Risk Management Framework by the National Institute of Standards and Technology?", "ce4365ac-9fdf-43ce-b139-503ec9ad37a4": "What is the purpose of the US Department of Energy's Artificial Intelligence Advancement Council?", "6737307a-5bbc-4ab4-b524-9437eee812f1": "When was the US Department of Energy's Artificial Intelligence Advancement Council established?", "e209d21e-f9c1-41cd-bbf5-14d112a1db70": "What are the key points of the US Department of Defense's Responsible Artificial Intelligence Strategy?", "139ad9ad-384f-497d-af84-dc9b24d2dade": "How does the AI Risk Management Framework help in managing AI risks?", "a24c2df7-395d-4019-8e4c-feb9d16b19d6": "What are the main objectives of the US Department of Energy's Artificial Intelligence and Technology Office?", "93b1cd97-2fe1-4152-ab7c-a0efbf49a6b5": "How can companies ensure they are building ethical AI?", "9c028b3c-c92c-4ca5-b7c4-e8d1c358b6c6": "What are the latest developments in AI ethics according to the 2021 article by Disha Sinha?", "0bfaf653-4226-41e5-865c-a5d63578d4e7": "What are the main concerns associated with the expense of broadband service for telehealth?", "0fd4687b-e9e7-498f-acd7-9eb020850852": "How do privacy concerns impact the adoption of telehealth systems?", "ebdadd5c-1215-40f5-8b2b-f242fbd35451": "What are the equity issues related to the cost of health monitoring devices?", "fba46c57-4104-40f5-bc91-fb9b706c2ef2": "How can racial biases in technology-enhanced care perpetuate discrimination in medicine?", "dfef4df6-fa61-419f-82ae-6a148536db56": "Why is it important for medical technologies to be accountable to relevant stakeholders?", "0fa04860-d710-40b5-856d-1dbc9d3bc759": "How can the voices of those subjected to medical technologies be better heard?", "6275567f-707b-48df-93ce-787e58e407de": "What are the potential solutions to address the expense of broadband service for telehealth?", "5c0dd9dd-bd3d-4f2f-b1d3-e8d142665e33": "How can privacy concerns in telehealth systems be mitigated?", "be446845-3854-499c-886a-d08121ae6700": "What steps can be taken to reduce the cost of health monitoring devices?", "21abc9b0-3037-4ca1-bebe-3ebbb20cbd20": "How can the medical community ensure that technology-enhanced care does not perpetuate racial biases?", "735c4032-1519-4fd5-8cdb-0e9a0ce6da5c": "How can the principles of the AI Bill of Rights be implemented in real-life scenarios?", "9daf1259-d81a-4cc8-8d81-e60f2962b064": "What are some practical technical approaches to protecting rights and opportunities in AI?", "d927d226-242f-4436-8243-c62ee8339ab8": "How can sociotechnical approaches help in actualizing the AI Bill of Rights?", "920eaf59-a989-498c-8964-7ac0a342d7b2": "What role do laws and policies play in ensuring the AI Bill of Rights is respected?", "1ddaf3a2-3fbb-4e1f-8bea-3a5bd70e5f8d": "Can you provide examples of how industry and civil society can collaborate to protect AI rights?", "aa9fb7f7-6205-45a6-8e2c-e7713a8420fc": "What are some illustrative cases that show the implementation of the AI Bill of Rights?", "f738f637-ecf5-43fc-b89f-65431bac4cb2": "How important is the 
cooperation among researchers, policymakers, and technologists in implementing AI rights?", "9db348ec-94bf-435c-aec7-b1da0e3acad9": "What are the expectations about technologies in the context of the AI Bill of Rights?", "2af37117-da7d-4d16-afb2-b96ea0513e67": "How can the public contribute to the effective implementation of the AI Bill of Rights?", "2939615e-23fc-4214-966c-6ba4fe1b7335": "What are the challenges in moving the principles of the AI Bill of Rights into practice?", "a7c4e78f-9e12-4e77-80a9-1b6d0e378dc7": "What are the key findings of Karasavva et al (2021) regarding the predictors of non-consensual dissemination of intimate images?", "f9d93ab9-108e-49db-901e-3dc647847f97": "How does the study by Katzman et al (2023) contribute to understanding representational harms in image tagging?", "73c8f947-0282-4541-8b91-1746ecf82b5a": "What are the main components of the value chain analysis for generative AI as discussed by Khan et al (2024)?", "f28b1c3a-c692-41ea-8144-dd7b5230aab8": "What is the purpose of the watermark proposed by Kirchenbauer et al (2023) for large language models?", "14f8c882-d08f-4bd0-923e-5d8b8841fd45": "How does Kleinberg et al (2021) address the issue of algorithmic monoculture and its impact on social welfare?", "17eb019d-fa03-4024-90b1-435b97d565de": "What insights does Lakatos (2023) provide in the report \"A Revealing Picture\" published by Graphika?", "e0f19f85-0585-4ce1-b033-c658c72c6731": "How can the findings of Karasavva et al (2021) be applied to prevent the non-consensual dissemination of intimate images?", "ff276cd8-c1b4-4ac4-ad1a-fe49517a74c0": "What methodologies were used by Katzman et al (2023) to measure representational harms in image tagging?", "afaa6994-4806-44cf-958a-057084b24a97": "What are the implications of the value chain analysis by Khan et al (2024) for the future development of generative AI?", "b7b673c0-0b37-42fe-9b9a-93758490f9ef": "How effective is the watermarking technique proposed by Kirchenbauer et al (2023) in protecting large language models?", "98816e76-3c36-44a7-9f7b-e1c94ca23586": "How can active learning techniques be used to identify model failures or unexpected outputs?", "8fe3db0a-9fc1-49f4-b08a-ed387dd67784": "What are the benefits of sharing transparency reports with stakeholders regarding GAI system updates?", "62865470-d8f3-4ebf-bf2b-050f5fcbce3b": "How does tracking dataset modifications help in maintaining the provenance and integrity of information?", "fe41122b-c0e0-4a47-9424-b21efd0d8405": "What role do AI actors play in user research and experience?", "c5fabcc7-22e9-49c6-b965-c80316d5b4d8": "How can harmful bias and homogenization be mitigated in Human-AI configurations?", "8086f26a-3e53-45ca-8e09-adcaeb35b100": "What steps should be included in transparency reports to enhance accountability in AI systems?", "61e1e6a5-ba18-4763-9a90-5f6b278dcda0": "Why is it important to monitor data deletions and rectification requests in AI datasets?", "45090c7b-a6aa-4517-8b79-30964e872d62": "How can collaboration with AI actors improve content performance and impact?", "3b005aea-9582-4e7e-b2ed-86f7c16dffa6": "What are some effective methods for ensuring the veri\ufb01ability of content origins in AI systems?", "2bb3e489-4c81-4e1b-aa8a-149336e61c0f": "How does confabulation affect the transparency and accountability of AI systems?", "77aa993b-372c-4534-8252-ca9c4010c1cf": "What is predictive policing and how does it work?", "6a1dc5f7-53a2-431d-80bd-8ddfbb546084": "How can predictive policing systems impact individuals' 
privacy and rights?", "0b5870d7-28c3-4da6-82be-4f2dcc63e1bf": "What are the potential risks of using automated systems in law enforcement?", "673c25a5-a7a8-4bfc-82aa-81b7f1bda04f": "Why is transparency important in predictive policing systems?", "ea441231-122d-47f5-8f5c-11085f9fd1f4": "How can data entry errors in automated systems affect individuals' access to benefits?", "0ed1645c-4661-46cb-8443-35a4c8e8ce60": "What measures can be taken to ensure fairness in automated decision-making systems?", "11076db2-fd60-471a-aba9-62b355aed19e": "How can the public be informed about the criteria used by predictive policing systems?", "3ae32342-738f-4cb9-b5dc-16fcf2130ea2": "What are the ethical concerns surrounding the use of predictive policing?", "ff50e166-1288-43c1-a067-65b0cf902d06": "How can individuals challenge decisions made by automated systems?", "f12d83b4-be05-4565-86df-f93a0c64b324": "What role does public transparency play in the effectiveness of predictive policing systems?", "37dd7447-0e75-4e14-8e17-865d6555c74a": "What is the Labor-Management Reporting and Disclosure Act (LMRDA) and what does Section 203 cover?", "1687fec4-dc70-4040-b728-4b5e44edc435": "How do I fill out the US Department of Labor Form LM-10?", "10d62236-489c-42fd-81dd-7885112ad9b2": "What are the key points in the OLMS Fact Sheet regarding Form LM-10?", "e9cd49c5-55d2-4b58-b05a-d0534264115e": "How does Apple protect user privacy according to their documentation?", "e659b628-dfa2-49f4-a768-e8761ae1c74e": "What measures does Google take to ensure Android is secure by default and private by design?", "7ea78477-98bc-4ff3-8d95-b316f391a035": "What are the main arguments in Karen Hao's article about algorithms trapping people in poverty?", "74061af4-43ea-4d23-9765-abe2b5fbb9e1": "How are lawyers fighting back against hidden algorithms that create poverty traps, as discussed in the MIT Tech Review article?", "dfdb3835-219b-4d3a-9673-d2923df2fb4b": "What concerns does the ACLU raise about family surveillance by algorithm?", "18a0946d-d4d3-4c73-b1bc-ef5d2d6cca86": "What are the implications of algorithmic surveillance on families, according to the ACLU fact sheet?", "3f73048f-6c23-48db-be72-402295190c87": "How can developers design for safety and privacy in mobile applications, as suggested by Apple and Google?", "f9d78f26-8f38-45a5-a05d-126e99e0b295": "What are the best practices for testing automated systems in real-world contexts?", "caec4118-4465-454f-8361-aebd8d21a912": "How should feedback be documented when reconsidering the deployment of an automated system?", "318fb3a4-b202-4e61-870b-b9a18c7c8f54": "What are the key differences between automated systems testing and human-led testing?", "3681a289-6754-4f14-9c68-bba82f3179af": "Why is it important to mirror real-world conditions during the testing of automated systems?", "424ac5ef-46e1-4f01-bee3-d71a0e907e5d": "How can material differences in deployment conditions affect the performance of automated systems?", "032fd89f-0b92-4533-a520-39fe68cc1744": "What role do human operators or reviewers play in the effectiveness of automated systems?", "a788ae08-165b-4797-b530-9054fffc79b9": "How should system performance be compared with existing human-driven procedures after testing?", "c823ae3a-5c23-421a-a316-3fb0a90fdff6": "What domain-specific best practices should be followed for testing new technologies?", "4e06d594-f59f-43ed-9c42-766b3d71377a": "Why might new testing be required for each deployment of an automated system?", "f0a8cfb7-ca5a-41af-874f-24ca1c10564a": "How 
can the outcomes of testing influence the decision to deploy an automated system?", "ceea82a8-3f36-499c-8999-f705089fd5c4": "What are the expectations for automated systems in sensitive domains?", "4da1b1fe-5a0f-4873-9a51-592e7031b5f0": "How can users opt out of automated systems in favor of human alternatives?", "2d5f0e7a-37f4-41f3-a270-083718406673": "What mechanisms should automated systems provide for human oversight?", "b597f3f4-73ca-41d6-805f-ea8566ee4335": "Why is it important to have human consideration and remedy in automated systems?", "51a36031-cbe6-4896-a26f-0d1231ac9ac5": "What kind of training and assessment is necessary for human-based portions of automated systems?", "28e9d3c8-9aad-4ac2-8b76-8112ba317fc7": "How should automated systems ensure the effectiveness of human alternatives?", "3cd1b051-c5ec-4ba6-85a3-6c8ecc3d382e": "What safeguards are recommended for automated systems in sensitive areas?", "93157218-65e8-4b27-bdc0-d3b5c75016f2": "How can automated systems provide clear and accessible instructions for opting out?", "6a607a7d-6808-444f-a772-aa55eccd8e61": "What role does human oversight play in the effectiveness of automated systems?", "522e086d-9b12-48c9-b070-b9f0c6b871e8": "Why is it important to have fallback systems with human oversight in automated systems?", "ce02496c-71b3-4ab7-98bb-eed1cc3edc11": "What are built-in protections for abusive data practices?", "55c73a61-2c93-43db-b06b-142fea09d1a3": "How can I ensure I have control over how my data is used?", "8dae2406-bb95-4836-9224-224fa830dd81": "What design choices help protect my privacy by default?", "1ddfec1f-63dc-4114-8d2c-6c6b080df23d": "What are reasonable expectations for data collection?", "ca588f53-c672-4ec4-8c2f-4edb75780625": "How can designers and developers seek my permission for data use?", "f01609ce-a9ca-4166-bc21-4837e9f8dbe6": "What are alternative privacy by design safeguards?", "bf05189a-5783-4529-9a04-33ecb2b039f6": "How can I avoid privacy-invasive default settings?", "69cd9e44-ebad-4444-bd6a-0ffe7dd0fea1": "When is consent necessary for data collection?", "5592a53b-bc51-4eb2-ad09-d91eab84cbca": "What are the best practices for respecting user decisions on data use?", "da87855a-e8d3-464f-b44a-2f1c0f36efd9": "How can automated systems ensure my data privacy?", "537d6d33-2d70-4cda-bfd2-a7e7d58dca1c": "What are the guidelines for ensuring surveillance is proportionate and necessary?", "f9c45eea-e5e9-4963-b36b-eac4e1041f10": "How can designers and developers minimize the invasiveness of surveillance systems?", "95df7bca-5759-48de-aded-0bdcfd73b47d": "What measures should be taken to inform individuals about surveillance activities?", "08b6077f-18cf-437e-93e3-b2272cafdf09": "How can surveillance systems be designed to protect civil liberties and civil rights?", "32c66693-7c38-4482-b27e-1226a0d82810": "What are the legal requirements for providing notice to individuals subject to surveillance?", "b0213953-d766-4b10-aa1b-c7170ea2c1ab": "How can surveillance be limited to avoid infringing on democratic rights like voting?", "a7153e52-4939-4c06-bfa6-43f880a88d70": "What are the best practices for restricting the number of subjects monitored by surveillance systems?", "c1a6fa3b-e163-482c-8ca2-b05ca142d9f1": "How should data gathered through surveillance be managed and used responsibly?", "1b8e4a92-c4c9-4200-9e7f-a441aa82cd9b": "What are the ethical considerations for deploying automated surveillance systems?", "b3ed9e28-9cca-467c-9c04-1b533b1ff751": "How can surveillance systems be aligned with 
national security needs without compromising individual rights?", "b814a05e-73e6-4ac4-8098-646df1806acd": "What are the common biases found in automated sentiment analyzers?", "e2b9260e-db2c-4449-9f56-d5cbf8e617c2": "How do automated sentiment analyzers affect social media content moderation?", "9a8125b8-6346-4652-9584-b5e5399fa7bd": "What steps are being taken to address bias in sentiment analysis tools?", "2c59dede-dec0-4ef4-9a1c-f4e8e47a6727": "Why do searches for terms like \"Black girls\" or \"Asian girls\" often return sexualized content?", "3c7f39ce-861e-4abc-8b91-941233a6ff50": "How can researchers help in mitigating bias in sentiment analysis tools?", "0e532265-3e9e-4043-ac99-d073b610bb72": "What are the implications of biased sentiment analysis on minority groups?", "3479b71e-e61b-42b5-9866-827680891f1c": "How can technology platforms ensure fair sentiment analysis for all users?", "6334ec05-5071-42d8-98b8-fc5306d236b2": "What examples illustrate the bias in automated sentiment analyzers?", "c5293c46-c7bd-4332-b088-0f07e7fca312": "How does bias in sentiment analysis impact online communities?", "5386554b-00b3-4e7e-b130-eafd4d8eb057": "What measures can be taken to improve the accuracy of sentiment analyzers?", "22b750f1-231b-489a-bebd-3b193bea2a49": "What is the importance of calibrating automated systems to the level of risk?", "5de799b0-d874-492e-aa45-4fd7647abab2": "How can summary information about automated systems be effectively communicated in plain language?", "81900311-01e7-4092-a602-1d8187f760e4": "Why should assessments of the clarity and quality of notices and explanations be made public?", "65bccbee-7732-4dd2-be47-39627cdfeab0": "What are the key components of a good notice and explanation for automated systems?", "db31ef6b-186d-4690-9967-65f38c663c5b": "How can organizations ensure that their automated systems are transparent to the public?", "540f0169-64c9-4548-888a-05f286d81496": "What methods can be used to assess the quality of explanations provided by automated systems?", "415e4fbc-b69e-428a-9f94-7312d737e467": "Why is it important to report on the clarity of notices related to automated systems?", "1bfaa0c5-41a4-4311-a8ca-15d61ff4d3cf": "How can the public benefit from having access to summary information about automated systems?", "768e8af3-91d4-480a-8e3c-f3fb6fa362a0": "What are the challenges in making automated system reports understandable to the general public?", "85eec5c3-d2d6-458e-ada1-b311bf4a72f9": "How often should assessments of automated systems' notices and explanations be updated and made public?", "3d150e71-c79a-4bd8-8084-2ae5bafda260": "What are the key findings of Acemoglu's 2024 paper \"The Simple Macroeconomics of AI\"?", "1a24bdd1-8636-4d56-8125-f82bbece4d8e": "How does the AI Incident Database track and report AI-related incidents?", "84f606b0-d5e2-46fe-b0fe-9f8bd603926f": "What were the main incidents and responses discussed in Atherton's 2024 survey on deepfakes and child safety?", "b0068345-aa37-45f2-a66d-ab765c64e4c3": "What are the implications of intentional biases in LLM responses as discussed by Badyal et al in their 2023 paper?", "322cd2e4-d7f9-48b7-8de7-f1bb820e45a4": "How was the Bing Chat data exfiltration exploit discovered and fixed, according to Embrace The Red's 2023 blog post?", "555f7d72-2732-4953-b60e-5aa9bb3b5329": "What is the concept of algorithmic monoculture and its potential impact on outcome homogenization as explored by Bommasani et al in 2022?", "0a9a8d6d-03b6-4a09-a022-86b5bcbb4f9d": "What strategies do 
Boyarskaya et al suggest for overcoming failures of imagination in AI system development and deployment?", "28ffa310-5101-4271-a96f-ae81552638be": "What are the main security challenges in the AI pipeline as identified by Browne et al in their 2023 report?", "e59e161d-73b9-44a7-ad00-86e936500c7d": "How does the AI Incident Database contribute to improving AI safety and accountability?", "7712df59-b5e6-4300-81f2-1d0e16da04e2": "What are the potential risks associated with deepfakes, particularly concerning child safety, as highlighted in Atherton's 2024 analysis?", "ecfdcb90-ee70-422b-9850-10c9ebbf34c6": "What is the AI Bill of Rights?", "6dd78dc1-02ed-4f5b-abe6-398caa87184d": "How can AI improve Americans' lives?", "43f126a5-f5cf-47b2-a267-d84e162dc483": "What are the potential harms of AI technologies?", "2d69fe62-48f6-443a-af60-47dd8aa8e223": "How can we prevent the harms of AI technologies?", "b15d2c3c-6578-4ac7-bf29-1bb9ffae84d3": "What role did public servants play in shaping the AI Bill of Rights?", "c4aaa7b2-60f1-480b-9a24-5f8d421c6136": "How did the international community contribute to the AI Bill of Rights?", "98490660-182e-4994-a0e9-79f5063132a2": "What are some examples of public engagements included in the Appendix?", "db8d310e-c511-4d66-9486-f8723f206d2f": "Why is it important to have a formal request for information regarding AI?", "4dce7da8-27f9-40a3-917b-17d3726fd5ee": "How was the input from the public collected for the AI Bill of Rights?", "9f0a6fe1-594c-447e-8837-2a78a94f7b03": "What are the core messages from the discussions about AI technologies?", "60709a95-43fe-4194-a602-00bee13a364a": "What is algorithmic discrimination?", "d64d1e05-da97-4caa-a99f-c615a4ec29f3": "How can we protect against algorithmic discrimination?", "ec9e492e-7c12-49e0-b47d-e552f7d1f972": "What are some examples of algorithmic discrimination?", "336ad8fa-faaf-4447-bfc6-ef161da017ce": "What laws exist to prevent algorithmic discrimination?", "3b634a49-58a5-4fcf-a112-9a86f5ac0893": "How do algorithms contribute to discrimination in hiring practices?", "4b58cb88-0109-4889-95e2-055c73c49939": "Can algorithmic discrimination be completely eliminated?", "21b86043-f03c-4c93-a916-be8fd8b2ad61": "What role does transparency play in preventing algorithmic discrimination?", "63133742-a77f-4db6-846d-4d1f5059f01a": "How can companies ensure their algorithms are not discriminatory?", "661fa6f5-10ba-4b3b-a3aa-67fcb6b7c0ec": "What are the ethical implications of algorithmic discrimination?", "d181a784-4cc1-4749-98e2-0f0ce4f2e3be": "How do biases in data lead to algorithmic discrimination?", "ebf17b14-0e3f-4c3d-904d-649dfc42fa95": "What are examples of automated systems that can impact individuals' rights or opportunities?", "a6fabc41-7dfb-4c46-aa88-b4a3530b1d84": "How do automated systems derived from machine learning differ from passive computing infrastructure?", "24c03da2-ba86-485a-86f8-59894c03d3e1": "What is considered passive computing infrastructure in the context of automated systems?", "5e4464c8-91c2-44f1-9ab2-914ebaa083c0": "How can automated systems influence policy implementation?", "6784a5e2-6056-4306-93f8-80a495e09e01": "What types of data processing techniques are included in the definition of automated systems?", "79f1c48b-968c-4851-928d-ae6f42c61343": "Why are web hosting and domain registration considered passive computing infrastructure?", "925e7b0d-8fe7-40c2-a359-c8ee0f5ed344": "How do automated systems interact with individuals and communities?", "3397b416-793c-48c5-be7e-199cd416c910": 
"What criteria determine if an automated system is in scope for impacting communities' rights?", "fc6683f3-3edb-4168-be40-07bfdd0d288b": "How do automated systems aid in decision-making processes?", "f7d73d0c-df20-4ca2-91d2-a8c5afdc14c4": "What is the significance of excluding passive computing infrastructure from the definition of automated systems?", "36690cae-c8ac-4416-86a2-94636c14e662": "What is algorithmic discrimination and how can it be avoided in automated systems?", "ed1dbeba-1948-4424-9f4e-49940e6ced35": "Why is it important to limit data use in automated systems to avoid group-based inferences?", "6144bb8c-dc20-482c-b508-3a6746a31aad": "How can human oversight ensure that automated systems are tailored to specific use cases?", "10bc0fe7-54f9-4f07-8e70-4dc100547fa8": "What are the risks of using automated systems in sensitive domains without human oversight?", "a2df3dd2-4f82-4dc5-a09b-87f4bb447213": "Why should validation testing of automated systems not be assumed to transfer to different locations or use cases?", "1ceaf948-0544-42aa-998b-834363202b17": "What role should human consideration play in high-risk decisions involving automated systems?", "7ecb6173-5692-4d0c-9c5e-8b0b13bffa23": "How can automated systems provide positive outcomes without directly intervening in high-risk situations?", "1e214f08-1e4a-4588-b61a-55376c21b541": "What are some examples of high-risk decisions where automated systems should not intervene without human oversight?", "68fab0c7-c29b-44ec-af9d-05ad2474f8b0": "How can evaluation testing ensure that an automated system is safe and effective for a specific situation?", "7bd24a17-0f1e-4165-ad6c-a177ddda289f": "What are the potential consequences of allowing automated systems to make high-risk decisions without human consideration?", "c8b856a5-bdd4-468f-967f-2a2d971d1c03": "What are the legal limitations on the use of surveillance technologies by the government?", "403aa84b-752d-4588-b4e3-fa5a8c170913": "How do surveillance technologies impact the rights and opportunities of individuals?", "d6cc3ca1-888e-40c0-be4f-a80b26fe264f": "What constitutes \"real-time or subsequent automated analysis\" in the context of surveillance?", "33590f14-1e42-4a8a-b98e-4b50ed420647": "How can surveillance technologies affect underserved communities?", "a2a6d551-b7e7-4648-9043-311d908a8546": "What is the definition of \"underserved communities\" in relation to surveillance technology?", "8da5e9a2-f967-4417-a687-2d49feb921bc": "How does the framework address the protection of data and communications?", "2826ff48-bba2-4f87-9f2e-e425dd1999ff": "What are the ethical considerations for using surveillance technologies on individuals or groups?", "ff195633-1d86-4b29-8add-e361af17a5fd": "How can commercial use of surveillance technologies be regulated to protect individual rights?", "d1ee3ce0-b52c-4ea9-a326-9e3a36f91eab": "What are the potential consequences of surveillance technologies on community access to opportunities?", "655de1e5-ce3a-4bce-95ec-ebe789004f66": "How does the framework ensure the preservation and protection of identifying information?", "ec6b21b6-677f-430b-b3f1-682615b0c999": "What are the best practices for obtaining consent for data collection in sensitive domains?", "16885872-4b72-4e64-b7db-18121bb64984": "How can notice-and-choice practices for data use be made more understandable?", "f9d9b18c-d71e-4abd-b3ca-0223dec3ad95": "What are the enhanced protections for data related to health, work, education, criminal justice, and finance?", 
"2bb558fb-06ba-4259-b5c2-1069543d1700": "How should data pertaining to youth be handled to ensure their protection?", "9082084b-5bcf-44c9-acbf-4b018579d045": "What are the necessary functions for which data and related inferences can be used in sensitive domains?", "5fc54a90-cce1-45f8-8502-abf134313535": "What ethical reviews are required for the use of data in sensitive domains?", "98454bb2-eaf7-45a3-8e6d-9781a4a9d584": "How can communities protect themselves from unchecked surveillance?", "e9753e53-f009-433f-83b1-86b01c4fdf46": "What kind of oversight is necessary for surveillance technologies to protect privacy and civil liberties?", "7c787e2d-b2de-4444-a4e9-08b349e388a4": "What are the potential harms of surveillance technologies that need to be assessed before deployment?", "6c304545-a0cf-4473-aec5-25670f40a825": "How can continuous surveillance and monitoring be regulated to protect individual privacy?", "34a2354d-7bf6-4914-8c4a-cf196db36040": "What are the key points discussed in the document \"Records, Computers, and the Rights of Citizens\" from July 1973?", "0aad6105-223c-4dab-912f-526f486cdc6c": "How does the Office of Management and Budget's Circular A-130 guide the management of information as a strategic resource?", "f59c944c-112c-40a8-a47f-ac2b0347e2c4": "What are the main recommendations in the OECD's guidelines on the protection of privacy and transborder flows of personal data?", "c8df4ea5-17bd-4cca-a295-e7ee59f9e7d0": "How effective is the proprietary sepsis prediction model validated by Andrew Wong and colleagues in hospitalized patients?", "98b08556-abfd-4833-ae35-a8535571fea9": "What are the implications of the findings from the study on the sepsis prediction model published in JAMA Internal Medicine?", "cba6f835-caec-48d5-9b82-232ba8416963": "How does Facebook's moderation policy affect discussions about racism, according to Jessica Guynn's article in USA Today?", "d5c902d5-6803-43ce-9cc7-e41da57f53fe": "What does the term 'Zucked' mean in the context of Facebook users discussing racism?", "b810fe11-8639-4aa1-9680-e9aed0a7f23f": "How has the recommendation of the OECD Council concerning privacy and data flows evolved since its revision in 2013?", "cf8d9d77-d12d-4feb-affb-6668a8f6b2ce": "What are the criticisms of Facebook's handling of hate speech and discussions about racism as reported by USA Today?", "d47a2d26-34fd-45b5-b3ae-e8dfe20c77ac": "How does the 2016 update to Circular A-130 impact federal agencies' information management practices?", "345ce069-495e-49b5-af9c-c3149193f72c": "How can new technologies help reduce health disparities?", "74332ba4-7dba-484a-b7f3-db3c9b6c6955": "What are the potential risks of relying on technology for healthcare delivery?", "8480a6c9-125b-435b-87a1-ffad65340e1f": "How can policymakers ensure equitable access to healthcare technology?", "d4afcb88-ecf5-4717-9b29-d9b4cd68121b": "What role does artificial intelligence play in improving healthcare outcomes?", "468af870-6b02-4fa5-9205-37a33e43b63c": "How can healthcare providers integrate new technologies into their practice effectively?", "12891465-101e-4897-a86f-046d85706186": "What are the ethical considerations of using technology in healthcare?", "14c6eb6a-036f-4f26-bb80-a075fd2979a1": "How can technology improve healthcare access in underserved communities?", "861a1a80-5a7a-4b03-b86b-e6d7b357b14c": "What are the most promising areas for research in health technology?", "beb717de-9182-47e4-82bf-c27fde4a43fa": "How can healthcare systems balance technology use with patient 
privacy concerns?", "840b62a5-c2ca-45e6-9793-40fed59c838d": "What impact has telemedicine had on healthcare delivery during the COVID-19 pandemic?", "bd65a985-6adf-4ca0-800a-2f5e4eaf9313": "What are the best practices for monitoring AI-generated content for privacy risks?", "d1bdd54c-5d9d-4677-b5ac-065fd4b4e8e8": "How can organizations address instances of PII or sensitive data exposure in AI-generated content?", "e937f160-66b0-4289-83b1-2a5616b701ae": "What processes should be implemented to respond to potential intellectual property infringement claims in AI?", "a9f3fd07-2b06-4abc-9bae-9cb5c884420a": "How can new GAI policies be integrated with existing IT governance and legal compliance activities?", "00c694b6-964a-4ff8-9c00-082579e3705e": "What are the key components of effective data curation policies for AI training data?", "0d2d3927-99e8-44c3-aec5-08fdc4fc2fd1": "How can companies ensure their AI technology does not infringe on third-party intellectual property rights?", "227007e5-7052-4b42-b7b0-b914bcbe185f": "What are the legal risks associated with using third-party data or software in AI development?", "262df558-56e7-445f-9a93-ad716c929f95": "How often should organizations conduct monitoring of AI-generated content for privacy risks?", "b6f4c293-115e-4811-a3aa-9a9b8ddf6f11": "What steps can be taken to document AI training data curation policies according to applicable laws?", "46fd166c-6f0e-4c3f-85c3-b30f0a1d53a0": "How can information security be maintained while integrating new GAI policies with existing systems?", "560ee295-11c4-41df-a82c-d7e7f0f44386": "What is a performance baseline for an algorithm, and why is it important before deployment?", "66e50da4-fea8-4795-8a27-663c0cde6f02": "How can human performance be used as a lifecycle minimum performance standard for automated systems?", "4d449176-2653-4092-8b2f-4f047ac02fb5": "What are the decision possibilities resulting from performance testing of an automated system?", "b8a080cb-dbf5-4ca7-b5a4-5c84f821b1ef": "Why is it important to consider the possibility of not deploying an automated system after performance testing?", "cde58352-24e9-4fc4-aae0-9d41f4d197d6": "What are the key steps in identifying and mitigating risks before deploying an automated system?", "3426f11e-e5f4-418e-b97a-01f241a68b12": "How can potential risks of an automated system impact people's rights, opportunities, or access?", "f3e038cf-7997-4724-8e6b-99c701363442": "Why should risks to impacted communities that are not direct users of the automated system be considered?", "d730bbe4-b1d4-48dc-9ae6-737485ab0e96": "What are some examples of risks resulting from the purposeful misuse of an automated system?", "e4aae9a3-d377-43f0-b0b7-61cca8c61714": "How can the consultation process help in identifying concerns related to the deployment of an automated system?", "4237e65b-dc5a-4002-b88b-cd45f37bc5f7": "Why is it important to measure the impact of risks and balance attention towards high impact risks?", "a824bf8c-fd69-4e1b-a7fa-d8a0e946b3e3": "What are the key factors to consider when determining the expected and acceptable GAI system context of use?", "305a21f4-80f2-4d2b-9d75-6745caca88fb": "How can organizations assess the potential positive and negative impacts of GAI systems on public safety and democratic institutions?", "d4751075-70d8-4ec0-ae48-12518f492e05": "What are some common assumptions and limitations that need to be documented for GAI systems?", "9a023a00-7a74-40a4-9616-0444aeadb0bb": "How can socio-cultural and domain experts contribute to 
the assessment of GAI systems?", "01f67521-6c20-4fd8-8434-616c85133c1e": "What are the potential risks associated with individual and group cognitive biases in the design and use of GAI systems?", "2fd099a7-bb3b-4ae7-97cf-d4a44e7b14fd": "How should organizations document risk measurement plans for GAI systems?", "c09872bc-2fe0-44c2-92ff-4737772a9980": "What are some known past incidents and failure modes of GAI systems that should be considered in risk assessment?", "01e6920d-c467-4660-aeea-b58e1ad39cbf": "How can organizations address the issue of over-reliance on quantitative metrics in the evaluation of GAI systems?", "1ef08275-0345-43e2-87d8-2c0014a954c9": "What are the potential consequences of harmful bias and homogenization in GAI systems?", "5d1ea4e8-03c0-4918-a4b0-ee3fb7de061a": "How can organizations ensure that GAI systems align with social norms and expectations?", "f15a27f7-7498-4ada-be1c-3cac898dc341": "What are the key expectations for automated systems in terms of notice and explanation?", "b3491e6c-e129-4aa9-81b0-cb27a5390728": "How should an automated system provide notice of its use?", "02f41084-6947-4e5d-a014-2701aa7a8524": "What type of documentation should be provided for an automated system?", "37901011-f77e-472d-bc43-758d35691417": "Why is it important for the documentation of an automated system to be in plain language?", "d152bc28-9605-4041-84f0-75278caf5c62": "Who is responsible for ensuring that the documentation of an automated system is accessible?", "29842944-f55f-426a-b9fe-f93af91ad99a": "What should the documentation of an automated system include?", "8eb02857-2af2-4998-a936-ddb76db5f513": "How can an entity ensure that the documentation for an automated system is easy to find?", "7bebfe19-175e-4123-abe1-e08b0c2d683d": "What are the benefits of providing clear and understandable explanations for automated system decisions?", "98de5d6b-fc75-4dd6-90ed-3be730aeab0c": "How can automated systems ensure that their actions are transparent to users?", "45c83246-49f0-4517-8449-22f61e1ac6a2": "What role do human components play in the documentation of automated systems?", "d0176225-348b-4c83-ab7d-82277925e6e9": "What is the Blueprint for an AI Bill of Rights?", "f75dba8f-4254-4fad-b3ba-5ce51b3065e9": "How do AI and data-driven systems impact Indigenous communities like Tribes and Clans?", "d6be5ece-a538-4e8a-a374-7789a34d34c8": "Why is the concept of community integral to the Blueprint for an AI Bill of Rights?", "a1d04dee-74ab-4d43-8717-6e2684c5670b": "How does United States law currently protect the rights of individuals against AI harms?", "48ff1eaf-6834-4862-8e80-624bb71169e7": "What challenges exist in protecting communities from the effects of automated systems?", "7b9de209-f9fa-4ef0-8ced-8969ee2ae73c": "How can the harms of automated systems be evaluated at the community level?", "a2d29100-ebeb-4076-9ddf-1f64b365290a": "What are some examples of formal organizational ties mentioned in the context?", "fd57bf95-f994-4cf5-b914-af75dc140b11": "Why might the impacts of AI be more visible at the community level than the individual level?", "4eae01bd-26ed-4b64-9606-abf42d1039e7": "How does the Blueprint for an AI Bill of Rights propose to redress harms caused by AI systems?", "c93c5ccd-cd7b-4a31-b8fe-2730399bf76e": "What are the potential benefits of evaluating AI impacts at both individual and community levels?", "8e93918d-d152-4bda-a527-51eb1e4a563a": "What are interpretability and explainability methods in AI systems?", "8bd0ab3a-519e-4f8b-bd09-52e086f06ab1": "How 
can we evaluate GAI system decisions for alignment with their intended purpose?", "9ea19168-923a-4776-b043-9cc4c8d33b84": "What is the importance of monitoring and documenting instances where human operators override GAI decisions?", "48f1823c-987c-43b8-ba09-2f7f30c39478": "How can content provenance issues affect GAI system decisions?", "69904a33-e3bc-4725-a589-ae8903083507": "What are structured public feedback exercises in the context of AI system design and deployment?", "1ff81582-703f-4f1c-9591-b190d995ac44": "How should the results of public feedback be incorporated into AI system decisions?", "70b48d78-cf62-458d-9cc8-cdee5b6b30ce": "What are the roles of AI deployment, domain experts, end-users, and operation and monitoring in AI systems?", "3cbba8c5-608e-41ba-acc1-e266269a8d04": "What is the significance of verifying information integrity in AI systems?", "c0dbcb52-583f-455e-ae5b-a4baefe89ffb": "How can harmful bias and homogenization be addressed in AI systems?", "022fb6ca-e847-4f32-a2d8-a01f772f1c21": "What are the key considerations for making \"go\"/\"no-go\" decisions in AI system deployment?", "776a11f6-1df2-463f-8f5e-9ab8c838c2ed": "What is the role of the Connected Health Initiative in advancing healthcare technology?", "b183d8c6-087c-4de9-9b29-7b89974ffa14": "How does the Consumer Technology Association influence tech policy and innovation?", "611c347f-fd3b-498b-87ff-095aa469d501": "What contributions has Courtney Radsch made to digital rights and online freedom?", "ccd936e2-e89b-41c3-9b5d-4c171d9f1eb6": "What are the primary research focuses of the Data & Society Research Institute?", "de9aab5b-822b-43a9-97f2-e9d6959e33ba": "How does Data for Black Lives use data to address racial disparities?", "daa39ee8-a5c1-4bef-bb7a-eadbcc70f33b": "What projects are currently being undertaken by the Data to Actionable Knowledge Lab at Harvard University?", "cc6d42a1-429f-4ea8-a0c8-481571c586ea": "What services does Deloitte provide in the realm of technology consulting?", "1fa9c10e-8e74-472e-98c1-62ee3c81305f": "How does the Digital Therapeutics Alliance support the development of digital health solutions?", "3d8c9a00-451d-4097-95da-7db467588dc5": "What is the mission of the Electronic Frontier Foundation in protecting digital privacy?", "d5fd9d65-12bb-4f39-a0f1-3cb54d012fe7": "How does the Electronic Privacy Information Center advocate for consumer privacy rights?", "cb0bf5f3-38b6-4eec-81ff-530a778e0328": "What are the key findings of Smith et al (2023) regarding the use of neuroanatomy as a metaphor in large language models?", "207c0258-6572-48ec-ab8d-8f6558681013": "How do Soice et al (2023) propose that large language models can democratize access to dual-use biotechnology?", "eba2a62c-de0b-4649-8f1e-438799536f1a": "What methods and considerations are discussed by Solaiman et al (2023) in \"The Gradient of Generative AI Release\"?", "015148ae-410b-409f-a89d-73d1a425a37d": "What privacy concerns are raised by Staab et al (2023) in their study on violating privacy via inference with large language models?", "b4463b2e-409a-48a2-9a74-455373d37285": "According to Stanford et al (2023), whose opinions do language models reflect, and what implications does this have?", "6ae4fc70-185d-4aae-bc07-faedd4dbae8e": "What are the energy and policy considerations for deep learning in NLP discussed by Strubell et al (2019)?", "f67fd894-f032-407f-8771-8f564f4f9ca2": "How does the White House's Circular No A-130 (2016) relate to managing information as a strategic resource?", 
"ae278041-1973-4238-aa7f-a6cac1551a8c": "What are the potential risks of hallucination or confabulation in large language models as discussed by Smith et al (2023)?", "f6cbd86e-2f15-4311-9c83-e407554ff032": "How might the democratization of dual-use biotechnology through large language models impact society, according to Soice et al (2023)?", "38192107-9ce8-4cda-b34a-cc111e33be8a": "What are the ethical considerations in the release of generative AI as outlined by Solaiman et al (2023)?", "af1bb742-8ec7-4495-9141-a44df3bda1a0": "What is the Jigsaw Unintended Bias in Toxicity Classification competition on Kaggle about?", "379fa7b3-02e9-4abb-a67d-f5611f67453f": "How does the paper by Lucas Dixon et al address unintended bias in text classification?", "938ae17a-7800-4fa0-9d13-64d885add576": "What are the key findings of the AAAI/ACM Conference on AI, Ethics, and Society paper by Lucas Dixon and colleagues?", "c1a34901-9205-4c40-81b9-92d9bccfe577": "How has Google reduced racy search results for terms like 'Latina teenager'?", "fcd299fd-aad2-49b8-9fd4-d6d51e7d6d06": "What impact did Google's reduction of racy search results have on search outcomes?", "8fe9af88-6425-4dce-ba89-5f8170af3a50": "What are the main arguments presented in Safiya Umoja Noble's book \"Algorithms of Oppression\"?", "dbf83cba-a1ec-4513-a5ee-dbb91d87f238": "How do search engines reinforce racism according to Safiya Umoja Noble?", "07560e9e-9c12-44bd-8b3e-a2aa039ef9bc": "What measures has Google taken to address bias in its search algorithms?", "773283eb-6b5a-4256-8ddf-8f255df3f593": "How effective are Google's efforts in mitigating unintended bias in search results?", "d57e3ef2-9ba0-4083-91e5-bbbc46ea63c6": "What are the ethical implications of unintended bias in text classification and search algorithms?", "ae1eb349-2033-4722-ae50-edd77d230dee": "What are the expectations for automated systems in sensitive domains like criminal justice and health?", "f123f8ad-0f9d-473a-a63f-f10e7686745d": "How can human oversight be implemented in automated systems to avoid discriminatory impacts?", "61295097-606f-4249-968f-47372578c40c": "What safeguards should be in place for automated systems used in employment and education?", "663dd7a9-c1b9-432e-8fbf-ea8ec15a775b": "Why is it important for automated systems to have narrowly scoped data and inferences in sensitive domains?", "a0a14c6f-86f9-4164-99b9-4db3716b659e": "What are some examples of inappropriate impacts that automated systems should avoid in sensitive areas?", "5b5f3867-1803-4238-af24-bd788829ee4c": "How can technical standards and practices be tailored for specific sectors when developing automated systems?", "5e60b645-855e-47df-8ee9-48b1a1a83efd": "What role does human oversight play in ensuring the proper functioning of automated systems in sensitive domains?", "7e2af9b3-68e7-46c0-9213-acdc636f4273": "How can automated systems justify each included data item or attribute in sensitive domains?", "3176e372-80fb-44f7-ba7b-210022be925b": "What are the potential risks of using automated systems in criminal justice and how can they be mitigated?", "dc35ee01-7ac3-4ccf-8b45-14b5aa5a6c9e": "How can automated systems be designed to meet the expectations laid out in the framework for sensitive domains?", "d599c961-25c4-4fee-8ed1-d5c62a73647f": "What is AI red-teaming and how does it work?", "73118592-ad30-419d-b6b2-3dd0d9384da9": "Why is it important to have large groups of AI red-teamers?", "e5807faa-9a9f-4510-807d-c4553fcd9780": "What are the benefits of having domain experts in AI 
red-teaming exercises?", "d70f0a08-4229-4e3c-96e1-75b97e2bd955": "How can AI red-teaming be applied in the field of cybersecurity?", "e56fdc33-7ce6-4293-9d14-efd06aa134c7": "What challenges are faced when recruiting specialists for AI red-teaming?", "e3954bfb-ba3d-4cae-bbcf-993ccb500b97": "How can a combination of experts and non-experts improve AI red-teaming exercises?", "1bbd547f-2198-4ec1-ae87-5a5ac9ebc2d1": "What specific skills are required for effective AI red-teaming in medicine?", "54655f06-a110-44d5-84da-2b618c017d76": "How do AI red-teaming exercises help in identifying harmful model behaviors?", "775f37c2-5451-467c-8c55-81cbdf42dc44": "What are some examples of harmful behaviors that AI red-teaming aims to prevent?", "6092bd99-29ad-4e06-bb27-ad1ae9006a58": "How can AI red-teaming be beneficial in the biotech industry?", "bc600deb-fbcd-400f-9140-0d915fcdaa91": "What are the limitations of current pre-deployment TEVV processes for GAI applications?", "2aef734a-ac35-429c-a7ed-45f4bc41a30b": "How do organizations measure performance and capabilities in pre-deployment testing for GAI?", "14c90494-2295-4e90-ae42-73e7ae4a413c": "Why might anecdotal testing through video games be inadequate for GAI systems?", "cb928351-26a1-491c-9262-10297fb46ede": "What are some recommended \"pre-deployment testing\" practices for GAI?", "90e2f5ef-ab38-4486-ac93-f172d2500a85": "How do current pre-deployment TEVV processes fail to reflect deployment contexts?", "d860db03-3ac9-4ee9-8e9d-68730a2dbd6e": "What are the risks associated with using standardized tests designed for humans on GAI systems?", "7629f220-d4f3-493d-8160-89a311a495a7": "How can organizations improve the validity and reliability of GAI system testing?", "e54dbc2a-f5cc-47d4-b7f5-2cdad36a161d": "What is the role of risk measurement and estimation in pre-deployment TEVV for GAI?", "864610a5-0a37-4ef4-98d4-1b6d7d32f886": "Why might jailbreaking or prompt engineering tests be insufficient for assessing GAI systems?", "8fc6576e-192f-4f3b-af15-d1c21b908da9": "What are the state-of-play methodologies for pre-deployment testing of GAI systems?", "9bec12eb-4fa7-477e-a667-fd165c16f38d": "What are the key principles of the Privacy Act of 1974?", "ce2ec10e-da61-4462-b06b-0aa052282a80": "How does the Privacy Act of 1974 limit data retention in federal records systems?", "782d7b57-c109-49a4-aee8-fce11007be0d": "What rights do individuals have under the Privacy Act of 1974 regarding their personal information?", "04329baa-7e91-421f-b129-dcab9672ade8": "How can federal agencies determine what data is \"relevant and necessary\" under the Privacy Act?", "f84ca6ea-58d1-48f6-a5a1-cd8ef9f4658f": "What are some real-life examples of laws that protect data privacy?", "f2ce84d8-0c07-4e0e-9bcf-3aaecc8dc8a2": "How do technical approaches help in protecting data privacy?", "366462e4-8d9e-40ee-b30d-f4817377ba05": "What are sociotechnical approaches to data privacy, and how do they work?", "e8433dc8-3bc8-4eee-9a40-71e4005f3386": "How can policies be designed to ensure data privacy in practice?", "ead8c5d0-e517-4da2-b4b7-885f6c6031b4": "What are the limitations of the Privacy Act of 1974 in protecting personal information?", "7f75740d-9d77-4338-9f3a-78ab85ae1906": "How can individuals access and correct their data under the Privacy Act of 1974?", "caeaec67-6507-428f-8b4b-41751a7af496": "What are the common data privacy violations in system training data?", "2987bea3-66c4-4ccc-92fb-5c073f60fd97": "How can organizations address intellectual property concerns in AI 
training data?", "48581f7f-03f6-4ee0-abe0-09bdb7f54800": "What measures can be taken to prevent obscene or degrading content in AI outputs?", "66f09013-2126-4c82-bc9a-c3a11320b3d7": "How can harmful bias and homogenization be mitigated in AI systems?", "a7a8bc83-2e72-4a24-a793-f1b816f0c283": "What are the risks associated with dangerous, violent, or hateful content in AI-generated outputs?", "cfb4d915-c088-4618-925e-02e974b919bd": "How should organizations re-evaluate safety features of fine-tuned models when risks exceed tolerance levels?", "ae31cf33-efff-4297-a26b-ea5db0b92c1c": "What steps should be taken to review GAI system outputs for validity and safety?", "c8138f61-21b5-4802-ac6d-f18233410054": "How can organizations ensure that generated code does not lead to unreliable downstream decision-making?", "0f1e8318-43b1-4523-8576-17ceb1fc8061": "What are the best practices for verifying that GAI system architecture can handle and recover from security anomalies?", "25ff123c-f46a-4669-9903-16149a4ec2d2": "How can confabulation and information integrity issues be addressed in AI systems?", "1e6c80b3-721c-45ae-88a6-cf0f8ced7156": "How can organizations effectively leverage feedback from boards or committees when deploying GAI applications?", "df6054be-5a75-416a-9eaf-0be1a05ee1d6": "What are the best practices for using human moderation systems to review AI-generated content?", "288339c7-c00a-4dd4-aa6b-e2002ba889ce": "How should organizations align human-AI configuration policies with socio-cultural norms?", "deaad193-bda5-46ff-afa9-72206425c873": "What criteria should be used to evaluate the performance of pre-trained AI models?", "2d47d0ae-c76d-4ae5-a638-a6517c6220ba": "When is it appropriate to decommission or retrain pre-trained AI models?", "76fab1b0-0e24-4c35-a7ed-8d49cfe2720b": "How can organizations determine their risk tolerance for AI deployment?", "495b8ac6-a421-46e9-a6c3-985e29b71272": "What are the key considerations for integrating third-party pre-trained models into an organization's value chain?", "9326e08e-8852-4d72-a983-7e09ff35af6e": "How can human moderation systems be configured to handle AI models that perform poorly?", "a4b17ec5-fc35-40d7-9721-d18501274964": "What role do organizational boards or committees play in ensuring information integrity in AI applications?", "9bc8756b-e0e6-4b2c-8ff9-a26618ca918d": "How can organizations monitor and manage the risks associated with AI deployment and operation?", "fc3b2287-133b-4a7d-beb3-436fe108fc3f": "What are the best practices for reporting AI system errors and near-misses?", "b35cb75c-71fc-4af1-9c57-c5ae6ccd8cb3": "How should incidents involving AI systems be communicated to regulatory bodies?", "381e4eab-6b5b-4ca7-8571-31aba3dfe943": "What policies should be in place to track negative impacts of AI systems?", "08bba123-7cf2-4473-830e-9c28dc6dbce4": "How can organizations ensure information integrity when dealing with AI confabulation?", "ff25d5f3-7350-498b-8341-45dae4e79977": "What are the legal requirements for reporting AI-related incidents?", "8ae5b2b8-aeff-4c5e-aa72-6924b2fc4dd3": "How can companies establish effective procedures for recording AI system errors?", "c1f383d3-687b-4a57-a98a-c2f2212368bf": "What steps should be taken to maintain information security in AI systems?", "a8540ee7-471f-42c8-8ff5-4d74af3e9e84": "How do you handle confabulation in AI systems to ensure accurate information?", "abee8f0e-95b6-4a66-9584-2fa2c87344cd": "What are the key components of an incident communication plan for AI systems?", 
"86a7086e-9edd-4247-96a7-f4bd37a9d20e": "How can organizations track and mitigate the negative impacts of AI systems?", "582fa583-c146-4227-a529-e7428ecadbd5": "What are transparency artifacts in AI, such as system cards and model cards?", "f5ab7c6f-0d8c-4379-a524-ea0abad40378": "How do transparency artifacts help in managing third-party AI models?", "62dbacb4-0a6c-437d-b130-68d38e4fb182": "What is the importance of monitoring pre-trained models in AI systems?", "a8ba6c23-7fb6-47bc-8dcd-4242dca46afe": "What are some techniques used in explainable AI (XAI)?", "495f990c-cfe1-4bd2-99e7-728aaca889b1": "How can explainable AI (XAI) techniques mitigate risks in AI systems?", "dbf021da-491b-4d03-b6c9-7641362406aa": "What is model compression/distillation in the context of explainable AI?", "f2e9d547-10c8-470f-be55-7919d3b9fc76": "How does gradient-based attribution work in explainable AI?", "333ba50b-1816-4a01-9ec5-0dc8942bd96e": "What is the role of counterfactual prompts in explainable AI?", "f743f398-d354-4757-a791-669763758105": "Why is it important to document adaptations of pre-trained models?", "493410a8-8da9-4c05-af25-02344ce5fb36": "What are the risks associated with unexplainable generative AI systems?", "d43b5165-b424-4efb-8ac5-c70839068a9b": "What are the key components that should be included in the reporting of accessibility evaluations for sensitive domain systems?", "e745cebf-169d-42f5-a769-451d4e947df1": "How should training and governance procedures for technologies in sensitive domains be documented?", "48cc0ef1-0635-4131-ad9d-a75b6f87ba6c": "What kind of information should be included in the documentation of goals and their assessment?", "77a87b11-faf6-450d-bc8c-8fc3bb450471": "Why is it important to consider the data included in the reporting of accessibility evaluations?", "32dbfbc4-6fc4-4307-879f-1a14277108ac": "How can the governance of reasonable access to technology be documented effectively?", "d129f587-db8c-42b2-b15b-f48854e829b5": "What are the benefits of providing reporting in a clear and machine-readable manner?", "595e4aba-2967-46e1-9276-4619eaf23ee3": "What are the best practices for documenting training procedures for sensitive domain technologies?", "70ad54f1-132d-4b05-b0d5-ec776fc6a092": "How can organizations ensure that their accessibility evaluation reports meet the required standards?", "e782da3f-efb4-4a2b-952d-815b7704c1c2": "What challenges might arise in documenting the governance of access to technology?", "d7780233-e970-44f8-941c-d04776c461f0": "How can the assessment of meeting goals be effectively reported in accessibility evaluations?", "411f2cb4-761f-4481-acc9-3379b98be822": "What are gradient-based attributions in the context of GAI risk measurement?", "aff655e4-7805-4005-96dd-97e57a4ed1f6": "How does occlusion/term reduction help in improving the transparency of GAI systems?", "b2cca45d-920c-4cfe-81d7-5dfa18ea4ac3": "What are counterfactual prompts and how are they used in GAI risk assessment?", "c834f9d2-569f-4a49-aecb-ae1382201dda": "Can you explain the role of prompt engineering in mitigating risks associated with GAI systems?", "15467914-2915-4097-a975-a2b7a2bf6e0d": "How is the analysis of embeddings useful in measuring GAI risks?", "7233a36e-44dd-4945-bdca-92ff30a47933": "Why is it important to assess and update risk measurement approaches for GAI systems regularly?", "22334989-4055-455c-a95e-7e517df2a381": "What are the benefits of using standardized measurement protocols in GAI risk assessment?", "d818454e-443f-4926-8398-9edbf7ce1b23": 
"How can AI red-teaming contribute to the risk measurement of GAI systems?", "895f4711-def6-4b59-833c-f71a070facef": "What is the significance of independent external evaluations in the context of GAI risk measurement?", "c4d00e1c-836a-4ceb-973d-e0df3779b23f": "How do policies, procedures, and processes help in detailing risk measurement for GAI systems?", "1057d1c5-3ac4-4d8c-9193-566dac0f5640": "How do new surveillance technologies in education disproportionately harm disabled people?", "8c3f757b-977d-4050-b35d-aea49c010cf6": "What are the impacts of surveillance technologies on disabled individuals in the workplace?", "1468fc8a-bccd-4e2e-a99c-fd18b4bd12c2": "How does policing with new surveillance technologies affect disabled people?", "ff6dc571-3da6-4cb6-ab29-a41494392ed0": "What are the specific ways in which health care surveillance technologies harm disabled individuals?", "5e523726-a9d1-4807-9295-3881cd46f457": "How can we mitigate the negative effects of surveillance technologies on disabled people in various sectors?", "83c676ae-600e-4c27-98f4-24e784b1d7e9": "What are some examples of ableism in new surveillance technologies?", "c2b9fec3-5182-456a-8d05-894e91037f8d": "How does the Center for Democracy and Technology suggest addressing disability discrimination in surveillance?", "0da8dd83-5ddb-4544-a4ca-0fe64ff97e5d": "What are the ethical concerns regarding the use of surveillance technologies on disabled people?", "3d03cf3e-8e29-4424-b5ee-db3ce2aef10c": "How can education systems ensure that surveillance technologies do not harm disabled students?", "9466ba73-2db9-4dbb-a45c-9cf5fc4b9233": "What policies can be implemented to protect disabled people from the harms of surveillance technologies in the workplace?", "c8753999-1abf-4fe0-b602-96c7badbe1b8": "What is the main argument presented by Tirrell in \"Toxic Speech: Toward an Epidemiology of Discursive Harm\"?", "b146064b-3808-4dc7-ae9c-1fd32c51462d": "How does Tufekci address the challenges of computational agency beyond Facebook and Google?", "26296ada-303b-4757-99c1-d0b666b5ebe6": "What are the key findings of Turri et al in their study on AI incident documentation practices?", "22a9a1b9-5625-453a-8f38-7e6e1faece16": "What concerns are raised by Urbina et al regarding the dual use of AI-powered drug discovery?", "456281a3-2fc9-42d0-aaee-b9e644b01898": "How do Wang et al evaluate the energy and carbon considerations of fine-tuning BERT?", "d0fe0b08-146c-467e-8042-9160ccec66de": "What is the purpose of the \"Do-Not-Answer\" dataset introduced by Wang et al in their 2023 study?", "1c65bc00-7698-4aac-ac75-f4cf0a56d7d7": "How does the concept of discursive harm relate to toxic speech according to Tirrell?", "3b23f18c-6948-482c-ad4b-4094da577801": "What emergent challenges of computational agency are highlighted by Tufekci in her 2015 paper?", "4474dccd-e072-4606-bf97-01a59797d81c": "Why do Turri et al believe it is important to improve AI incident documentation practices?", "bc98f064-db21-492e-8cbe-6bb73bfc8422": "What are the potential risks associated with AI-powered drug discovery as discussed by Urbina et al?", "660c7ba7-9614-4425-afbe-8e9230f9e5a3": "What are the main risks and harms associated with automated systems discussed in the panel?", "3d79fef1-7ec1-4629-8dbb-c4c88e55354f": "How do automated systems impact consumer rights and protections?", "19a6f91d-d99b-479f-974f-60046204684a": "What insights were offered regarding the use of automated systems in the criminal justice system?", "926d89a3-3c21-4df7-9dfd-ced249915ecb": 
"How can automated systems promote equal opportunities and civil justice?", "8aee8607-430b-469e-86d5-4232adc7e194": "What are the policy opportunities for regulating artificial intelligence to align with democratic values?", "f7611088-a63f-4ae9-a027-929e84a6f966": "How do automated systems affect social welfare and development?", "93b98b07-e59b-4886-bc4c-6b487d0c9dc7": "What are the benefits of automated systems in the healthcare system?", "47a623c2-1493-45ff-82a5-c288ad0a01c7": "Who were the key experts and practitioners involved in these panel discussions?", "b42c870c-4507-4740-8c36-662932a8b9fc": "Are there any specific case studies or examples mentioned in the discussions about automated systems?", "38e32513-a3a7-4720-96e6-35e11ae830b5": "How can the public access the recordings of these panel discussions?", "1c8ce2b7-1996-402e-b151-319c407a4805": "What is the purpose of the framework developed by non-profit organizations and companies for machine learning systems?", "0ac24fc9-606c-4eaa-8576-5bafe235903d": "How does the framework for machine learning systems go beyond simple notice to inform the public?", "66d10b11-4e1c-4323-90e6-162723085f38": "What are some of the reporting elements included in the framework for machine learning systems?", "be510be5-869b-457f-8999-878fdb0b09ba": "What federal laws require lenders to notify consumers about certain credit decisions?", "e5d619fa-49b5-435a-adc3-565acb71c415": "What is an \"adverse action\" notice in the context of credit reporting?", "c5c5e007-9f35-46e7-aa48-ce8e0f608d5b": "Under the Fair Credit Reporting Act, what must be included in an \"adverse action\" notice?", "5437738f-c791-44c0-bb4c-acc4b6d2ff09": "How do the Fair Credit Reporting Act and the Equal Credit Opportunity Act protect consumers?", "cfe9c2ed-5bf9-478c-89c7-794e93148715": "What are disparity assessments in the context of machine learning systems?", "48425d12-7cb9-4383-b947-e6e9566b9e7e": "Why is transparency important for machine learning systems used by companies and non-profits?", "692bc9da-55ae-42fc-b205-f3bafca5a068": "How do safety evaluations contribute to the transparency of machine learning systems?", "d7c50441-1bb2-4a13-a9b7-608f14fd60fe": "What are the key actions taken based on the results of independent evaluations?", "614fef06-0fe5-4830-8050-73ba9a266f5a": "How can I ensure that my reporting is in plain language and machine-readable?", "1a1246aa-f540-4d9b-970a-69324e141925": "What procedures are typically followed during independent evaluations?", "c5c03047-1888-498b-8f37-9d697a2d08d0": "Why is it important for reporting to be in a machine-readable format?", "0b6f15fa-f4eb-4918-81c3-03c0a661c2c6": "What are the benefits of using plain language in reporting?", "69197eb3-9af4-4e2f-a207-c4e672d92d0c": "How do independent evaluations impact decision-making processes?", "018142f3-7555-4ee1-9679-91e57ae8774f": "What tools can be used to create machine-readable reports?", "fb522bed-6381-4de1-8e9f-51b4ea153f04": "How often should independent evaluations be conducted?", "1cb575b3-9f7b-4da7-8af1-f027dd6c2a12": "What are some common challenges in making reports machine-readable?", "433a4b30-f091-441a-af1e-ce2dcc4d8afb": "How can I improve the clarity of my reports using plain language?", "1437e5d7-0d57-47c3-a4a7-051a8d7ff1b3": "What are the best practices for allocating risk management resources in the context of GAI use?", "ad5327c9-e60c-470b-8f4b-f68f13ef62c3": "How can organizations effectively assess the severity and likelihood of negative impacts from GAI?", 
"e37a9ed4-73f7-42c6-8260-88f6a0063f99": "What are the differences between model-level and use-case level risk mitigations in GAI?", "f2c16031-33cc-4d57-989d-4964ca15cdae": "How can unknown GAI risks be scoped or evaluated given the uncertainty about its scale and capabilities?", "118530e2-7547-4cc3-ad5b-6568ecae31c4": "What challenges do organizations face in estimating known GAI risks?", "c573b1a0-67f0-492f-bcc7-67d803c58308": "How does the lack of visibility into GAI training data affect risk estimation?", "e343ee0e-f5b4-42b5-bc79-b18001db5dc5": "What are the current limitations in the science of AI measurement and safety?", "55a01f18-ee4f-4970-ac12-de3cd610e0b0": "How can empirical evidence be used to address GAI risks?", "0bd8ff53-8c08-4507-a5f3-550b439e5efc": "What are some examples of speculative risks associated with GAI?", "bb56381e-85cc-43fb-8e88-7117f7aacd52": "How can stakeholders manage the wide range of inputs and outputs in GAI to mitigate risks?", "a975c7d1-2ee6-4b57-849b-847b4a68ac46": "What happens to former Apple employees' titles when employers verify their resume information?", "0611d75e-5339-4d6b-8f5d-9768777ec2f5": "Why does Apple replace former employees' titles with a generic title?", "afb164b5-c73e-4af5-984b-7ac0babf5ec6": "What is the National Institute of Standards and Technology's Privacy Framework?", "290e48c4-183f-4050-a61b-d3d835bca368": "Where can I find success stories related to the NIST Privacy Framework?", "6bb17fa3-0268-4f77-a5f6-77274be300b9": "What is the ACLU of New York's stance on facial recognition in schools?", "741f4783-a448-4014-a7f1-a230bb81f3da": "What should I know about New York\u2019s temporary ban on facial recognition in schools?", "46545ce3-f86d-4a59-89f3-76f23173b07e": "When was the amendment to the New York State Education Law enacted?", "9cc2b4c1-cc01-463e-bc84-67dd2629f727": "How can I access the text of the New York State Assembly's amendment to the Education Law?", "818ef24e-1ab9-460f-ab5c-12753804fd50": "What is the Labor-Management Reporting and Disclosure Act of 1959?", "5741db49-72b8-4e42-bb0c-8b664a4b6a54": "Where can I find information about the amendments to the Labor-Management Reporting and Disclosure Act of 1959?", "243360b2-c711-4a99-94ee-2d017ae0b586": "How can generative AI contribute to the spread of disinformation and misinformation?", "a6579796-6adc-43f8-b162-a104e18bfd58": "What impact did the synthetic image of a Pentagon blast have on the stock market?", "4e1b2a11-72d4-409a-bff8-53dbfd748eef": "How do generative AI models assist malicious actors in creating propaganda?", "867fc430-fe4c-420b-a096-016280fb19a5": "What are the characteristics of trustworthy AI?", "1c1161cf-f257-43b3-820c-89d59cf30c25": "How can generative AI be used to create fraudulent content?", "b06a86d1-7e66-456c-ac18-f879ce265907": "What are the potential downstream effects of disinformation facilitated by generative AI?", "78140874-f80e-4e3b-929f-f5a10c950800": "How can AI-generated imagery affect public trust in valid information?", "cffecf56-bb7c-43ce-ac11-2bc5285c2d7a": "What are the standardized practices for information security in computer systems?", "782f973d-10c8-4637-b58d-78028023f937": "How can generative AI models be made accountable and transparent?", "c1507631-a4af-4438-86d8-01fe4c764de7": "What are the challenges in ensuring the safety and reliability of generative AI systems?", "77d8a4e8-7880-4a14-9f78-6951755d3877": "What are the five principles identified by the White House Office of Science and Technology Policy for guiding 
the design and use of automated systems?", "31445acf-bbb3-4ba2-815f-8f4fc6c17277": "How does the Blueprint for an AI Bill of Rights aim to protect civil rights in the age of artificial intelligence?", "45302ae8-2e25-49cc-bf44-08b27c61b02b": "What role does the right to privacy play in President Biden\u2019s vision for civil rights?", "3bbac743-bb7b-4abb-901a-036782abe40d": "How can the Blueprint for an AI Bill of Rights reinforce the highest values of society?", "3c04f443-114b-4094-b7ea-0850a1205413": "Who contributed insights to the framework for the Blueprint for an AI Bill of Rights?", "5f301a21-1a6e-4fe4-bbc4-3ed0207e093c": "What is the purpose of the technical companion to the Blueprint for an AI Bill of Rights?", "117fc83e-30f4-4fe8-b500-30f64f143ed2": "How does the Blueprint for an AI Bill of Rights respond to the experiences of the American public?", "0cb33d22-ac9a-4005-af6e-aa9926c5be8b": "What are some threats that the Blueprint for an AI Bill of Rights aims to protect people from?", "bb631381-802a-4eaf-ad0f-69f0760683bd": "How can policymakers incorporate the protections outlined in the Blueprint for an AI Bill of Rights into practice?", "f58c6ed9-bdbc-40f5-a23e-b7a8a3b74b39": "What is the significance of the right to privacy in the context of automated systems and artificial intelligence?", "c4d1371c-ac60-4fbb-9bbf-22fd9a717c1f": "What is the role of the Innovation Foundation in the tech industry?", "fd3f2838-886a-4dc2-b79c-47708237c0d6": "How does the Information Technology Industry Council influence IT policies?", "1c90fdb5-fe0e-4f62-81ef-68f4f27e5611": "What are the main objectives of the Innocence Project?", "7d7082bc-3629-4c33-bf06-8d1de10409f5": "What research areas does the Institute for Human-Centered Artificial Intelligence at Stanford University focus on?", "b9df2f99-799c-46bd-a20d-5f8d2914b226": "How does the Integrated Justice Information Systems Institute contribute to law enforcement?", "a6631d31-0e87-473e-a319-90fcda9c1c3d": "What initiatives does the International Association of Chiefs of Police undertake to improve policing?", "d7d6e108-130a-4ab9-915e-d9ee5a20c635": "What are the key functions of the International Biometrics + Identity Association?", "b9ccc9b0-0028-4ba4-bbb6-5233720451b2": "How is IBM (International Business Machines Corporation) involved in AI development?", "609aa6d8-e41e-4fc7-83a5-2cf32901c497": "What humanitarian efforts are led by the International Committee of the Red Cross?", "2e407155-794d-4127-a539-3071466d8964": "What is the mission of the Lawyers\u2019 Committee for Civil Rights Under Law?", "b3a14e62-cbd5-446d-bce9-c73acd2baea8": "What are the main challenges in mitigating bias in AI systems?", "236f0306-90cd-49d3-8ec0-6a0cba6252a5": "How do datasets contribute to bias in AI?", "e08d701d-e244-404e-a35d-5e7a74b18eb5": "What are the best practices for testing and evaluating AI for bias?", "a0516a4f-2bd5-4c53-9ddc-49b6d709e2ef": "How can human factors influence bias in AI?", "18f7d41a-eacc-4b77-bff7-dd63adf0b0e5": "What is a socio-technical perspective in the context of AI bias?", "b1ff8e57-f9c8-4c9c-b80d-0b8cab8414d0": "What preliminary guidance is available for addressing AI bias?", "99396743-8ac7-4e6a-8174-ca002a8bc22c": "How can algorithmic discrimination protections be implemented in AI systems?", "e1119908-110c-41ef-a480-2c1db95e1206": "What role do datasets play in algorithmic discrimination?", "59a7e5e5-28fd-40e7-9eef-7a8c47988333": "How can we ensure fair testing and evaluation of AI systems?", 
"f6fccc6d-ae1d-4832-a6a1-6a02d216d0ef": "What are some strategies to manage human factors that contribute to AI bias?", "2ad0d5f2-d9ad-4172-9854-643c6a273f4a": "What are the ethical considerations for human subject experimentation in medical and scientific research?", "d378ac98-249d-4ce1-9350-5282c48770f8": "How can organizations ensure the quality of data in sensitive domains?", "7dee64d8-bf12-4366-bc43-ff71821a2047": "What are the consequences of using flawed or inaccurate data in decision-making?", "79cbd8a2-e147-44bc-8a55-26401ebfb951": "Why is it important to conduct regular, independent audits of data?", "5841722f-bf15-4fbd-a76a-b321b179aec4": "What measures can be taken to maintain accurate, timely, and complete data?", "6b8892ae-acb4-4abd-9d0e-befbaa39382f": "How should entities handle the burden of reviewing and correcting data?", "d7804551-0ff7-4e12-b264-7be2a008bed1": "What are the best practices for limiting access to sensitive data?", "f7d942c1-1e8f-484b-8edf-c8d0a64caf8b": "Why should sensitive data and derived data not be sold?", "42a9d92b-e27b-472c-ad56-07cb1140ffd9": "What governance procedures are necessary for human subject experimentation ethics?", "480733dd-d93f-4c4b-9b40-56e5910e831f": "How can organizations prevent adverse consequences from decision-making based on inaccurate data?", "7798df83-4529-4317-b89d-746485eb64a4": "What is the purpose of the yearlong process led by the OSTP?", "f695f50d-54d0-49c4-be89-203b99be0e61": "How did the OSTP gather input from people across the country?", "0daa74c8-7975-4a0e-ad33-5c760795c2d8": "What types of stakeholders were involved in the OSTP's process?", "28c42deb-d5d5-4709-b15d-3cea81950b3f": "What are some potential harms of algorithmic and data-driven technologies discussed?", "ae24e00b-3f03-4260-b4ed-5e6848c7864b": "How did public listening sessions contribute to the Blueprint for an AI Bill of Rights?", "8b794925-1544-465c-82ec-bc7f37298137": "What role did the Center for American Progress play in the panel discussions?", "605617f7-69f2-410c-b50a-c0974457693e": "How were impacted communities involved in the OSTP's process?", "47c40993-468f-4792-b50a-d2624c30ed04": "What is the Blueprint for an AI Bill of Rights?", "8f2a4098-0fb5-4199-b811-3a63eacf1485": "How did the OSTP ensure the process was inclusive of various experts and policymakers?", "ebef13ba-6679-4da6-92e5-0cf132673dba": "What were some of the promises of algorithmic and data-driven technologies mentioned?", "bc196094-f147-4147-8ab1-25cf7cad041b": "What are the key components of effective change-management controls?", "aa3c17a6-1a44-48e0-9bac-245822879aee": "How can businesses ensure data provenance in their commercial operations?", "d4eb8817-0f37-4caa-ab0c-ed5583f18564": "What are the best practices for implementing change-management controls in a commercial setting?", "74a83c47-6ea7-4e21-9325-9cd0a9b29316": "Why is data provenance important for commercial use?", "40cace51-c6f7-4263-909a-934539ab4313": "How do change-management controls impact data integrity?", "104aa783-0d34-44c5-bf81-967d2c43daf8": "What tools are available for tracking data provenance in commercial applications?", "e1f816cd-f3d5-4510-b7af-29babd74b1d9": "How can change-management controls help in regulatory compliance for businesses?", "561ce82a-df07-4979-adaa-2450c3b8fcda": "What challenges do companies face in maintaining data provenance?", "8868798f-7d64-4e60-bc77-56725a2a1c37": "How can change-management controls be integrated into existing business processes?", 
"8474f531-52b9-49b1-96e3-7e64a855cafe": "What are the benefits of ensuring data provenance for commercial enterprises?", "2f0ad5b7-bd5d-4662-a86c-d0ad96f67935": "What is the DeepNude app and how does it work?", "6a01a7c9-235a-4a09-9bd5-e38c1dbc4b94": "Why is the DeepNude app considered horrifying?", "b8db359d-0588-4011-aae6-c6838c31805d": "What are the ethical concerns surrounding the use of the DeepNude app?", "754f8572-6872-48c8-a9c5-c998b8091f62": "How has the public reacted to the release of the DeepNude app?", "eeb0bfdc-2e3d-4d88-b02e-9c4f33d462a8": "What measures have been taken to address the issues caused by the DeepNude app?", "5412af4a-850c-44fb-9945-95686cb9ad6a": "How do Amazon\u2019s AI cameras monitor drivers?", "6351f3cd-667a-4aad-857f-81df7b601da0": "What kind of mistakes are Amazon\u2019s AI cameras punishing drivers for?", "8dbba1cb-269d-4957-b4fd-308f00b91882": "What are the implications of AI cameras punishing drivers for mistakes they didn\u2019t make?", "fcbbdd00-85b9-4396-9b7a-5de417c1179f": "How have Amazon drivers responded to the AI camera system?", "4d05c32c-5866-4426-a745-2d2c554a5691": "What steps can be taken to improve the accuracy of AI cameras used by Amazon?", "7f590c18-47a2-438e-977a-cdd098e7fa86": "What are participatory engagement methods in product development?", "edde2af3-d243-4ef8-869b-bbc0626bc609": "How can focus groups with experts be used in AI development?", "541450fd-838a-4b58-94cc-20e3944691ea": "What is the role of small user studies in evaluating new products?", "b0e29ede-db9c-4382-943f-c21b4946bdb7": "How do anonymous surveys help in gauging reactions to specific features?", "e6832258-3b72-4cb6-a1d3-f906eb781da4": "Why are participatory engagement methods often used in the early stages of AI development?", "b6d68612-46b8-42cc-ab3e-31be5702199b": "What is the difference between field testing and participatory engagement methods?", "f46074c9-19da-4ec7-90c0-400d08cb34d4": "How can field testing simulate the conditions under which an AI system will be deployed?", "a8106443-688c-4550-a5fe-1c5ab81fcab9": "What are the benefits of using structured settings in field testing for AI systems?", "4b8e23f4-abf1-4143-93a4-47f53784794f": "How can field style tests be adapted to focus on AI risks and impacts?", "fb69db56-a88a-4b72-818e-8881601a2bc5": "What are the key differences between field testing and red teaming in AI development?", "b78a1120-785c-4e9b-adaa-d3f2d69f0561": "What initiatives has the Biden-Harris Administration taken to increase the number of Health Care Navigators?", "18726698-c2d1-4390-aa93-ba12c019bdec": "How has the number of Health Care Navigators changed ahead of the HealthCaregov Open Enrollment Period?", "05e386f4-de4f-4b24-911b-1719161ea196": "What are the key findings of McKinsey & Company's report on the state of customer care in 2022?", "3cd1a4cc-ca2b-4d1e-8ada-446a7c42cb20": "What customer service solutions are recommended for small businesses according to Business News Daily?", "182886ff-5412-4948-8295-701e0d7fbfe1": "How can small businesses improve their customer service based on Sara Angeles' article?", "37631ff1-592b-4472-94a4-2ce2caf8eafc": "What are the benefits of co-intelligence between robots and humans in customer service, as discussed by Mike Hughes?", "75e3d9c3-d911-48df-9ddb-8914d28e9008": "How effective are bots in customer service according to the Forbes article by Mike Hughes?", "421b5545-cea3-4524-bdc8-d3a22c30dd6c": "What strategies can businesses use to get the best out of their bots in customer 
service?", "83379563-52c0-4b59-b40b-f4418017f011": "What are the latest trends in customer care as reported by McKinsey & Company in 2022?", "d76b947b-8f88-4163-9e86-05a0d2e86981": "How has the Biden-Harris Administration's approach to Health Care Navigators impacted the HealthCaregov Open Enrollment Period?", "9bd4e2e2-cd2d-4535-9dce-679178b1e43c": "How can we reduce the environmental impacts of AI models during inference time?", "d3ba2ee8-7ef9-41ad-92b7-b27c7adbe4b0": "What are the challenges in estimating the environmental impacts of Generative AI (GAI)?", "787e31c9-98ed-4d18-a73c-6e891f3e92a6": "How do AI systems perpetuate harmful biases?", "0649769f-3bca-4cfa-8515-d227e06e7fe9": "What are some examples of biases in text-to-image models?", "83843f77-efa5-4c8e-b8ea-95ff293e2900": "How can AI systems be made more accountable and transparent?", "94bc029d-f508-4768-a427-507ea45dfe8e": "What steps can be taken to ensure AI systems are safe?", "679f0e14-0fda-4866-baa6-90cbd9892af0": "How do biases in AI systems affect different demographic groups?", "93f909c5-8ab0-445e-893d-16ef7d118607": "What is the impact of harmful biases on society when using AI systems?", "571ae548-9f61-4e1e-95a1-4357dbb1c8b8": "How can we address the underrepresentation of women and racial minorities in AI-generated images?", "30e3bc0e-5f22-4216-a2c0-f27f861b5634": "What are the consequences of biased or stereotyped outputs from image generator models?", "1b6a2ed0-d374-4c90-9b2f-26802ca2cc85": "What are examples of sensitive domains that require enhanced data protections?", "6c6d9102-09bf-4244-b8ba-21686bf078c4": "How do sensitive domains impact human rights such as autonomy and dignity?", "397d1460-bd65-4980-8b60-2ff58fcfc82e": "Why is health considered a sensitive domain?", "bc39145c-d1dd-4e69-a545-04944b0249af": "What makes family planning and care a sensitive domain?", "c567b788-ba2a-471d-8a61-f8acc8154f67": "How does employment fall under the category of sensitive domains?", "29011969-35fb-4185-9b93-65a5945bf067": "Why is education considered a sensitive domain?", "5f790da9-449c-41e8-865a-7f51afe460fc": "What are the implications of criminal justice being a sensitive domain?", "cd10682a-f510-462e-b688-e6038e4d2166": "How is personal finance classified as a sensitive domain?", "20c5ca9a-d0a2-4fa0-b02f-0f7351890b70": "How do societal norms influence what is considered a sensitive domain?", "7d213d18-b9dc-4003-bf54-21ec8dfb8397": "What is the role of surveillance technology in sensitive domains?", "575386df-5852-4b96-98b2-146127539141": "What are the established security measures to assess vulnerabilities and threats in AI systems?", "f0b324e0-dd60-4f57-b879-e56ece38360e": "How can backdoors and compromised dependencies in AI systems be identified and mitigated?", "5d3ecf42-2ce1-468b-9dbf-e4bbc5707389": "What are the best practices for benchmarking AI system security and resilience?", "e4d68479-8a79-4216-bc1a-4c8b156f3b37": "How do industry standards for AI system security compare to state-of-the-art methods?", "fc9490b3-396e-4ac3-95ef-80d13f6cf544": "What methods are used to ensure the integrity and security of AI-generated content?", "1cd90900-c572-4975-b7b8-fc93f4189d8e": "How can data breaches and eavesdropping be prevented in AI systems?", "2e2511f7-776d-400f-99e8-22c70ecc04d5": "What are the common threats to AI system security, such as model theft or exposure of model weights?", "e3b81075-8ba7-49f1-a7d2-87168a99617e": "How can user satisfaction with AI-generated content be effectively measured?", 
"e5339170-f4f7-4d0f-851e-a4b684f4de56": "What are the implications of man-in-the-middle attacks on AI systems?", "db8ab762-0912-47d7-9d30-648164bab39b": "How can the security of AI inference and extraction processes be ensured?", "59c7ac21-d712-480d-b1f2-638dff0439e1": "What are the potential risks of using technology in social welfare systems as discussed by the panelists?", "00e72162-956b-4ed9-94b5-b0ce264a828b": "How can digital ID systems impact the efficiency and cost of social welfare programs?", "0dcfc118-6d07-45b7-9542-8c8fa5036354": "What concerns did the panelists raise about the burden on individuals interacting with new technology in social welfare?", "60fe80fd-12ab-4a19-acb4-511d6cade220": "How can feedback loops in technology systems reinforce inequality according to the panelists?", "6152c7f7-4267-40c0-a522-620e1574f605": "What are some methods suggested by the panelists to mitigate the harms caused by technology in social welfare?", "bc2942af-52bd-4fbb-816d-9db9e02d78c1": "Why is community input important in the design process of social welfare technologies?", "ba3309a2-8a2a-4ac9-a89a-5df5b997a37c": "What role does data collection play in the potential harm caused by social welfare technologies?", "8f11cb0a-3059-4f8b-991a-c534983d2ab8": "How can individuals opt out of technology systems in social welfare programs?", "18631367-2871-45d9-8b6f-049eee6e413e": "What are the benefits and drawbacks of using technology for fraud detection in social welfare?", "da3446ed-9014-4635-b576-0dc495e48111": "How can the implementation of technology in social welfare systems affect government agencies and the people they serve?", "bf7e5553-71b4-4912-8f36-d5596c343802": "What is the Blueprint for an AI Bill of Rights?", "99d4fab5-34b6-4be5-ae63-ed96a8f6f688": "How are federal agencies ensuring AI systems comply with the Executive Order?", "cee03924-6fe3-43d2-8378-250f3240ea48": "What are the key principles outlined in the Blueprint for an AI Bill of Rights?", "1ccab29f-bc4b-428c-98d7-f783de1b512b": "How does the National Highway Traffic Safety Administration ensure vehicle safety?", "e3ea9516-f177-4b9a-b7a2-f1e46892ec09": "What role do local rules play in the implementation of road safety measures?", "bc2b4942-0fd8-48fa-989a-4c49c46c7b96": "How can strong safety regulations enhance innovation in complex technologies?", "6c84c34b-2170-4b7d-b82c-331dd7539cbd": "What measures are in place to address harms caused by AI systems?", "278b5143-0278-4559-bf04-512ef1f0c323": "How are AI use case inventories being utilized by federal agencies?", "0059e842-db08-4e0a-a061-7d5f37511184": "What is the relationship between safety regulations and innovation in the automotive industry?", "ae4d086c-aee0-4107-8d11-6d98cd132c52": "How does the law and policy landscape for motor vehicles relate to AI systems?", "0f1efd9c-7fbd-4b7b-ab5b-c3e90282a9ce": "What is algorithmic discrimination and how does it affect individuals?", "067133f9-c096-431e-b340-aaafffcd7f18": "How can automated systems be designed to prevent inappropriate or irrelevant data use?", "f50ad4e5-8546-4e08-aa4a-2f9430c6ef48": "What steps can be taken to mitigate potential harms from automated systems?", "74155138-199c-4f03-8c74-36f550d07afc": "Why is independent evaluation and reporting important for automated systems?", "bc6e0508-7335-48f1-89e2-85a0b185e8d7": "How can the public access the results of evaluations of automated systems?", "8c0cd3e7-b416-4eef-9685-68b4a810756b": "What measures can be implemented to ensure equitable use of automated 
systems?", "90cd38de-1740-48c6-b5cd-21899eb63f8b": "How does algorithmic discrimination impact people based on their race or ethnicity?", "f5303ea3-19f1-4a4f-952a-803e65a5a0fa": "What are some examples of algorithmic discrimination in automated systems?", "4d896f9d-2d20-47a9-8054-3f5b528ef495": "How can organizations ensure that their automated systems do not contribute to unjustified different treatment?", "18fd587a-4be2-4840-9c67-9ea224d4de50": "What protections should be in place to prevent compounded harm from the reuse of data in automated systems?", "dfe37234-9ca3-43f7-a3e8-b86a80b04f3c": "What is the AI Bill of Rights?", "f23d3c7e-3ec7-4ff7-bdab-5cdac88803fa": "Why was the AI Bill of Rights created?", "702c8d56-ad6f-4de1-9926-285169f470d4": "How does the AI Bill of Rights aim to protect American citizens?", "be6e8191-35d3-4f3f-94a2-c509ba2c3e49": "What are the key principles outlined in the AI Bill of Rights?", "4f8e4872-d2dc-4de1-aea1-da76b4471020": "Who is responsible for enforcing the AI Bill of Rights?", "e614182a-3697-4fd6-9166-b61c28d345dc": "How will the AI Bill of Rights impact the development of automated systems?", "d01414e7-9f4e-4a8f-b650-88cbc28f03f1": "What are the potential benefits of the AI Bill of Rights for the American people?", "d6520c9d-85d3-4d22-81a2-22649cf66c30": "Are there any specific guidelines for companies developing AI under the AI Bill of Rights?", "ec9b0344-0489-4878-b603-f0e89be5311b": "How can individuals report violations of the AI Bill of Rights?", "85e83a32-66ea-4124-934e-cc197072ddea": "What role does the government play in the implementation of the AI Bill of Rights?", "00bb38cc-b363-4224-9cad-242db0bd67bb": "What are the applications of General Artificial Intelligence (GAI) in CBRN information management?", "a221d1ab-5fd6-45a4-b6d8-8b93d9ab4bdd": "How can AI be used to detect and prevent the spread of obscene, degrading, and abusive content online?", "12a024fb-ee6f-4e56-a00e-3f762d802ec5": "What are the key considerations for data privacy when deploying AI systems?", "857a98f9-c84c-4e60-a015-c7857dfc889e": "How can AI help in monitoring and preventing civil rights violations?", "77ca506b-f725-4298-bb8b-1192ff1727c0": "What tasks are involved in AI development and how do they differ from AI deployment?", "6ad116cd-414d-4fbe-9971-f3c0165700e9": "What governance and oversight mechanisms are necessary for responsible AI deployment?", "06c33b54-b7f9-4684-ae49-4ef790e2a2ee": "How can AI improve capabilities in handling Chemical, Biological, Radiological, and Nuclear (CBRN) threats?", "3af5e01d-6e1d-41c7-a864-f35d9f4c081a": "What are the ethical concerns related to AI in terms of degrading and abusive content?", "1e4703d7-50c1-48e9-9662-fd835aca738f": "How can AI be used to enhance data privacy protections?", "d84d75d8-f21b-48d5-87ba-40e65b40b8e5": "What roles do governance and oversight play in ensuring ethical AI development and deployment?", "dff03de3-cf16-47fe-8c62-7acc7396c8e5": "How is technology helping farmers grow food more efficiently?", "f5dd8a98-1cb1-46cc-9437-9e36b92e9027": "What role do computers play in predicting storm paths?", "4c8db1bc-307b-4041-a352-9f9af6cbb286": "How do algorithms identify diseases in patients?", "8ec278ab-326a-4b26-81cf-07a2748a5f1c": "In what ways is data revolutionizing global industries?", "8e131803-f72b-4edd-a60c-cdf1ae47f020": "How is American innovation redefining different parts of society?", "c4536521-f7e6-42da-876e-138c80d5af1d": "What are the potential benefits of these technological tools for 
society?", "28061b71-fa18-4a73-973b-595f5e9f23b5": "How is President Biden's administration addressing civil rights in the context of technological progress?", "6a32e6c5-5b4d-425c-9310-ac9e34058595": "What steps has the Federal government taken to root out inequity and embed fairness in decision-making processes?", "8b41d54a-0e4f-4063-9bfb-e5633ef01f3d": "How is the Biden administration advancing civil rights, equal opportunity, and racial justice in America?", "620c90fd-87a8-4508-aef6-49ce040c25b3": "What are the urgent challenges to democracy that President Biden has spoken about?", "b2771425-f693-4b11-8a2c-cd5ece2a9f39": "What are some examples of sensitive data that have changed over time due to societal norms?", "d5648e81-154d-4e7a-9f98-89e26af03143": "How do societal norms influence what is considered sensitive data?", "8c9a7763-5902-48de-9624-1fe4f06180aa": "Why is it important to understand that sensitive data can change over time?", "71023f86-db69-4922-aa49-0c7884cda337": "Can you provide historical examples of data that were once considered sensitive but are no longer viewed that way?", "95e33fee-d6c2-4676-8f91-f97b88f4e897": "How do organizations adapt to changes in what is considered sensitive data?", "c5122f36-0c06-4f80-9d69-d7b5b7ba4c0c": "What role does context play in determining the sensitivity of data?", "6eda9f1e-d41d-4951-aeeb-6e3163a690d8": "How can individuals stay informed about changes in what is considered sensitive data?", "1ee096ea-f4e1-48d5-8859-1d486e08313e": "Are there any legal implications when sensitive data definitions change over time?", "3908a945-dd75-4d68-b75a-c42acfd1c173": "How do cultural differences impact the perception of sensitive data?", "e20ddb14-9cac-4fc8-8013-cc39c4a8924f": "What are the challenges in managing sensitive data that evolves with societal norms?", "52b9675c-9516-452a-94a4-69f03f5c10d5": "What is a fallback and escalation system in the context of automated systems?", "f678bc09-26c6-49a9-838d-b7ed1c773949": "How can human consideration be proportionate to the impact of an automated system?", "56255088-9e23-47d6-b73f-3890da92fc7a": "Why is it important to have safeguards against human bias in fallback mechanisms?", "046fd2c8-285c-4ea1-b2dc-0acdba7f5f1b": "What are some examples of high-stakes decisions that might require greater human oversight in automated systems?", "7f889e3b-de8e-4bc4-895f-af886478d21f": "How can organizations ensure that their fallback mechanisms are accessible to all users?", "c66a77c5-9ed9-48c7-b414-54d06b37b5c4": "What types of training are necessary for staff involved in human consideration and fallback systems?", "14df2313-e73b-4475-9175-b15e327a4691": "How can the availability of human consideration be increased for automated systems with significant impacts?", "7442ff35-391c-4c27-821d-8246d671bdd3": "What are the potential consequences of not having an effective fallback and escalation system in place?", "6da33505-e013-4410-87b2-c06a8f9a986c": "How can the effectiveness of fallback mechanisms be tested?", "0c6a74d4-9976-4dac-82a0-85562c58e9fa": "What are some common challenges in implementing accessible fallback mechanisms for automated systems?", "90cdaeaa-2d23-47cc-9c62-a678caaff1fe": "What is the role of NIST in advancing AI technology?", "c94bb207-795e-4949-afb5-51060a4c898a": "How does NIST ensure AI is safe and trustworthy?", "aed91732-c782-4c41-b30d-74b4bb0bccdb": "What is the US AI Safety Institute established by NIST?", "9df72cbc-5cee-4e8b-a0f6-a71f2453c828": "How does NIST contribute to the 2023 
Executive Order on AI?", "5fb78f30-b8ee-4a7c-9d4b-49a3d7203935": "What are the main goals of the AI Safety Institute Consortium?", "76bff3c9-e07b-42a2-a053-f2b1aaa03c51": "How long has NIST been working on AI research?", "aeb7622a-78ad-4d23-add9-376a60612c11": "What kind of standards does NIST develop for AI?", "11b4686b-199b-42c0-87ea-4cfdf61fc1a0": "How does NIST address privacy concerns in AI development?", "b80438a1-1067-4a5a-820c-4133f009d448": "What contributions did the NIST Generative AI Public Working Group make to the report?", "50466b56-2814-489e-a27c-db75ad0859de": "How does NIST ensure AI is fair and transparent?", "720b9aed-8f99-4d96-9c31-c592612cad56": "What constitutes sensitive data under the AI Bill of Rights?", "52379352-3058-4c02-8a14-bf94e6ff2793": "How is sensitive data defined in the context of AI technologies?", "a6730cb7-c308-49ef-976d-2cd8dae8fea7": "What types of data are considered sensitive for individuals who are not yet legal adults?", "eb718d42-c1f0-4bc4-b821-2f51d3d1c876": "How can sensitive data lead to meaningful harm such as identity theft?", "9d567b63-4cdc-4526-bc7c-9227afac30df": "What are some examples of sensitive domains mentioned in the AI Bill of Rights?", "c673db12-f99b-450f-b738-61159c6ee5c3": "How does the AI Bill of Rights address data related to criminal justice interactions?", "7d8746aa-a1a7-4dbc-bf5e-40f65e995b9f": "Why is biometric data considered sensitive under the AI Bill of Rights?", "449278d1-b673-4878-8c11-d1d5ab0d6db5": "What measures can be taken to protect sensitive data in AI applications?", "5969e8c6-02b2-4c2a-b739-bdc23cad38f9": "How does the AI Bill of Rights define the potential for data to cause financial harm?", "205446ad-e139-481b-81fb-2548db84a0e0": "What are the implications of handling sensitive data in AI systems for privacy protection?", "928e6923-bcf0-4580-ad02-7769964afe49": "What are the implications of digital surveillance in a post-Roe world?", "a1b627aa-a1ff-4a68-a3ac-2a726f36747d": "Why did the FTC sue Kochava for selling data that tracks people at sensitive locations?", "70fa19b9-7985-4eb6-9cc2-7bf86776c53a": "How does digital surveillance impact reproductive health clinics and places of worship?", "16562b7c-6b93-483f-a689-d28eab8e4aea": "What companies are being acquired by private equity firms that collect data on children?", "b86ef95b-cdcf-455f-bdc0-474aac5b35eb": "What are the concerns surrounding private equity firms collecting data on America's children?", "9aa09b13-c9c6-4729-8029-8b893a1772a7": "How does the collection of data on children by private equity firms affect privacy?", "c971ec0d-ff6d-4adc-8ce4-f31428e4b284": "What are the potential risks of data tracking at reproductive health clinics?", "85942b84-5c76-435a-b971-83296f24e8fc": "How does the FTC's lawsuit against Kochava highlight issues of data privacy?", "7a683ec1-c238-48c5-bf76-00845333806c": "What are the ethical considerations of collecting data at places of worship?", "aa558c0c-2ab3-40c8-bdd9-3daf1f4f43a0": "How does the job database classification of former Apple employees as 'associates' affect their career prospects?", "19703e97-9d93-49e6-864a-23847656346f": "What are the best practices for ensuring data privacy in AI systems?", "52e88558-d8cc-413e-80e0-9f094bf4270b": "How do you manage intellectual property when using AI models?", "6ad81596-3e02-481b-b7ac-73940d631df3": "What are the key components of AI governance and oversight?", "dd2e0e56-2211-4a81-9712-aecf2fda1a6b": "How can you ensure information integrity in AI applications?", 
"bcd77494-a31b-4fe9-b2c7-3876f1f5306e": "What are the different access modes for underlying AI models?", "83631b6a-fd3c-4582-973c-c108cbc54c76": "How do you handle sensitive data in AI projects?", "fa5423e6-cb1e-4043-af5e-e939741381e2": "What is the importance of value chain and component integration in AI?", "45fc2c2b-6bc0-4a8d-8731-dfa83c3ba355": "How do you configure AI systems to align with human-AI interaction guidelines?", "94cf27b3-d287-4e48-b4f0-1acdf891f42f": "What are the challenges in managing proprietary data in AI?", "47124446-7ae2-4444-8f9e-e86adddd76bc": "How do you oversee the tasks performed by AI actors?", "af389dfd-cca2-4851-9450-166e9fbd38d2": "What are the key technical and governance interventions needed to protect against the harms of new technologies?", "9bc603e1-c26c-4926-b91a-c8d4e1a57fbf": "How important is community input in the design and use of new technologies?", "be75402c-f043-4191-9c8b-7d5af25bef31": "Why is public reporting on crucial elements of technological systems necessary?", "0dc744a1-6026-46d6-aeb0-8af84f722ae7": "What are better notice and consent procedures, and why are they important for privacy?", "66f03050-aedc-47b1-aa15-51450e4329a2": "How can users opt-out of using certain technological systems and revert to a human process?", "9c5cf2e4-1467-4a23-8857-28214677eca8": "Why is it important to provide explanations of decisions made by technological systems?", "710fdf52-6834-4b9d-9ce6-a653d5cece1b": "What kind of training is necessary for the proper governance of new technologies?", "c505559b-8a65-4f47-80ef-b510d054137a": "How can we ensure that technological use cases are genuinely related to their goal tasks?", "b0c53b5c-f74e-4529-a8d6-a123ed691081": "Why is local validation important for the effectiveness of technological systems?", "86fad521-a1f8-4c03-9313-ef0a9cee5091": "What is the role of third-party audits in maintaining the accountability and validity of technological systems?", "55bec949-76bc-49c1-a3ae-5f874eedfddc": "What are the risks associated with the synthesis of CBRN materials?", "9cd11b1f-87d1-484b-aba4-b76a5cb30945": "How can confabulation in AI systems mislead users?", "9ee178ca-364a-4792-8ee5-68de0f17f7cc": "What measures can be taken to control the spread of dangerous, violent, or hateful content online?", "616332c3-0795-4741-9e22-e94ffa2fcb13": "How does the unauthorized use of personal data impact data privacy?", "d00c3c3b-c827-4205-bc58-f5520ba6e148": "What are the potential consequences of de-anonymization of biometric data?", "bf6e3576-bc85-4045-a9ec-7701f2ba852c": "How can access to CBRN information be restricted to prevent misuse?", "3a2a0084-eefa-406c-b6a6-ae09a47d2c23": "What are the challenges in detecting and preventing confabulation in AI-generated content?", "3139865a-00dd-4da3-9038-84cb5a1b8b12": "How can online platforms mitigate the risks of inciting or radicalizing content?", "ef18f0e4-616a-4bde-9199-8407daad074d": "What are the best practices for protecting sensitive health information from unauthorized disclosure?", "65c26694-76cc-42b2-9982-8baf476bbaed": "How can individuals protect their location data from being leaked or misused?", "ca38f0f2-707f-4776-8565-5a88ac901073": "What is confabulation in the context of GAI systems?", "dc4c667a-b953-456e-a31d-8b4221f41335": "How do confabulations differ from hallucinations or fabrications in GAI systems?", "c90a6481-11b9-4db8-b24d-cbb9f232a4a5": "Why do generative models like LLMs produce confabulations?", "f989e408-3f17-4129-8bd2-87ae2ceecc72": "Can 
confabulations occur in all types of GAI outputs and contexts?", "dac82b9b-8e84-414d-aafc-7f3764ca2b23": "How do statistical predictions in generative models lead to factually inaccurate outputs?", "df4dfdd7-6c4c-4dde-b6dd-5123571b1254": "What are some examples of confabulations in GAI systems?", "5a88e70b-4815-451a-bc8f-e1606c9f23e7": "How can confabulations affect the reliability of GAI-generated content?", "9882e344-3c67-494a-b083-c0b2ae59a9df": "What measures can be taken to reduce confabulations in GAI systems?", "26c3c033-6002-4d01-8f46-96bad11d3572": "How do confabulations impact the consistency of GAI-generated statements?", "6536996f-b790-440e-afc7-66774db4a2e3": "Why is it important to understand the phenomenon of confabulation in GAI systems?", "f9e8bebc-884c-4775-95e1-78d70dc642b2": "What are the best practices for verifying information sharing and feedback mechanisms in AI systems?", "6a36fd5d-98db-42d5-b0d4-493641747d7b": "How can organizations ensure data privacy while sharing information about AI system impacts?", "d4f86701-19bc-43eb-9713-57eb57e073f6": "What steps should be taken to conduct an AI impact assessment effectively?", "ff6fe932-7482-44fc-bd67-812520e17bcf": "How can feedback from affected individuals and communities be integrated into AI governance?", "830c3554-63ea-4a61-a4f9-95e5c8297675": "What are the key components of an effective AI governance and oversight framework?", "621c0613-588a-4422-951a-45a7ed78fca5": "How can organizations prioritize feedback from external stakeholders regarding AI risks?", "df860556-2e58-4780-972c-1293c0231372": "What resources are necessary for effective outreach and feedback processes in AI system development?", "9c78e9c5-3af3-4520-a602-f9a45cebb1a3": "How should interactions with GAI systems be documented to ensure transparency?", "359d8ba9-c8b2-4d18-b5c2-9c0369390b86": "What are the potential risks of harmful bias and homogenization in AI systems?", "e1de377f-ddbf-4599-99e2-116dc45fac8e": "How can organizations address the issue of confabulation in human-AI configurations?", "e5df3265-2f97-4196-b75a-ab39e66e7b97": "What are some examples of time-critical systems where immediate human intervention is necessary?", "ae852f4b-1551-4734-b962-337bb0060682": "How do automated systems impact the criminal justice system?", "5ce6cbf8-e3c8-414e-8e92-59e44f212a1f": "What safeguards are necessary to prevent unfair outcomes in automated systems used in sensitive domains?", "9a8e63e5-0caa-4533-b223-17d6c3c56133": "Why is human oversight important in systems that use automation for pre-trial risk assessments and parole decisions?", "fee74500-d0b9-468f-a6c0-cbb66d324f73": "How can automated systems in healthcare lead to dangerous outcomes without proper safeguards?", "7e4ebd6f-c29f-4c5b-a9e6-c00489ace11f": "What are the potential risks of using automated systems in employment and education?", "cf2fab83-75d6-4fea-a72d-449833ab78e8": "How do existing human processes complement automated systems in providing public access to government benefits?", "bcb8ba55-d026-4851-a522-40e9b1e175af": "What are the benefits of having a building manager available when an automated card access system fails?", "f89691af-5a2e-4c6b-8894-fd6d9cd424b7": "In what ways can automated systems in sensitive domains be made more accurate and fair?", "6c7e2aa2-558c-4280-93b7-bf73de34e57b": "Why is it important to have fallback options when using automated systems in critical areas?", "9f5142e7-fda2-481b-a08d-fddf4197454e": "What are the main findings of Northcutt et al (2021) 
regarding label errors in test sets and their impact on machine learning benchmarks?", "0cdcb840-24f9-49b7-b4b6-bd409bb4d50a": "How does the OECD (2023) suggest advancing accountability in AI to manage risks throughout the AI lifecycle?", "4dfa9162-0bd0-4926-a27a-2fe9231160ed": "What are the key points discussed in the OECD (2024) paper on defining AI incidents and related terms?", "49ca8dea-a08f-410a-a643-8e37fc896455": "What information is provided in the GPT-4 System Card released by OpenAI in 2023?", "4636292e-8f3e-4711-a175-b28428d0eaa9": "What technical details are covered in the GPT-4 Technical Report by OpenAI (2024)?", "69456d83-d8c3-4d1c-bd31-704d2c934902": "According to Padmakumar et al (2024), how does writing with language models affect content diversity?", "d62ca080-832e-4f4d-960d-bf8bb0d1b97e": "What examples, risks, and potential solutions are surveyed in Park et al (2024) regarding AI deception?", "fb2ddd6a-239b-4b2f-a336-f921a876390c": "How do pervasive label errors destabilize machine learning benchmarks, as discussed by Northcutt et al (2021)?", "9208081d-3da9-46db-b27d-f25ed1c64ad2": "What governance strategies does the OECD (2023) recommend for ensuring trustworthy AI?", "edb639b7-e492-4442-a339-62b5e921ceb2": "What are the implications of AI deception as explored by Park et al (2024) in their survey?", "572c1b8f-43fc-46ab-ac02-ef9756b67234": "What is the importance of proof history in AI content management?", "5e70d1eb-366b-403f-b833-9bb92e6a6cc5": "How does version control contribute to transparency in AI systems?", "f9e5f377-fe4c-43e8-8cc5-f004e49964e9": "What methods can be used to ensure traceability in AI lifecycle management?", "be867fe3-bca3-4b2b-9a5e-471f230b333d": "Why is user testing crucial for verifying the adequacy of GAI system user instructions?", "4b9a0a17-252c-4972-9be3-1b4156f5b5ce": "What are the key components of Human-AI configuration in AI systems?", "0f22a8c0-7a87-4416-a640-3c2187b8aa3b": "What tasks are involved in AI deployment?", "338a84b7-78f1-442a-8e90-093c313eceb1": "How is AI impact assessment conducted?", "bd811679-d8f6-4568-8799-aa826cd0ca8d": "What roles do domain experts play in AI operation and monitoring?", "6015d9b2-454a-4af5-9a8e-ac87189a84e1": "What does TEVV stand for in the context of AI systems?", "dc4d23d4-5c52-47f4-b5b8-01c23fc3bff9": "How can robust version control systems improve information integrity in AI?", "a6399ec7-f330-4390-8da1-35f4821069f7": "What are the safety and efficacy purposes of notice and explanations in automated systems?", "ec418640-346c-4981-a095-cbed724bdd60": "Why is it important for the American public to know if an automated system is being used?", "7feb0289-a3ca-4a4b-9809-7ff2b8a6518f": "How can clear, brief, and understandable notice help achieve protections in the framework of automated systems?", "c7d1871b-b9ea-4a5f-9ab2-e5b06fe26a8c": "Why are the decision-making processes of automated systems often opaque and complex?", "1f235bf9-1d62-49c3-9f64-b74ff53e1c0c": "What challenges do explanations of automated system decisions present?", "f370e6a8-32c6-4260-8fa8-013612614ab1": "Why should explanations not be avoided in the context of automated systems?", "ee9515a6-b640-4034-98fb-4a5837748cf7": "What is the importance of clear and valid explanations in automated systems?", "745e45f4-bba9-45ec-a7de-e6ad0e0e47e1": "How can experts verify the reasonableness of a recommendation made by an automated system?", "a02fa5da-7bd9-48d0-ade1-35bcb7529e09": "What are the potential harms of not informing the 
public about the use of automated systems?", "4a54ea01-870a-4fe8-aa51-bbdd95af1a67": "Why should clear and valid explanations be considered a baseline requirement for automated systems?", "d9589a42-1bfd-4e7f-b849-f21901e4405c": "What are the key expectations for automated systems regarding data privacy?", "a2cf57d9-906a-4fa1-83ca-9bcc8e64edbd": "How can individuals access and correct their data collected by automated systems?", "6bb0aeee-f203-4ae1-bdb1-ef25d1fcd53a": "What steps should entities take before sharing data with other entities?", "168e18c4-5764-40f7-83a7-4648c98b69c9": "How can users know who has access to their data in automated systems?", "251640f5-543a-41f8-9cf2-21a40ea67aa2": "What is the process for withdrawing consent for data access in automated systems?", "c1e447ca-42dc-4355-a5b2-c55e9ace2bbf": "How should entities handle the deletion of user data upon consent withdrawal?", "2fd28c08-3fd3-41b6-927b-527d88dc0694": "What records should entities keep regarding shared data and its recipients?", "756d6e43-205c-48de-84fb-df3fe1ff1fd8": "How can users ensure their data is removed from machine learning models after consent withdrawal?", "fb40f206-d170-40e2-8c0d-6fed13736a04": "What legal limitations might affect the withdrawal of data access consent?", "ffc49df6-1959-4e1b-983e-d5d6e8ee184b": "What are the best practices for entities to follow when developing technical standards for data privacy in automated systems?", "4570505c-0e36-451c-a22f-787893a90d09": "How can patients ensure their medical data from CPAP machines is not shared with insurance companies without their consent?", "6218a6ac-9d82-4ee1-92b9-bcd6637a0981": "What are the ethical implications of using predictive analytics to target advertisements based on sensitive personal data?", "5a330f85-885f-4b40-85e8-c3a6dca75ffc": "How can schools balance the need for safety with students' privacy when using audio surveillance systems?", "129f0f97-98ba-42e8-9d21-f3d2c6917470": "What measures can be taken to protect students' privacy during online proctoring exams?", "0b4f9884-918b-4031-a687-d99338d8456c": "Are there any regulations in place to prevent insurance companies from using medical device data to deny coverage?", "6e60a744-5a8f-4a68-97f1-ddbd13855330": "How can consumers protect their personal data from being used for targeted advertising without their knowledge?", "d4c7738c-fc98-4fc6-b879-f6dc0a273084": "What are the potential risks of using predictive analytics in retail for both consumers and businesses?", "3ed724ad-1946-407a-990c-be9a281312fc": "How effective are current data privacy laws in protecting sensitive information in educational settings?", "76c3b003-88f0-4704-84f6-839b2a2119c4": "What are the best practices for companies to follow when collecting and using sensitive consumer data?", "587362e8-2dd6-4f47-98cb-003f14dac0ab": "How can individuals be more aware of how their data is being used by various devices and services they interact with?", "b3192f95-ad30-4882-b2d8-fa1e9be2394a": "How does the curing process help voters with disabilities ensure their votes are counted?", "22645ff1-03b4-424e-a146-2bbf328e966b": "What steps are involved in the curing process for voters who have changed their names?", "e87363bb-640a-4feb-9d9f-9d2e0f866665": "Why is the curing process important for voters with shorter or hyphenated names?", "149c1365-2c5a-4e3c-9c87-1105b2746f03": "How can voters confirm their signatures during the curing process?", "13982a84-3a75-4f82-a35a-e52df15dc29c": "What are common voting mistakes 
that can be corrected through the curing process?", "e7d71aac-05de-4e78-b73e-39d585314e96": "Is the curing process a standard practice in all states across the country?", "77de8575-cb53-451b-8b8f-7da0b09fd6ec": "How do election officials assist voters in the curing process?", "b624cd05-c166-4243-91a7-4a3f693f185f": "What happens if a voter does not correct their voting mistakes during the curing process?", "68a8f208-c093-4f42-afcc-c1e71dc344bf": "Are there any specific challenges faced by voters with disabilities during the curing process?", "a001c1a3-207f-458e-8638-0dbbd1a23b20": "How can voters ensure their votes are counted if they have made a mistake on their ballot?", "be2fad5e-2033-4a8a-9741-2b83774c1ac9": "What are the risks associated with identity theft for minors?", "93c0ee03-f986-4d51-b9b3-8367e10d9256": "Why is data about minors considered sensitive even if it's not related to a sensitive domain?", "9e6f708d-f474-42bc-8052-4762c8d80510": "What types of data are considered sensitive for minors?", "ba8b12ee-5d56-421f-ace2-208edb8cb51b": "How can identity theft impact a minor's future?", "3f587118-82b2-499c-ae3a-e1ec3142c71a": "What are some examples of sensitive domains that require enhanced data protections?", "b29a4d65-c495-480a-89f0-fc4e91aa0c70": "How do sensitive domains affect human rights such as autonomy and dignity?", "4e5b85e2-a644-40e2-8abe-4c376884d3eb": "Why is public expectation important in determining sensitive domains?", "27cee14e-d18d-48ea-a59d-8e81fb9daa11": "What are the potential harms of not protecting data in sensitive domains?", "b3dd37e2-835e-4821-94fb-6bf7178aa938": "How does the framework define sensitive domains in relation to existing laws?", "1d89baef-c455-48c0-8ef3-307b96c366e3": "What measures can be taken to protect data in sensitive domains?", "aa7473ee-3668-4669-ae62-9f1a1516a53d": "What are the potential impacts of surveillance technologies on individual rights and opportunities?", "fac36b7d-de59-49ac-bb84-23feb28373e8": "How can one ensure that their data decisions are respected in the context of surveillance technologies?", "4e0354ea-fffe-4a01-a4f1-d51ae008059a": "Why should surveillance technologies be avoided in education and housing?", "0fc325e8-7e09-4ea1-9db6-258a0ae665f4": "What kind of reporting should be available to confirm data decisions in surveillance contexts?", "7d295303-fcad-4c6b-8c7b-f3d3081439ac": "How can surveillance technologies limit access to opportunities?", "96eff3ab-89f0-4b9d-a269-f0357278d6bd": "What measures can be taken to assess the impact of surveillance technologies on personal rights?", "2d48e1ac-0686-4e2e-bd95-e2db7f8c5339": "Are there any guidelines for the ethical use of surveillance technologies in the workplace?", "0b0edbb6-e73f-4cef-a8d2-76c2d7e8176c": "How does data privacy relate to the use of surveillance technologies in various contexts?", "d3db021a-8f8a-4113-b5cb-0ef5de176095": "What are the risks associated with the use of surveillance technologies in education?", "6c3c2809-c142-4336-94b6-0cf80dba8956": "How can individuals protect their data privacy against surveillance technologies?", "ed5fe850-efcd-40d3-a5d3-0cdc40a8a5d6": "How can equity assessments help improve the safety and efficacy of systems for LGBTQI+ persons?", "1aa205a1-c8ec-44b5-99e2-66b9acede8d5": "What are the best practices for collecting representative and robust data for persons with disabilities?", "107537bf-1e76-4d1e-ba37-84a3eb8872c7": "How can qualitative evaluations be integrated into the assessment of systems for older 
adults?", "ed6516af-d770-452e-88cb-cb49fe224a96": "What methods can be used to ensure data is free from bias when assessing systems for persons living in rural areas?", "aaec1c72-545e-420c-83dd-44ee03eb5ac9": "How can persistent poverty and inequality be factored into the development of automated systems?", "4b3d37df-b2be-4662-b131-e87a2cfbd191": "What are the potential harms of using biased data in system development for marginalized communities?", "404568f3-40d8-42a1-a031-1c920e94bdb4": "How can we guard against the use of demographic proxies in automated systems?", "b41f2aca-4395-4b37-bfec-fdbe04578292": "What role does historical and societal context play in reviewing data for bias?", "14ac205f-14d8-45f2-b537-5aafa71096fe": "How can we ensure that data used in system assessments is sufficiently robust to identify biases?", "86ffb580-7422-420e-8dcc-d891924d573c": "What are the challenges in conducting equity assessments for diverse populations?", "7d0b283d-f144-4a82-b19e-c2112a913ed5": "How does data harvesting impact mental health?", "0fe16b30-1cf2-4712-a01d-4f6ebe321037": "What are data brokers and how do they profile communities?", "30bc10d0-919a-4251-8d8d-b540b171b181": "How can data harvesting affect democratic processes?", "86589cc8-2f3e-4292-a99e-3a491d7a3790": "What measures are companies taking to protect consumer privacy?", "7bf74331-98cb-4cc6-a850-92ab97ea0e1c": "How does data collection chill speech and protest?", "6a18c1c1-6ac2-497c-a1fe-11422ba30b35": "What legal protections govern federal government surveillance?", "0af459fc-739a-4dd4-8b24-8e56ff57fd4f": "Why is it important to minimize data collection?", "8d37a2d1-6bd5-4294-8e51-2cadcc1361ac": "How can improving security practices protect consumer privacy?", "d141a6e7-cd0a-4576-a702-58134cba4442": "What are the risks associated with data aggregation by data brokers?", "940504a7-7a40-4be4-be3c-175d09bb3ccb": "How does data harvesting breed distrust and anxiety?", "2c185373-e28c-400c-a25c-f72dfe3eaa57": "What are general fairness metrics in machine learning pipelines?", "35d3df9b-ca04-45d6-ba61-910781efea5a": "How can demographic parity be applied to business outcomes?", "9dc2b214-8912-4afb-9003-08f83e541c16": "What is the significance of equalized odds in ML fairness?", "a5eacbd0-3001-4167-8925-1156e278be54": "How do you measure the prevalence of denigration in generated content?", "fbb7532d-4e3e-41f8-8fa9-f7b5a02d5cf0": "What are some methods to identify harmful bias in GAI systems?", "a9a83f45-168b-4db5-ace9-45e7fc8c91b9": "How can custom, context-specific metrics be developed for ML fairness?", "3d684fd5-d832-42dc-b175-14f788a2e83f": "What is the role of domain experts in creating fairness metrics?", "fd5250fe-6121-425e-b4d1-c8e0bb203d19": "How can potentially impacted communities be engaged in assessing GAI systems?", "08d9cafd-06aa-44de-ba02-3e37b37a6b17": "What are the implications of harmful bias and homogenization in AI?", "98218ea7-de37-4779-8228-a2bbdb9ff990": "How can environmental ecosystems be affected by GAI systems?", "3a930566-71ec-465c-b726-c04b4278a904": "What are the key characteristics of trustworthy AI?", "f5324885-70fb-44aa-abcc-bacc277ed26a": "How does information integrity impact the trustworthiness of AI systems?", "48616ea8-c86b-4f86-aa5b-e94d1bf34068": "What does it mean for AI to be accountable and transparent?", "db5d6aab-79c3-433c-9530-afa6fd56678c": "Why is it important for AI to be explainable and interpretable?", "adb92956-0666-4e9c-83d6-139c568fedf1": "How can harmful bias in AI be 
managed to ensure fairness?", "00d0dc92-3f12-4a6f-97a5-9bfb57bdd801": "What measures can be taken to enhance privacy in AI systems?", "1391f61d-1d2c-486f-88ab-108403d31a1c": "What defines high-integrity information according to the 2022 White House Roadmap for Researchers?", "1b6fc2b6-eb9c-4f80-9435-aed7e4c8c00b": "How can the accuracy and reliability of information be verified and authenticated?", "b79e8099-38c5-4a4f-b636-f1054ae0ca4f": "What is the significance of having a clear chain of custody for information?", "3fdc8d07-1b55-425f-93f6-e66f1429df36": "How does acknowledging uncertainties contribute to information integrity?", "a9dc1f08-5014-4341-8489-8745d5cc3daa": "What are the best practices for logging and recording AI incidents?", "1b659988-353f-4357-8611-7249beebc92b": "How can change management records help in managing AI incidents?", "ddbf64e7-c5b6-434d-8d6e-31d1e5d33d28": "Why is version history important in AI incident management?", "4a4cbdcc-49b2-4fb2-8c24-812bca763c37": "What role does metadata play in the analysis of AI incidents?", "1c1c0ea6-3ed1-421c-a55f-e58d82db9618": "How can inconsistent access control affect the distribution of content through plugins?", "62ddbc5e-ccb4-4713-b294-20cbbd2eb010": "What are the benefits of regular information sharing among AI Actors?", "ffd63cbd-ea55-4aad-aeaa-0dbe6fa096c0": "How can insufficient access control be mitigated in plugin distribution?", "483b6c63-303b-4f23-8f34-5b22a4ec2765": "What documentation practices are essential for smoother information sharing in AI?", "ac189b5e-6349-44af-bf0f-702798289347": "How can AI Actors use change management records to respond to incidents?", "8a3d3981-24a8-498a-b509-25543fd6afeb": "What are the challenges of maintaining consistent access control in distributed plugin systems?", "638b3c99-e6cb-49bb-95c2-f267b2d1ec00": "What are the risks associated with the validity and reliability of GAI systems?", "f0977b5a-0de8-435b-bfee-e6a457db0442": "How do measurement gaps between laboratory and real-world settings affect GAI systems?", "d2628c21-64b0-4b61-a595-7097a4f233c5": "Why do current testing approaches for GAI systems often fail to assess real-world impacts?", "17c050f3-312d-46b8-b066-2bf66ab0142f": "What are the challenges in estimating the ecosystem-level risks of GAI?", "1116721c-2698-4938-a6bb-f96c2698380d": "How can prompt sensitivity and context heterogeneity exacerbate measurement gaps in GAI systems?", "68367977-e97f-4f35-a78a-63620dbd4fb8": "What is the role of structured public feedback in evaluating GAI systems?", "4770227b-6fd3-4a08-93e2-eac625aa3e2f": "How can structured public feedback help calibrate traditional measurement methods for GAI?", "a50d0a5a-b180-4209-b057-8a3ce7ca5c8a": "What are some examples of structured public feedback for GAI systems?", "e538cb20-92e5-427f-b9ac-cbe7f61c3654": "Why is it difficult to estimate the political, social, and economic impacts of GAI?", "108fa021-85ba-4c80-b094-bc316a5210da": "How can real-world conditions be better integrated into the testing of GAI systems?", "25bfeba9-9dba-40e4-ad97-8c839ca8c51a": "What methodologies can be used to evaluate biases in AI-generated content?", "67f472ad-fa27-4999-b17b-c75cccb0ef3b": "How can computational testing methods help identify stereotypes in AI outputs?", "f40897d1-e74b-4ae9-bed5-132e9360c38f": "What are some common biases that might emerge from AI-generated content?", "07c1cc8e-f86c-4899-82ed-bf9b470ddf20": "How can structured feedback input be used to assess AI content for harmful biases?", 
"e43daa73-3e5d-4c9e-a3ca-075dec799ed3": "What is the impact of homogenization in AI-generated content?", "ac6364d5-5041-446d-b5d7-e6d6fc9fa97b": "How can we mitigate harmful biases in AI systems?", "eae909c9-9200-428d-9d2d-0cf8ae39ea48": "What role does user feedback play in identifying stereotypes in AI content?", "e04d6574-6d9f-42a2-bf82-086109b4bfec": "Can computational testing methods alone ensure the integrity of AI-generated content?", "43545e6a-0aa6-4c7c-89f0-464c8c0cb1f9": "What are the challenges in evaluating potential biases in AI-generated content?", "9ad47d0f-1d88-4ffb-b427-4f8ecaeaee01": "How can we ensure that AI-generated content does not perpetuate harmful stereotypes?", "5612aea9-1a55-450a-830f-c05c88a34e6e": "What is AI red-teaming and how is it conducted?", "69417cf7-74ea-4fe6-bb4e-c4424621c4e4": "How can organizations collect feedback on AI model outcomes and user experience?", "85b1f1c5-fb0a-43ae-ab3f-703b8aa0663b": "What are the best practices for implementing feedback activities in AI systems?", "1d2c36f2-6923-4498-8504-c5c93c6eec30": "Why is informed consent important in AI research involving human subjects?", "4d3e7bb9-45c2-4918-ad20-9225ef78885c": "How does AI red-teaming help in identifying potential adverse behaviors in AI models?", "886bdf88-e7ed-48b2-8616-3b50743a28b8": "What are the human subject standards that organizations should follow in AI research?", "f6f13394-12e4-41df-8d73-8f8a59b625c8": "How can stress testing safeguards benefit AI models?", "7b479921-3a77-4f29-8406-2f7b03ab8878": "What is the role of subject compensation in AI research?", "c60ab7a5-60a1-49f6-b20a-5707d699cb8e": "How can AI red-teaming be performed in a controlled environment?", "32fe7223-3f84-4ef2-a178-0fba0a0e178f": "What are the potential risks and impacts of AI models in real-world interactions?", "19b3ef87-4631-4218-ba95-80b2c105535a": "What are the privacy concerns associated with social media monitoring?", "005c7c98-75a6-4242-878e-530c529e641f": "How do ankle monitoring devices impact individual privacy?", "1ba49c65-ac05-43c5-806b-21171820c4da": "What are the potential risks of using signature matching tools in voting systems?", "deebad53-75a3-40d9-83f0-b09ff89b0b35": "How do smart home systems collect and use personal data?", "04ad04d2-396e-4701-a6e9-2422c69accc7": "What are the privacy implications of systems that collect health-related data?", "d70da12d-584e-4e52-9eae-1f9e2f62e745": "How do education-related data systems impact student privacy?", "56387c3a-2259-48da-88f8-d5890f89f39d": "What are the concerns with using criminal justice system data for big data analytics?", "857b6f51-d048-46ae-85a7-5495bb30553f": "How do ad-targeting systems use personal information to build profiles?", "8ae7d54f-19dc-4a4f-9065-c5993f18db14": "What is algorithmic discrimination and how can it be prevented?", "fe8e6119-c018-4ce9-9b76-272740890ecc": "How do algorithms that detect student cheating or plagiarism work?", "b1a35db2-e2c3-411a-9942-84f2ce3db112": "What is the AI Risk Management Framework by NIST?", "985b4d2d-1090-4510-b623-7cd70b25fb56": "How does the NIST AI Risk Management Framework address AI risks and trustworthiness?", "a0eb8337-f777-4726-a13b-d7ec625951a6": "What are the key characteristics of AI risks according to NIST's AI Risk Management Framework?", "6c03ed54-6ab0-4045-a251-0dce83ca395f": "What are AI RMF Profiles as described by NIST?", "402b5f7a-b09f-487b-b5a9-72eced264252": "How can organizations use the AI RMF Profiles to manage AI risks?", 
"18dc0b62-513e-4205-8429-d0f22c15a613": "What tasks are described for AI actors in Appendix A of the NIST AI Risk Management Framework?", "8ce49ea6-adba-418b-a59c-9007b6ae81da": "How does NIST categorize different AI actors in their Risk Management Framework?", "f8ab2a45-69cd-4dc5-a52f-d340a7e463c6": "What are some common AI attacks and their mitigations according to NIST?", "5e9092a5-7669-45cc-af06-639af0388ab2": "How does the NIST AI Risk Management Framework help in improving AI system trustworthiness?", "d829fb79-fb10-4f92-b38b-e155bb3fcee3": "Where can I find detailed descriptions of AI actor tasks in the NIST AI Risk Management Framework?", "a023b48b-2c54-40ef-8767-5d3f03fe2914": "What are the key policies needed to oversee AI systems effectively?", "33fa9547-975a-4352-8f53-81d66f44c49f": "How can independent evaluations improve the oversight of GAI systems?", "a6fa8d04-3c9b-457d-88fd-dfa23ce40ff3": "What types of risks are associated with GAI systems that require robust evaluations?", "8f6d5636-40eb-47a8-ad69-b87dfebea48d": "How should organizational roles be adjusted across the lifecycle stages of large GAI systems?", "a71268a9-4141-4a0e-b1fa-0076d05cd42e": "What is the importance of test and evaluation in the development of GAI systems?", "691fbe00-89ad-498d-8d41-323cf1e97661": "How can harmful bias and homogenization be mitigated in GAI systems?", "e6e14601-db3d-4f7d-8676-d4eab8ecb25a": "What are the best practices for GAI content moderation?", "7068ff06-d301-4011-b487-7cfba94a4861": "How can increased accessibility of GAI tools and interfaces impact their oversight?", "e8dab0e4-d104-4378-a736-39d0227294b1": "What are the critical components of incident response and containment for GAI systems?", "aca5d4a8-7b84-4056-b0e6-e04d36ece68b": "How can information security be maintained in human-AI configurations?", "6897739c-d427-406f-ad1d-d7c4c77294d3": "What are the ethical concerns associated with text-to-image models?", "e0dacf35-3db5-49c4-9766-2c3dad84baab": "How can text-to-image models be misused to promote dangerous or violent messages?", "226859d7-3332-4379-8494-f26272702d7d": "What are the risks of GAI systems producing content that recommends self-harm or illegal activities?", "b791d661-0b16-4057-b391-7d89660c8a6d": "How do current systems attempt to restrict harmful outputs from GAI models?", "18041e17-7026-4c7d-9458-03b95bed2260": "What is \"jailbreaking\" in the context of GAI systems?", "efafd5fa-5e44-473a-89e5-972ae57c28f2": "How can \"jailbreaking\" be used to circumvent output controls in GAI models?", "f6f5b288-0e56-4759-a22e-70b60f145993": "What are the limitations of GAI systems in terms of data privacy and information security?", "06185446-c806-4fde-a2bc-435e67f172e3": "How can GAI systems be harmful in the context of CBRN information or capabilities?", "e23e338c-f249-4c47-9177-bde54d129a27": "What measures can be taken to prevent GAI systems from producing obscene or abusive content?", "20366931-36fc-4932-be26-4a2236da5849": "Why is it important to study the potential harmful effects of GAI systems on mental health disclosures?", "c02e5bf5-5f98-4e9a-9951-b476de779a36": "What are the best practices for applying organizational risk tolerances to third-party GAI resources?", "67fb8dc5-8f12-4f4c-b3dc-891c8219e312": "How can organizations effectively assess personnel credentials and qualifications for GAI projects?", "6df4dda4-63e2-45ab-82e3-2ad196f97ad3": "What are the key steps in performing background checks for individuals involved in GAI development?", 
"1626e1c6-eaaf-458a-9d62-6539ba4af917": "How can organizations filter GAI input and outputs to mitigate risks?", "ade6ed4e-ae4a-4354-9696-9be41db683dc": "What is retrieval-augmented generation, and how does it apply to GAI risk management?", "c37cf334-7c3f-4151-88b8-baf4569164a2": "How should organizations reassess risk measurements after fine-tuning third-party GAI models?", "8ccf5159-8cbc-4786-9c65-58c3df77213f": "What are the common risks associated with the GAI system value chain?", "91601426-b8bf-4362-b7fe-360c419ef972": "How can data poisoning affect GAI systems, and what measures can be taken to prevent it?", "822d55ee-6b63-4714-a957-a43dc8d843c9": "What are the implications of data privacy and localization compliance in GAI systems?", "284bee81-fb81-49a5-bbe0-49ef74483c20": "How can organizations ensure geopolitical alignment when integrating third-party GAI resources?", "bfa377f7-0ab2-4134-b646-98d6866d752d": "What are the main priorities in information integrity research and development?", "bd0492b6-4b03-4c9c-af31-313a235c4c93": "How does information integrity impact cybersecurity measures?", "8eebfbca-8240-4861-bd05-6a53ad7ccd8f": "What are the latest advancements in information integrity research?", "3ceb3870-78e8-4ec0-9491-241fccb93ec5": "Why is information integrity crucial for data security?", "7d63dee3-6d48-4393-8be6-8480ce6ced6d": "What role does artificial intelligence play in information integrity?", "e027c23e-9be3-40d9-a43f-99b9cc85df42": "How can organizations improve their information integrity practices?", "eaf96d9f-b112-43f1-ac2a-15c56c9e2ed8": "What are the challenges faced in information integrity research?", "4752b74b-3b45-4293-84a6-9129151862be": "How does blockchain technology contribute to information integrity?", "c98a9050-6d52-4601-8554-821c0659bcb8": "What are the best practices for maintaining information integrity in databases?", "015b7ed6-dfc9-419c-b332-8d22138cadfb": "How do regulatory standards influence information integrity research and development?", "b4881d3d-9b06-4fa0-94ca-6df2782ac2fd": "What are the minimum thresholds for performance or assurance criteria in deployment approval processes?", "c187cc72-ec40-4262-a0a8-dd0a7d8236a5": "How often should the reviewed processes and approval thresholds be updated to reflect GAI capabilities and risks?", "ad51c1b1-347d-4f10-b264-363ddcdfa938": "What is the significance of establishing a test plan and response policy before developing highly capable models?", "a7fc40f7-d119-4980-b911-ad106e77b51e": "How can one evaluate if a model may misuse CBRN information or capabilities?", "102600d4-12bd-4196-b867-7b5b9f746130": "What are the key components of a \"go/no-go\" policy in deployment approval?", "307c1ef5-e97b-48be-a89d-57c5db8c3e6b": "What procedures should be followed to ensure information security when dealing with CBRN information?", "761bbb97-b978-43a2-8f4c-50adc790fd0b": "How can confabulation in models be detected and mitigated during the deployment approval process?", "9e565cf7-9f25-43f9-9c04-55f723fcfde1": "What are the risks associated with dangerous, violent, or hateful content in highly capable models?", "b14dae2d-32f4-43d6-80c9-6f7752b5ddba": "What steps should be taken to periodically evaluate offensive cyber capabilities in models?", "b976da61-57a7-4e5f-a8f0-1328f4cd25f4": "How can one ensure that the test plan and response policy are effective in preventing misuse of CBRN information?", "a21ab356-0089-4dbf-b788-2b90e85ee801": "How do remote proctoring AI systems impact students with 
disabilities?", "b444448c-39c6-4825-a428-c35b6541fd7a": "What are the concerns of the National Disabled Law Students Association regarding AI in remote proctoring?", "af11b4d9-49c2-4c1e-a752-6ee9983d2405": "How do healthcare algorithms contribute to racial disparities in patient care?", "667c8aaf-0338-4557-83a6-11d3be7ef5b8": "Why do AI systems in healthcare assign lower scores to Black patients compared to white patients with similar conditions?", "51f1057f-cd68-4ea1-bf25-3d2e6c150aad": "What are the implications of using sociodemographic variables in clinical algorithms?", "efd5daa3-ad2f-403d-9faa-cfc0f273c6bc": "How can AI in healthcare be improved to avoid race-based health inequities?", "cb42a96f-6b88-4a1d-933c-4b66bf56803e": "What specific disability-specific access needs might cause AI proctoring systems to flag students as suspicious?", "54e478ee-901e-4bfd-b77b-ae6dbf060187": "How do clinical decision-making algorithms affect healthcare outcomes for different racial groups?", "0c29d394-6d79-42ea-ac56-98ef3e0c1ddd": "What steps can be taken to ensure AI systems do not discriminate against individuals with disabilities?", "334d816c-6309-49be-8b84-552c577f1ecc": "How do sociodemographic adjustments in healthcare algorithms impact patient treatment and outcomes?", "ef292182-a80e-4c2e-8112-92ec138b322a": "What mechanisms are typically used to supersede or deactivate AI systems that are not performing as intended?", "9a613a68-8a96-44f3-924a-fd86cb2bd642": "How can responsibilities be effectively assigned and understood for managing AI system deactivation?", "21861de8-59d7-4dbe-a656-594afc0c20c0": "What are the key components of a communication plan for informing stakeholders about AI system deactivation?", "1744f37c-51db-4c5a-b1a0-80edb5bd1d47": "Why is it important to inform AI stakeholders about the reasons for deactivating a specific GAI system?", "4bc70be6-98e2-4044-81b3-a337497b444d": "What are some common workarounds provided to users when an AI system is deactivated?", "73b8b746-f1dc-41ac-8aec-5465b68d6593": "How should user access be managed during the deactivation or disengagement of an AI system?", "55b0124a-dcc6-42f6-b033-337a237adf54": "What alternative processes can be implemented when an AI system is deactivated?", "96eddcb3-606d-4fcf-8ecb-1aabb6410965": "What contact information should be included in communication plans for AI system deactivation?", "7c0b2c60-6f69-4f22-bb40-b69c5ea6020d": "How can organizations ensure that open-source AI models are properly deactivated or disengaged?", "fc8a6a87-f230-4bb1-bcbc-b84d5e624924": "What are the risks associated with not having a proper deactivation plan for AI systems?", "586aeb54-40fd-4a75-a7c0-246e11eb3ae8": "What are the common causes of sensitive data leaks in organizations?", "aa626c2d-93f9-4ce6-a685-92c1a419dc4e": "How can companies ensure that their data sharing practices do not present a sensitive data risk?", "3e4a7bfc-a423-403b-aecb-603a13cbe16b": "What are the best practices for conducting ethical pre-reviews of data?", "dccc7390-abc6-409c-835a-9ebe4f7d0b1d": "How should organizations report the outcomes of their ethical pre-reviews?", "731906bd-9376-465c-885b-47262c0ac060": "What types of data are most commonly sold, shared, or made public by companies?", "a869d0fb-d027-4edc-be16-ca89dad5107d": "How can organizations assess whether their data sharing practices are ethical and safe?", "05fba6f9-2740-4b27-beb4-8b707e3fa913": "What ongoing risk identification and management procedures should companies implement?", 
"14a3553d-b4f0-4a82-82fd-f4ded8de7679": "How can companies mitigate risks associated with data sharing and public disclosure?", "53598710-70e3-4180-bfa6-20045b17b328": "What are the key components of a clear and machine-readable data reporting format?", "d156ee61-a192-4285-85eb-3e0f4cd4eb13": "How often should organizations update their risk management procedures to address new threats?", "6aceb768-3368-49cb-90f3-790b8d4131a3": "What are the main techniques used for provenance data tracking in digital content?", "13caf8a9-5882-4525-8431-f068b49ab01c": "How does digital watermarking help in tracking the authenticity of digital content?", "0a4c99ed-0f3b-4ec0-b9c8-91d7a35b593c": "What role does metadata recording play in provenance data tracking?", "7a7a8144-8fa6-4ca8-a505-9a5781f7850e": "Can digital fingerprinting be used to verify the integrity of synthetic content?", "cb3616bd-f4d8-4795-866d-04c8fe1a8b49": "How does human authentication contribute to provenance data tracking?", "77e9d110-209d-445e-8b11-16e0f2209022": "What is the importance of tracking the origin and history of data inputs in GAI systems?", "ad18f30e-051e-4a17-aa6a-1192c3cac7a8": "How can provenance data tracking assist AI actors who lack full visibility across the lifecycle?", "4d0345e3-6423-4930-96b7-564bf85f7af2": "What are the differences between overt and covert digital watermarks?", "af6f7cb4-a265-408b-a84b-4b3fee1fde06": "How does provenance data tracking help in protecting intellectual property rights?", "0718d3de-69b5-45f0-942f-79426b4d5ad4": "What are some potential manipulations that provenance data tracking can detect in digital content?", "3e3a23a7-0b1f-4b74-85bd-a73370b108e6": "What are the main findings of Zhang et al (2023) regarding human favoritism and AI aversion in persuasive content generation?", "c4cf1592-0435-4ae4-8a4c-c5ecb9257eae": "How do people perceive generative AI compared to human experts according to the study by Zhang et al (2023)?", "d6c51c91-05a7-402e-893e-5fc29584968b": "What is the significance of human-GAI collaboration in persuasive content generation as discussed by Zhang et al (2023)?", "e0587b4e-ddbf-4754-8557-bd12c8e644da": "Can you explain the concept of \"hallucination\" in large language models as surveyed by Zhang et al (2023)?", "2544321b-4f79-413c-9a51-2b0b5346a1b3": "What are the key points from the survey on hallucination in large language models by Zhang et al (2023)?", "9011cc3e-7c14-4371-99d2-06a06ec529e7": "How does the study by Zhao et al (2023) propose to implement robust watermarking for AI-generated text?", "0048257b-c29b-4594-829c-28fc43d6a999": "What are the benefits of provable robust watermarking in AI-generated text according to Zhao et al (2023)?", "99ba8f01-a9f5-4945-bc6b-2c290ee16f8f": "How does the research by Zhao et al (2023) contribute to the field of AI-generated content security?", "78f31d73-92bf-4caf-ade9-8bed953bdfa5": "What methods are suggested by Zhao et al (2023) for ensuring the robustness of watermarks in AI-generated text?", "33b17f33-82da-409e-8c46-2ba705142a99": "How do the findings of Zhang et al (2023) and Zhao et al (2023) complement each other in the context of AI-generated content?", "8d45f0a9-2470-406e-845a-42ae6faefaaf": "What is the NIST AI 600-1 publication about?", "e1b36541-9713-4537-8ae0-68c508ee31cb": "How does the NIST Trustworthy and Responsible AI framework address AI risks?", "9761ef9d-05ad-4eb6-84fe-a3e40e15721f": "Where can I access the NIST AI 600-1 document for free?", "6f912bc7-a00d-4201-a5ed-67ad675fb196": "What are 
the key components of the NIST Artificial Intelligence Risk Management Framework?", "d653daf1-bc71-4825-b8c2-d7a2bbbad861": "How does the Generative Artificial Intelligence Profile fit into the NIST AI 600-1 framework?", "c65c29ab-b699-4074-98c3-1d194961096b": "What are the main objectives of the NIST Trustworthy and Responsible AI guidelines?", "2a5dd66d-4d91-48e5-b4d1-0e97ead849bc": "How can organizations implement the NIST AI 600-1 framework in their AI projects?", "7ce28472-4d89-4da8-9e89-4fc46c2a13c5": "What are the benefits of using the NIST AI 600-1 framework for AI risk management?", "afa13ecd-3449-42b8-90ba-ea5b76fb4649": "Are there any case studies or examples included in the NIST AI 600-1 publication?", "83bfcfa0-0f58-42c0-b0fe-8d24efe51b48": "How does the NIST AI 600-1 framework ensure the ethical use of AI technologies?", "efd3986d-bd53-4b00-8759-234ee59b5935": "What is the new initiative announced by the Justice Department to combat redlining in 2021?", "b9552078-c814-4194-ad10-5170b3d76f76": "What are the key objectives of the PAVE Interagency Task Force on Property Appraisal and Valuation Equity?", "24d775f9-adea-4036-a91b-2b8dbdf09f8d": "How does the PAVE Action Plan aim to close the racial wealth gap?", "dbef7a7c-d59a-44a9-85d0-0922c437187c": "What are the main concerns addressed by the EEOC regarding the use of software, algorithms, and AI in assessing job applicants and employees?", "06d81d53-f1b8-45b1-9d99-aad6159cf2f1": "How does the Americans with Disabilities Act relate to the use of AI in hiring processes?", "3e1a2639-ad92-41c2-b64f-74852b3978d5": "What guidance has the US Department of Justice provided on algorithms, artificial intelligence, and disability discrimination in hiring?", "137541ed-d9cd-4d7e-b4eb-09e87ee778a6": "What are the potential risks of using AI and algorithms in employment decisions according to the EEOC?", "f54b5a57-e54a-4eb4-b5ea-3861f01c7a4a": "How does the PAVE Action Plan propose to address mis-valuations for families and communities of color?", "dc89246f-d70d-485b-a723-08770bd02f19": "What steps are being taken by the Justice Department to address redlining practices?", "9a719c6a-76c4-4af6-ae8c-1028bbe8568e": "How can employers ensure compliance with the ADA when using AI and algorithms in their hiring processes?", "2f7c682f-8e96-43da-8397-67ce42d16885": "What are the main concerns regarding the use of biometric technologies in the public sector?", "fdb90e56-c634-47ef-8f82-cf244064da4f": "How does the private sector utilize biometric technologies differently from the public sector?", "03bc26cf-d584-4f6c-952f-6da051e2ec27": "What were the key findings of the OSTP\u2019s Request for Information on biometric technologies?", "b36444b8-7e2e-4cc0-923b-d70acd084d36": "How does the National Artificial Intelligence Initiative Office contribute to the governance of biometric technologies?", "a8d11191-6908-46c0-9ccd-5526ac9ac777": "What are the potential privacy issues associated with biometric technologies?", "4e4d21bb-0a0d-49fd-948f-d1510ecdda24": "How can biometric technologies improve security in public and private sectors?", "4ba397c0-1b13-4613-9e30-01238dcdb761": "What are the ethical considerations in the deployment of biometric technologies?", "c38be416-9682-4d97-8adc-600dfa5d5218": "How do public opinions vary on the use of biometric technologies in different sectors?", "d8c53a02-255b-4261-813a-b50fa08cd969": "What are the recommendations from the Science and Technology Policy Institute regarding biometric technologies?", 
"7568d3e5-b407-4b82-a31a-2d7048871862": "How is the governance of biometric technologies evolving in response to public input?", "fe6be310-abb6-4d17-b332-42b573870741": "What are the key expectations for automated systems in terms of accessibility?", "4da20bc3-35b4-40d8-8699-3a01d413ac86": "How can organizations ensure that their automated systems are accessible to people with disabilities?", "e3b5fbce-4994-4bee-bfde-419e661a8a25": "What types of disabilities should be considered when designing automated systems?", "e6f6d4ff-5bc4-4809-aa55-4639adf8fbe5": "What are some relevant accessibility standards that should be adhered to during the development of automated systems?", "f4c81b62-7898-4380-bbe0-2e9ef2568424": "Why is user experience research important before and after deploying automated systems?", "1270cceb-5232-47d9-b785-280f19ce1fb4": "How can organizations identify and address accessibility barriers in automated systems?", "77d31311-acc9-4a0a-9ff7-8cff6cda43c8": "What is the importance of disparity assessment in automated systems?", "6409d026-b4de-4585-b70c-a6aa6203ecdb": "How should automated systems be tested to ensure they do not produce disparities?", "e6682ed3-e17b-47ae-9d07-f50e8f6c2df3": "What measures can be taken during pre-deployment testing to assess disparities in automated systems?", "13d62abf-9a46-4079-9a23-4ed18c3d22a9": "How can in-context deployment testing help in identifying disparities in automated systems?", "e4777246-3922-49ed-a2cb-5205495a2534": "What is the definition of equity in the context of fair treatment?", "0f7bdfbc-7ab6-4861-8efd-a3729e4128ad": "How does equity address the needs of underserved communities?", "3fbd4df7-eb9c-4760-afb0-2f341544e453": "Which groups are considered underserved communities in the context of equity?", "9861991b-cae5-4ecc-952d-0c2a6be10285": "How does equity ensure fair treatment for LGBTQI+ persons?", "4219fb8a-0ac8-4ecb-9d80-7ff321f4cd59": "What role does equity play in addressing systemic inequality?", "74b3d860-da7f-439b-8e61-5c69ac15c28b": "How are rural residents included in the concept of equity?", "bf920e1a-ff07-4d59-b892-8609aa91fdce": "What is meant by \"rights, opportunities, or access\" in this framework?", "19f1343a-292b-4bb5-8567-c39e272ec0ae": "How does equity relate to civil rights and civil liberties?", "8e6fc90c-607d-4e5f-8946-0f2b984a4975": "In what ways does equity impact older adults?", "e2f0625f-8931-45c4-b593-bb284f09ddd2": "How does the concept of equity address persistent poverty?", "7a5e1a08-397a-440e-8f6c-1af66ee702d0": "What are the key characteristics of trustworthy AI that should be integrated into organizational policies?", "75e53249-b463-44d5-83be-bdd902fc92ce": "How can organizations establish transparency policies for documenting the origin and history of training data in AI applications?", "aa76d60e-073a-408e-bf7f-44ee6a8d0a9b": "What are the risks associated with data privacy and information integrity in AI governance?", "66ee0108-3649-4abc-95cd-c102ba9c3451": "How can organizations balance the proprietary nature of training approaches with the need for digital content transparency?", "67c464c5-77f9-4875-8e31-8402a617f457": "What policies should be in place to evaluate the risk-relevant capabilities of AI before deployment?", "2b18ac77-f66e-449f-bb8c-641d6d6e522c": "How can organizations ensure the robustness of safety measures in AI applications through internal and external evaluations?", "a8055010-820e-4e2a-acbd-4363d93ffeef": "What are the specific risks related to CBRN information or 
capabilities in AI governance?", "b1c0e6a1-f07d-4e7e-a97c-2264522b8bcc": "How can organizations determine the needed level of risk management activities based on their risk tolerance?", "421c1f70-89d3-45a7-92c0-6b6fd7393f78": "What are the best practices for integrating trustworthy AI characteristics into organizational processes and procedures?", "41710f51-0c31-4835-a239-bba06e0b5003": "How can organizations manage intellectual property risks in the context of AI governance?", "e9ef61d9-acfb-4295-8166-81d2d8e29cd1": "What is the Department of Defense Responsible Artificial Intelligence Strategy and Implementation Pathway?", "63251280-da3b-4d49-9e3b-8b79726b8e55": "Where can I find the Department of Defense's AI strategy document released in June 2022?", "c739a52b-1410-4168-82eb-52163e1c9cc2": "What are the key principles of Artificial Intelligence ethics for the Intelligence Community according to the Director of National Intelligence?", "7da3dff9-04d3-4cea-814c-cd1c71e8f2fa": "How does the Department of Defense plan to implement responsible AI practices?", "70a71142-ccc1-46c3-85cb-f7e5573d1ea0": "What ethical guidelines has the Director of National Intelligence set for AI in the Intelligence Community?", "b1801b81-b269-4946-9f2f-f3bd71107763": "Can you provide a summary of the Department of Defense's AI strategy and implementation pathway?", "22b9c165-4460-449c-a296-6369c392e632": "What are the main objectives of the Department of Defense's AI strategy released in June 2022?", "f40a3247-588b-4f4a-8c27-0234b1af2a95": "How does the Intelligence Community ensure ethical use of AI according to the DNI's principles?", "55315bb2-f065-44ae-bc39-ffa0fe34eb65": "What are the challenges mentioned in the Department of Defense's AI strategy document?", "f2e63c22-7347-47c9-b0a7-ff7f363ad3c1": "How can I access the full text of the Department of Defense Responsible Artificial Intelligence Strategy and Implementation Pathway?", "9128749a-0d1d-42e5-94c3-9b820c40125b": "How does automated test proctoring software discriminate against disabled students?", "aab3924b-582c-4dc6-9118-49c1c8b51b5a": "What are the main findings of Ziad Obermeyer's study on racial bias in health management algorithms?", "aa7f061b-c839-40ea-ba44-f24f4ab55467": "What are some examples of discrimination faced by disabled students using automated test proctoring software?", "bd8aaa1f-1382-47e3-9ce1-09f9e7164ebc": "How can automated test proctoring software be improved to be more inclusive for disabled students?", "048678e4-9c45-4540-a0c6-72c404e2106b": "What impact does racial bias in health management algorithms have on patient care?", "51ba72f1-b32c-479c-820e-2d02252ecc21": "Are there any legal protections for disabled students against discrimination by automated test proctoring software?", "4ec83564-814d-4175-8f0f-704d3e58d2f7": "What methods were used in the study by Ziad Obermeyer to identify racial bias in health algorithms?", "3569ca2c-24e6-4785-ba6e-317a3d302202": "How prevalent is the issue of discrimination in automated test proctoring software?", "2869107a-ce0c-41b3-8508-c60d12c31fe7": "What are the ethical implications of using biased algorithms in healthcare management?", "cc253ba2-15d7-47dc-a233-8bc4a26cdedc": "What steps can educational institutions take to ensure fair testing conditions for disabled students using automated proctoring software?", "153cebaa-730a-43c5-99b7-10c18bec1538": "What are the best practices for managing risks associated with Generative AI (GAI)?", "dba1f4e2-98a6-4d42-b384-1a54e1d6ad1c": "How 
can organizations govern risks across various stages of the AI lifecycle?", "becc940f-e73e-4aa7-8581-f1ba6def41f0": "What are the common risks associated with the use of large language models (LLMs)?", "7a90fb5d-f437-4b59-9931-c0af0ec35339": "How can cross-sectoral profiles help in managing AI risks?", "43e9e772-adf1-40b7-9b1a-9fc44772e900": "What are some suggested actions for organizations to manage risks in AI?", "c2a5af10-b86f-4924-8010-b80c2782b006": "How does the AI RMF profile help in mapping and measuring AI risks?", "6e36882c-0b7c-4f22-a9d0-560ddc1859f5": "What are the novel risks introduced by Generative AI?", "7e0b3a69-208b-4140-8f56-b1041396d409": "How can cloud-based services impact AI risk management?", "7968a9ac-bfaf-4a59-bb35-1125136d6a96": "What are the risk management priorities for Generative AI?", "c942dcb0-ebf6-4dc8-aef1-d3b7642ef3c5": "How can organizations use AI RMF profiles to manage risks in business processes?", "aab11cf2-1200-420c-a291-040945c00aad": "What are the best techniques for managing statistical biases in GAI content provenance?", "96ade2c4-dc96-4d8d-ab36-a0860916d265": "How can re-sampling and re-weighting help in reducing biases in AI evaluations?", "1b1d4db6-e59f-4967-9a3b-17ad799ac4b4": "What is adversarial training and how does it mitigate biases in AI systems?", "ca0cdce9-6039-460a-8ebc-0d2da442b1a8": "How should content provenance data be documented to ensure privacy and security?", "0df51587-f84b-43eb-9c03-341d92892edd": "What are effective methods for anonymizing data to protect the privacy of human subjects in AI evaluations?", "add464c4-0e7d-4348-91dc-15a25c5a309d": "How can privacy output filters be leveraged to enhance data privacy in AI systems?", "36e32c02-9ee0-485c-8d4c-110273810e28": "What steps should be taken to remove personally identifiable information (PII) from AI datasets?", "877d24bb-418d-4075-b1e9-a626881ec675": "What are the risks associated with not managing statistical biases in AI content provenance?", "3da8d02e-d37f-4c4d-a06c-1f61edd78d4b": "How can human subjects withdraw their participation or revoke their consent in AI evaluations?", "841e4207-02f3-4f15-b3cc-0574eccf296b": "What are the potential harms of not protecting human subjects' privacy in AI evaluations?", "b4cfa32e-3647-4808-9718-9b77c8956ff5": "What is the importance of assessing notice clarity and timeliness in user notifications?", "34a1099f-5eb1-4cdb-bd14-0904a0d6883c": "How can the validity and accessibility of explanations be evaluated effectively?", "baf6bb09-9e41-48d9-b4c6-1d06d90b9bb2": "Why is it crucial to assess the level of risk in system impacts and inferences?", "fd7da839-a0e8-4f92-a7fd-2e894b6e5c04": "What are the best practices for tailoring explanations to different recipients and purposes?", "a81b08ef-f745-43dc-8405-02f77e2ebee0": "How can individualized profile information be made readily available to users?", "a3e89095-24b0-4e5b-957d-ac24b193a418": "What methods can be used to ensure that reporting is clear, plain, and machine-readable?", "bf278d53-8d4e-44d1-a215-6464076e0e89": "How does the assessment of notice clarity impact user trust and understanding?", "52b91d4f-d324-4333-8d00-e513e1af51b5": "What factors should be considered when evaluating the timeliness of notifications?", "e8955df1-dc4d-47af-b782-993b5b795595": "Why is it important to provide explanations for system impacts or inferences to users?", "5cbad06d-9274-443a-8465-7a7347fd1bb8": "How can organizations ensure that their explanations are tailored to the appropriate level 
of risk?", "098c6a18-502f-48ec-999e-13d995b28661": "What is the Blueprint for an AI Bill of Rights?", "699f9227-0a04-4b34-9fbb-bcdabe65806c": "How does the Blueprint for an AI Bill of Rights inform policy decisions?", "da24d51e-024a-422a-976c-cbe53754d0ff": "What is the role of the White House Office of Science and Technology Policy in the context of AI?", "d984a686-20d3-4998-bddb-d15ef2a55d67": "How does the national values statement and toolkit guide the responsible use of automated systems?", "7769d886-01e5-43d6-8808-a34ed6787c48": "What are the key principles and frameworks published by consortia for automated systems?", "3f2e20af-2d2f-40e2-9c9d-5f447f5fd470": "How does the framework address sector-specific privacy laws and oversight requirements?", "8cc369e6-5dbf-4195-bdc7-59d477935e01": "What kind of input has the White House Office of Science and Technology Policy sought from the public?", "2a846355-ce0a-4a42-bf39-c58e28255baf": "How are impacted communities and industry stakeholders involved in the process of addressing algorithmic harms?", "97341ca2-a8ed-46fd-8b73-27cf49a776db": "What are the potential remedies for algorithmic and data-driven harms discussed by the White House Office of Science and Technology Policy?", "ed70d285-bd85-4086-a63a-22c25a560082": "How can policymakers use the national values statement and toolkit in the technological design process?", "8c7fb32c-c935-4651-8283-e8c472a0b365": "What are the key civil rights and liberties protected under the Blueprint for an AI Bill of Rights?", "78c6ff01-8630-4cd4-a808-4a934eacb25f": "How does the Blueprint for an AI Bill of Rights address issues of unlawful surveillance?", "cd574726-80a1-47a9-b260-413ed3f52311": "What measures are suggested to ensure equitable access to education through automated systems?", "85722140-1b7b-46da-9ec6-432923b12175": "How does the Blueprint propose to protect individuals from discrimination in employment using AI?", "eded288b-f225-4e3b-be1d-6732eeceebb3": "What guidelines are provided for ensuring privacy in both public and private sector contexts?", "d91e1537-de8d-491c-b125-765de2f2afe8": "How does the Blueprint for an AI Bill of Rights ensure access to critical healthcare services?", "c91b6fc5-1d16-48e6-b88c-8dd1e3af6559": "What are the recommended practices for providing non-deceptive information about goods and services?", "3cd4fd6f-9208-4ceb-a69d-dc920993a98d": "How does the Blueprint address the issue of excessive punishment in the context of automated systems?", "e138b6b8-2968-4bab-8d4a-edca29460bcd": "What protections are in place for voting rights within the Blueprint for an AI Bill of Rights?", "1c59e241-7137-41cc-946d-c1cb0d30a4b1": "How does the Blueprint ensure fair access to financial services through automated systems?", "a4b6ef42-b7bd-444a-970f-c4708415a0ea": "What is NIST Dioptra and how is it used to evaluate GAI trustworthy characteristics?", "6b536f29-aa04-4434-b133-bb4e20ecd6f1": "How can data privacy be ensured when using AI systems in CBRN information or capabilities?", "bde91ad8-b00a-4c6b-ab35-cbc33641a8a4": "What are the risks associated with confabulation in AI systems?", "6fdb5ca1-7096-4613-ad70-ef413dcf51b2": "How can information integrity be maintained in AI deployments?", "5d29075f-46e0-4828-b712-657535007fca": "What measures can be taken to prevent harmful bias and homogenization in AI systems?", "3b68fd1d-60d7-4479-bc67-e72dc34b2baf": "What are the key tasks involved in AI deployment and TEVV (Test, Evaluation, Validation, and Verification)?", 
"b8bcfc18-dfb7-45f2-94fb-686ffb671400": "How can the limitations of generalizability of AI systems be documented effectively?", "448bc715-abc3-4d6f-a386-a2e6c48b434f": "Why is it important to avoid extrapolating GAI system performance from narrow assessments?", "35077bce-fc70-4f73-9e85-bd50ea0b4421": "How can human domain knowledge improve GAI system performance?", "b299ae9b-dd09-4132-a8d7-685ac966b912": "What are some methods to document the use of human domain knowledge in enhancing AI systems?", "26969f38-1ae0-4596-8d89-e2796ab42cc5": "What are the trade-offs involved in early-stage model decisions for AI systems?", "4fcf7347-858d-4761-95f8-19fb1af11339": "How does selecting a watermarking model for robustness impact computational complexity?", "589055f4-8d10-4b06-b68f-9cfd4048cefe": "What is the importance of tracking the provenance of training data in GAI systems?", "40defdf1-6880-44b7-a1ac-b88f466b074a": "How can documenting provenance data limitations benefit GAI systems?", "8f3e8e77-90af-4d0d-8dd3-5d5f2e614d74": "What are the cascading impacts of early-stage model decisions on downstream performance?", "ca4886da-af0a-420f-b881-1207c2a9181b": "Why is it important to manage organizational risk in enhancing content provenance?", "c13ec0d2-6f92-4b7d-8743-62c761213c6e": "How does prioritizing robustness in watermarking models affect other aspects of AI performance?", "b1ec710f-242e-4b53-a427-76970b24a311": "What methods can be used to track the provenance of metadata in GAI systems?", "edc582fd-2c53-4a88-ac44-3c229de7085a": "What are the potential risks of not documenting provenance data limitations in GAI systems?", "f9e0f746-0407-4024-882b-ca132aac8938": "How can organizations balance the trade-offs between robustness and computational complexity in AI models?", "547e953c-3c15-4ee0-b243-3c2761a19028": "What are the main civil rights concerns discussed in the Shared Statement Of Civil Rights Concerns document from July 30, 2018?", "8787fe19-eb36-4f63-8355-085d09e9c966": "What is the focus of Idaho Legislature's House Bill 118 passed on July 1, 2019?", "4558a005-edd2-4a8a-a3a1-96e8a91bdc99": "What are the key findings of the Executive Office of the President's report on Big Data and algorithmic systems from May 2016?", "905133de-f72a-42a8-9a2f-e194bee80d01": "What are the main arguments presented in Cathy O\u2019Neil's book \"Weapons of Math Destruction\"?", "0093d7ea-7e1a-4cad-a33f-26434d5e9417": "What is the central theme of Ruha Benjamin's book \"Race After Technology: Abolitionist Tools for the New Jim Code\"?", "d802150d-0458-44ad-94e6-d48a2bafddc7": "How has facial recognition technology led to wrongful arrests, as discussed by Kashmir Hill?", "3dec342d-89f1-48ea-b474-124dabac2778": "What are the potential civil rights implications of pretrial risk assessments?", "26e9fdf4-b265-44ac-aab5-a4811434f00b": "How does the use of big data in criminal justice impact civil rights, according to the 2016 report by the Executive Office of the President?", "e9a896b0-946f-4635-90a0-2abae8017e85": "What are some examples of algorithmic discrimination mentioned in the provided context?", "2a1a746c-d31f-4062-9339-f652ee1cd0fc": "How do the books \"Weapons of Math Destruction\" and \"Race After Technology\" address the issue of racial bias in technology?", "4adc70dc-e01b-4a8d-bd8c-adef4690717c": "What are the benefits of having a human fallback system in automated processes?", "81c46268-ad7f-45a4-ab44-b39eec645df2": "How can users with disabilities request reasonable accommodations in automated 
systems?", "6c21060a-4d83-46b3-a846-697b9a09c91b": "Why is it important to test automated systems for accessibility for users with disabilities?", "7753fc6c-a2dd-4e2f-9694-1ad7f0d6c64f": "What mechanisms can be implemented to ensure human fallback is not burdensome?", "ed7e713d-1382-464c-aaa9-eb64bdc88fbc": "How can companies ensure that their automated systems are accessible to all users?", "26951caa-8dbb-4149-81de-40d042c190ba": "What are some common issues users face with automated systems that require human assistance?", "19fd9fc0-5705-4c8f-9471-301741097255": "How can human consideration improve the user experience in automated systems?", "1d8d1efe-bee3-41c4-a9a1-549978df9821": "What steps can be taken to make human fallback mechanisms more convenient?", "c886eb12-d921-44b8-8452-f1f5bc1225f4": "How do automated systems typically handle requests for reasonable accommodations?", "0a84141f-d8b5-468a-a16b-93a9a0a47587": "What are the challenges in balancing automated systems with human fallback options?", "42804d52-5b54-4e4a-aa0b-0f295b8b73bb": "How can systems be tested to ensure they handle inappropriate or malicious queries effectively?", "beed6021-c77f-4dd5-b636-dddc90934522": "What are the best practices for evaluating AI system vulnerabilities regularly?", "7f734e3f-9eb5-4d60-99e6-c12b8c79fe06": "How can AI systems be protected from being used for extortion or targeted impersonation?", "b5c02363-613b-4083-a3a5-ae724917ecbb": "What methods can be used to prevent AI systems from facilitating cyber-attacks?", "84e252fa-dec3-4ec1-9b3d-a0e88f6a8b55": "How often should safety measures in AI systems be reviewed and updated?", "fd9556ae-448f-4d3d-af10-8089d6b043b8": "What role do domain experts play in the operation and monitoring of AI systems?", "e580a445-024f-4b81-a591-c79fa6f7fb0a": "How can AI deployment be managed to minimize the risk of illegal usage?", "ee7f5729-19f1-41d3-ba40-12390626207d": "What are the key components of an AI impact assessment?", "0b0819ad-858f-4cc2-9a1c-41f9c77d93d6": "How can TEVV (Test, Evaluation, Validation, and Verification) be applied to ensure AI system security?", "3850de5f-db60-43a0-94d4-63536424c707": "What strategies can be implemented to prevent the circumvention of safety measures in AI systems?", "dcec4fe7-764a-4c8a-b07f-90fcbb7708ba": "What are examples of time-critical systems in various industries?", "e9fb9f53-24c3-4408-8e9d-508409b7f92e": "How can organizations ensure that new decisions are effectively enacted in automated systems?", "1c6c655d-fa19-4fa2-8143-ea52a0a44611": "What safeguards can be put in place to prevent future errors in automated decision-making systems?", "97179ce5-9493-4f29-90be-01818f234c9c": "How do automated systems in healthcare impact patient safety and care?", "10be3d49-59a6-4675-8216-5d2a7bb76336": "What are the consequences of errors in automated systems that control financial penalties?", "1b29da2d-0fb6-41d6-8e7a-38493f5b169f": "How can voting-related systems be protected from errors and ensure accurate results?", "43d4dced-d40b-4a55-9481-6f383f7be02f": "What processes should be in place for human decision-makers to reassess and overrule automated decisions?", "5f442e2b-07cc-469f-ac6d-9e468c48364d": "How can automated building access systems be made more secure and reliable?", "ed225928-3594-4648-8270-ea357b99ac8f": "What are the best practices for maintaining human consideration and fallback processes in automated systems?", "2ac2ed73-c6e1-4af9-8a79-5c62a839798c": "How can organizations overturn previous 
repercussions from incorrect automated decisions?", "fb32bc70-21c5-4841-beea-62de5c884172": "What are the key components to include in a service level agreement (SLA) for GAI systems?", "a012d748-c92a-4b5c-ba2d-b54a3914756e": "How do you ensure content ownership and usage rights are clearly defined in contracts for GAI systems?", "37d2418c-90ce-43c2-809f-d6c0c96ef7c3": "What quality standards should be specified in SLAs for GAI systems?", "8af40417-015a-48e0-965b-fc636209e614": "How can security requirements be effectively integrated into contracts for GAI systems?", "805a7e12-3609-48b5-8320-157d4b583f8e": "What are the best practices for maintaining information integrity in GAI systems?", "baa6b25d-2b92-4536-9b88-465f65d6d2fe": "How do you address content provenance expectations in SLAs for GAI systems?", "4e4d7968-ecd4-4428-9b66-54567b3811a3": "What are the common challenges in drafting contracts for GAI systems?", "7e5eef6e-1818-4a3c-941b-7cbfd29ab0a4": "How can intellectual property rights be protected in GAI system agreements?", "4ed36e1b-43e7-48b4-a4cc-ef3087d2246b": "What role does information security play in the value chain of GAI systems?", "5d208543-8d35-4012-8c41-f13b2978eae6": "How do you ensure compliance with information integrity standards in GAI systems?", "a8dacfd0-ba17-40c9-b350-208d2e297cd2": "What are the best practices for conducting diligence on training data to assess intellectual property risks?", "32da3eaf-6181-4aab-a4f0-41e9f8e2c415": "How can AI actors ensure that the use of proprietary training data complies with applicable laws?", "f713140e-cb77-498a-b734-2e3226c38cfa": "What are the key considerations for assessing privacy risks in AI training data?", "5aa920d1-a55e-40d6-99a9-e31c968672cf": "How can organizations document the likelihood and magnitude of AI system impacts?", "3ebe14d9-c0f7-44b2-afb8-cd0da676e8be": "What methods can be used to gather feedback from external sources about AI system impacts?", "32b1ece3-14d1-4487-853e-c72b7d522734": "What are TEVV practices, and how do they apply to content provenance in AI systems?", "5dd34d24-f3ea-4aa7-b42b-0f26e197f095": "How can AI systems be probed for potential misuse or vulnerabilities in synthetic data generation?", "3f3496fe-0f67-405f-9bec-0181454b415d": "What are the governance and oversight tasks for AI actors in relation to training data use?", "98b6ca14-c6f1-4733-90a9-e8fcaac3e450": "How can past uses of AI systems inform the assessment of current AI system impacts?", "dd5071c9-0180-4916-b959-c6a07c56db03": "What are the risks associated with information integrity and security in AI systems?", "aa2070d5-b6bc-4073-a4cb-b2a83531da3c": "What is the role of the Office of Science and Technology Policy (OSTP) in coordinating science and technology policies?", "ca96d7d3-1320-4416-8cdd-7963833fa563": "How does the OSTP assist the Office of Management and Budget (OMB) with Federal research and development budgets?", "ab6fdd15-c2c9-4c64-9836-5410506b43a7": "What are the main topics covered by the OSTP in their policy coordination efforts?", "cc9ce0e0-cd3b-452e-97cd-67a557ba37e0": "What is the purpose of the Blueprint for an AI Bill of Rights published by the OSTP?", "31087be7-62d7-4c98-a630-dd1d81d1181b": "How does the Blueprint for an AI Bill of Rights aim to protect civil rights in the context of automated systems?", "8afd8443-7a29-410c-96b7-f41aea383947": "Is the Blueprint for an AI Bill of Rights considered US government policy?", "4a65f47c-1889-45a4-9ff3-a8bf32191dba": "What are the democratic values 
promoted by the Blueprint for an AI Bill of Rights in the governance of automated systems?", "8cda0b73-a4d4-4d29-9816-3f0cf0bbc8b4": "How does the OSTP provide scientific and technological analysis for the President?", "b25eb9c4-f4bf-4134-b0d3-e134b79ace2d": "What is the significance of the Blueprint for an AI Bill of Rights being non-binding?", "27b484ed-f229-4b08-be96-75c3abc5f8a3": "In what ways does the OSTP influence major policies, plans, and programs of the Federal Government?", "b3f70511-7f40-4e59-81c3-ea17760970fe": "What are the key characteristics of trustworthy AI?", "6541a0ad-b207-4b2d-a359-3650fafa8943": "How do energy and carbon emissions vary in GAI systems?", "669d0023-9a92-4a21-b449-8fa1a6af2b88": "What is the environmental impact of training a single transformer LLM?", "a31427f5-6815-40aa-9f6e-73e09d22a931": "How do generative tasks compare to discriminative tasks in terms of energy consumption and carbon emissions?", "7084c993-fcea-49d4-b9d7-2a857b9f45df": "What methods can be used to create smaller versions of trained AI models?", "e6cc306a-b275-43af-a0a7-799fe2f76eb9": "Why are generative tasks more energy- and carbon-intensive than non-generative tasks?", "f593dd60-a1e1-4ced-b812-c96907870b2a": "How does the hardware used affect the energy and carbon footprint of GAI systems?", "c8be6a16-64b0-4b54-98c7-079cb7ec7717": "What are the potential environmental impacts of maintaining and operating GAI systems?", "b5e8f4bf-d285-47ed-945d-af92d42d30ff": "How does the modality of content influence the energy consumption of GAI models?", "8e4c0306-cc08-4518-83d4-6f690dd17d67": "What is model distillation and how does it help in reducing the size of trained AI models?", "a380299e-40fa-430f-992f-328134b228ce": "What role do advisory boards play in ensuring the accountability of new technologies?", "7cfa0b20-7c5f-4732-bbfb-bb227472a8c9": "Why is transparency alone not sufficient to achieve accountability in technology development?", "fd5786c9-4268-4527-8e19-6afd150f1710": "How can compensated community input influence the design process of new technologies?", "2ce45131-027f-4fee-8260-1dc2b57ba04e": "What are some additional system needs for validity in technology development, according to panelists?", "8c04225d-b7a3-4ebc-825e-8b08770983d5": "Why is it important to include community input early in the design process of technology?", "e0ebe468-8026-4719-8e3c-8f244e4b01e9": "What types of regulations are necessary to limit the type and cost of new technologies?", "75212bde-5e1c-41e8-98c8-52a538a82b6a": "How can early community involvement impact the success of new technologies?", "e1c0318c-d858-4aed-b2c0-ef0e1203fda2": "What are the benefits of having advisory boards in the technology development process?", "a96ba846-68e3-499f-be1d-9a65c95f7103": "How can regulation help in achieving accountability in the use of new technologies?", "0ad0778a-9494-4e3e-af88-c4694c4eddcc": "What are the potential consequences of not including community input in the design process of technology?", "f974b90c-f825-4ac8-b640-a6316a1d595e": "What are the best practices for post-deployment AI system monitoring?", "1fbd890e-3037-4f08-8ca6-d2ccc5b997e6": "How can organizations effectively capture and evaluate input from AI system users?", "77cb5cc4-479c-46ed-b80f-f461753002f9": "What mechanisms are recommended for handling appeals and overrides in AI systems?", "6dba3c3f-8828-4ed8-a5aa-eacdf7a2c278": "What are the key considerations for decommissioning AI systems?", "d9669bfc-ca24-4606-a9da-8cdef4498029": "How 
should organizations prepare for incident response and recovery in AI systems?", "decf6dd1-9ab9-49ac-b195-232ac4b5b5fb": "What is the role of external researchers and industry experts in managing AI risks?", "e7207f21-6721-4ef3-9f8b-fcd88334e364": "How can harmful bias and homogenization be mitigated in AI systems?", "8db3af9c-f69b-43a8-8614-e16adacd3ea1": "What processes should be established for monitoring potential confabulation in AI systems?", "30fbacfb-b9b9-4404-99ca-aa7dd91fb39f": "How can sentiment analysis be used to gauge user sentiment regarding AI systems?", "491c9db9-eec0-45a7-8d35-3b865c6cf096": "What are the challenges in maintaining information security in post-deployment AI systems?", "d5096416-f388-46a8-ba7c-efe00591a79f": "What are some practical ways to reduce bias against Black patients in medical care?", "047f16f9-4bbb-46e2-bc04-02caae32fad3": "How does focusing on active chronic health conditions help reduce healthcare bias?", "cc1bd459-a505-4eb3-bb21-c7ab7b777125": "What are avoidable future costs related to emergency visits and hospitalization?", "6a4f1ce1-50e2-4a89-94f5-90f724c34e60": "What best practices have large employers developed to scrutinize hiring data and models?", "c758fef6-ab25-497e-8fcf-971551bff0b3": "What is the purpose of the Algorithmic Bias Safeguards for the Workforce initiative?", "a39fde86-c926-4df1-9cf1-2c15908c1429": "What specific technical questions are covered by the Algorithmic Bias Safeguards questionnaire?", "9c9c9015-c691-44e8-9284-f62ef5306243": "How can businesses proactively use the Algorithmic Bias Safeguards when procuring software?", "417326fd-a953-4bf5-a161-0e5892bb974e": "What are some biases that can be identified in the model training process?", "d89c4058-3ac5-421d-b581-879d16d5de44": "What mitigation steps can be employed to address biases in hiring algorithms?", "8e1c792b-4ced-4778-8949-6daa22e033fc": "How do standards organizations incorporate accessibility criteria into their guidelines?", "2ea244fd-8aca-4018-b8b0-223714fba699": "What are the key methods discussed in the Partnership on AI's glossary for synthetic media transparency?", "2fb245a9-6b49-454b-9ab8-d94912761aeb": "How does the paper \"Unsafe Diffusion\" address the generation of unsafe images and hateful memes from text-to-image models?", "dd48491b-a737-4616-be6a-745b00f5915b": "What strategies are proposed by Rafat et al (2023) to mitigate the carbon footprint in deep learning model compression?", "f027880e-a213-48a4-9139-9393e699642e": "What legal attitudes are explored in Said et al (2022) regarding the nonconsensual distribution of intimate images?", "c704d4cf-6b24-44ed-9113-aa42008d5f83": "How does the study by Sandbrink (2023) differentiate the risks of language models in the context of biological misuse?", "366bacdd-bb77-47f2-a39c-c7bf6169f8b7": "What are the main findings of Qu et al (2023) on the generation of unsafe images from text-to-image models?", "25533810-cb28-4c5b-9792-7183df9cc659": "How does the Partnership on AI suggest implementing indirect disclosure for synthetic media transparency?", "26e99d87-0a41-4952-ae1b-35a9302a5920": "What are the environmental implications of knowledge distillation in deep learning, according to Rafat et al (2023)?", "907ab49a-3d4c-4679-8038-f781b13ee780": "What role do legal attitudes play in the victimization and perpetration of nonconsensual distribution of intimate images, as discussed by Said et al (2022)?", "aaa30f81-e6c7-4103-8a63-152c74a2b091": "What are the potential risks associated with artificial 
intelligence in biological misuse, as highlighted by Sandbrink (2023)?", "b0182517-507f-402c-9d72-7b621f64e5b4": "What are the additional expectations for automated systems that handle sensitive data?", "1c9c675b-2d74-4673-9aa9-cb84ac14fa32": "How is sensitive data defined in the context of automated systems?", "6d42e83b-e788-4ab9-a002-cd545c1513d7": "What types of data are considered sensitive according to the provided context?", "b6530c27-37b3-4728-bac0-f9d4473dbdbf": "Why is it important for automated systems to meet additional expectations when dealing with sensitive data?", "9179505b-25f1-43ea-8329-1f120ce64c61": "How can sensitive data expose individuals to meaningful harm?", "d7d542fb-ce5a-4048-8f5f-b650b4c35bb2": "What are some examples of sensitive data mentioned in the context?", "e1052cc7-4c2d-41cb-92c3-998f3211584f": "How can automated systems infer sensitive data about an individual?", "f0f3d3f7-c4a4-4fb7-acfc-b992f475242a": "What are the potential risks associated with the misuse of sensitive data?", "235fe3e8-a203-480d-b7b7-9f411ea2d613": "How does the context define sensitive domains?", "46d01f66-a474-4e8e-ac51-c8d522ef006c": "What measures can be taken to protect sensitive data in automated systems?", "874107ea-6ed8-441f-968f-3704b981bf64": "What are the key capabilities and limitations of GAI systems in terms of digital content transparency?", "2cdd6080-bdc8-4e6c-b474-90b02b47b693": "How can training materials be developed to educate AI Actors and the public about the societal impacts of AI?", "a0fb147c-38c0-4fef-9b75-18e76cf8a420": "What role does diverse and inclusive content generation play in the context of AI systems?", "df1759f8-c9a0-4abf-9108-e0f81494dbaf": "How can feedback from operators, users, and impacted communities be effectively recorded and integrated?", "1cfd9941-237a-46a0-be9f-b60e2511126e": "What methods can be used to gather structured feedback about content provenance?", "4d85eb98-cd48-4fdc-80d4-2923f78551af": "How can user research studies, focus groups, or community forums be utilized to assess content quality and biases?", "3e96a9b6-2c8d-490f-906a-bf61de6087dd": "What are the best practices for seeking feedback on generated content quality and potential biases?", "55df2562-e98c-4baf-a911-341e5304f4a6": "How can the general awareness among end users and impacted communities about feedback channels be assessed?", "364d50e7-0d67-4aee-b2a4-4e278b9be1a9": "What are the implications of harmful bias and homogenization in AI-generated content?", "759c5df7-8c61-4805-b255-184f63b68e04": "How can AI deployment and monitoring be improved to address issues related to information integrity and harmful bias?", "cf41653e-3c18-45ed-a00a-19afbd790804": "What are the potential risks of insurers collecting data from social media for determining life insurance rates?", "8ae367c1-1ed9-4fe1-b0c5-25212e409675": "How can data breaches by data brokers lead to identity theft?", "624e6325-9549-4672-912d-2863775ecf64": "What are the privacy concerns associated with facial recognition systems in public housing authorities?", "c0e48c62-1775-4503-bc52-2b14d98033f4": "How does surveillance software used by companies to track employee discussions about union activity impact data privacy?", "8c95bd57-9819-4866-946c-34d2b7751cb5": "What measures can be taken to protect personal data from being harvested and exposed by data brokers?", "9134a063-6c7d-43f6-b9f4-d7e0712c9e59": "How does the use of facial recognition software by law enforcement affect community trust and privacy?", 
"afa24cd5-0d80-4e11-bb7f-28b1ef159c0d": "What are the ethical implications of insurers using social media data to determine insurance rates?", "07d3959c-fd15-441f-a064-d94b454936be": "How can individuals protect their personal data from being collected and misused by companies?", "accc5413-f9c8-492e-954c-db2935a83a8d": "What are the legal protections in place to prevent misuse of surveillance software in the workplace?", "154a4887-57d4-48b2-a0e2-f01ef4a05f25": "How can public housing authorities balance security needs with residents' privacy rights when using facial recognition technology?", "51d26025-f397-4282-ab4c-e614b1909136": "What are the key considerations when integrating third-party GAI models into an organization?", "e8ca0409-014b-43ca-aa66-18baf30c5856": "How can organizations manage the risks associated with using third-party GAI tools?", "618930f7-ea49-4631-8ec5-748399ab123e": "What are the implications of using third-party GAI systems for data privacy?", "0e830dc5-978e-40ef-b148-0701cdbac3f2": "How should organizations address intellectual property concerns with third-party GAI integrations?", "82ab0ec3-0073-41f1-a863-66c4ff3d8b4b": "What guidelines should be established for transparency in third-party GAI data usage?", "e774a66c-375b-45ad-8d72-28706054193e": "What risk controls are necessary for foundation models versus fine-tuned models in third-party GAI?", "babb4c10-950c-47c0-8a65-383a64edc068": "How can enhanced processes improve the management of third-party GAI tools?", "30032779-8c22-4bdf-9a98-a280bd457baa": "What are the legal implications of incorporating third-party GAI systems in an enterprise?", "490002ec-c8f2-49a0-aca4-451a75374463": "How can organizations ensure compliance when using third-party GAI-generated data?", "b9f8fe8e-7ff3-4431-89f5-73948788cb99": "What role does IT services play in managing third-party GAI integrations?", "787f8222-d352-49ea-b919-7db70989aa72": "What are the consequences of not providing explanations for errors?", "4172ceb7-f347-40ad-8c68-0fe2cc0a23e7": "How does the lack of explanation affect error correction?", "f4bfb069-4ab5-4b30-81a1-2ec30f5ec8ac": "Why is it important to explain errors when they occur?", "b419532f-ec57-4460-bf35-fcdb757ee380": "What are some methods to ensure timely error correction?", "a3239f78-3895-4866-83da-fa3a4f26ff43": "How can the absence of explanations impact overall productivity?", "a349e851-d625-49e2-a919-8040f988ec05": "What strategies can be implemented to improve error correction processes?", "055cba72-28db-40d2-99ce-d761f2ee85b1": "How does the lack of explanation for errors affect team communication?", "2bc860b5-65ed-47ff-800d-bace4aa01b70": "What are the best practices for documenting errors and their corrections?", "4b57ffb5-80b3-461f-a283-70e5cf3971bd": "How can organizations ensure that errors are corrected promptly?", "cbad517a-de7d-4da6-9786-47e6dd684d94": "What role does transparency play in error correction and prevention?", "2d9f919a-f792-4b1b-a10e-a0f578dfc7cf": "What are the main findings of Epstein et al (2023) in \"Art and the science of generative AI\"?", "476325e4-24b4-429b-9788-e84684fab8a1": "How does Feffer et al (2024) evaluate the effectiveness of red-teaming in generative AI?", "6d1dbac6-21e1-487b-b666-a788c3d485db": "What offensive security capabilities of large language models are discussed in Glazunov et al (2024) \"Project Naptime\"?", "58543296-0157-4a5a-9ddc-522f60ee6867": "What are the potential risks of indirect prompt injection in LLM-integrated applications according 
to Greshake et al (2023)?", "e9b7254b-b94e-4e18-a453-c58d8436079a": "How does Hagan (2024) propose to establish quality standards for AI legal help?", "6a69d169-7541-4d51-93a8-35fb4587ee8e": "What strategies does Haran (2023) suggest for securing LLM systems against prompt injection?", "ccca527c-b69c-4c41-b2ea-9aff9b44255f": "How does the study by Feffer et al (2024) contribute to the debate on whether red-teaming is a silver bullet or security theater?", "c66d08da-f096-41ae-8483-61507f48f6c0": "What are the implications of the findings in \"Not what you've signed up for\" by Greshake et al (2023) for real-world applications?", "a2e8cb0d-b35c-4690-bb95-38babd0775ce": "What methodologies were used in Glazunov et al (2024) to evaluate the offensive security capabilities of large language models?", "087fbc2e-973a-4823-936b-0d0243f5c688": "How does the research by Hagan (2024) differentiate between good and bad AI legal help?", "3bcb7050-a5e3-4025-86e9-b9c7a3eb2196": "What are the key responsibilities of AI Actors in monitoring reported issues in GAI systems?", "0bc4535d-2b67-4804-be77-c8734789e122": "How can AI Actors effectively evaluate GAI system performance using content provenance data tracking techniques?", "c73bd3d7-f752-4f5f-9676-e8acebd65d4c": "What steps should be taken to ensure prompt escalation of issues by AI Actors?", "fdd55fcd-d79b-4a87-ae51-09c717172f37": "What are the measurable activities for continual improvements in AI system updates?", "3102119a-0e38-44ee-bcfc-a58a411db0ef": "How often should regular engagement with interested parties, including relevant AI Actors, be conducted?", "7b74d871-b727-476b-99de-f23821860093": "What are the suggested actions for managing GAI risks related to harmful bias and homogenization?", "5ef4015c-16b3-4fc4-983b-5266130165bd": "How should regular monitoring of GAI systems be conducted and what should be included in the published reports?", "e587f1fa-cbd2-454a-85bb-44b757a0046f": "What are the best practices for following incident response plans for inappropriate or harmful content generation?", "48910454-45b0-4445-9366-f056ee60b60b": "How can processes be adapted based on findings from incident response plans in GAI systems?", "7c2d9a6a-5497-4fdd-a880-598d0303db6e": "What roles do human factors play in the operation and monitoring of AI systems?", "b252c530-ffef-4684-adb9-1fadc10df9a1": "What is a use-case based supplier risk assessment framework?", "0c235183-3b08-4ff7-9116-62f623af1175": "How can organizations monitor third-party entities' adherence to content provenance standards?", "3d779012-1e50-47b9-a393-b2536d277ca6": "What are the key components of value chain risk management?", "bb8eff08-42f2-4110-a840-121f230e4c81": "Why is it important to include clauses in contracts for evaluating third-party GAI processes?", "7dd59eab-597e-4402-bbde-5219259f1663": "How can organizations ensure information integrity when dealing with third-party entities?", "8a1a0e9b-c1d6-43a4-b808-6de57e66e46e": "What steps should be taken to inventory all third-party entities with access to organizational content?", "044efb34-2fe0-49fe-8893-7ccb84e34d86": "What are the benefits of establishing approved GAI technology and service provider lists?", "a3484e6b-9afe-4e3d-9b40-45c9b71a39f0": "How can maintaining records of changes to content by third parties promote content provenance?", "8fdd2306-5671-47b8-a6a4-613dcd55f9fd": "What types of metadata should be recorded to ensure content provenance?", "319e4f98-b5a8-4b4e-968d-98c42d92e7b2": "How does intellectual 
property relate to third-party content changes and provenance?", "76e21c77-5abd-4e59-8638-acbbb4e196f2": "What are the best practices for obtaining consent for the use of data in GAI applications?", "e92011ba-08e3-4f27-8a90-097f25c195ee": "How can anonymization help in protecting data privacy in AI applications?", "a127a266-08a2-4440-ad55-820b622a4116": "What is differential privacy and how does it enhance data security in AI systems?", "036f4f23-f219-4b3c-a996-f38ce4a3735b": "What are some privacy-enhancing technologies that can minimize risks in AI-generated content?", "b59f0589-8212-4bea-bd66-233cd0e150df": "How can AI developers ensure information integrity when using human data?", "4e38d0ae-2d12-48c0-b6ef-a1288551e7f3": "What are the key factors to consider in human-AI configuration for data privacy?", "b035b709-5421-4c16-9588-36f6df6dd376": "How should AI system performance be measured to ensure it meets deployment conditions?", "3e8e3d35-e9b4-4d82-8549-e0076fb93944": "What are the risks associated with linking AI-generated content back to individual human subjects?", "ac6ecaeb-d774-42e9-a706-23571e9ca244": "How can baseline model performance be used to improve AI systems with retrieval-augmented generation?", "642dbeb3-b3f9-425b-adf3-f3fd7101c7f2": "What methods can be used to empirically validate claims of AI model capabilities?", "41b113fc-3f56-4b6a-b606-bacc03b2a8b9": "What are the key differences between data privacy and intellectual property?", "95092336-d112-46e6-822a-88f81d722e72": "How can companies ensure data privacy while protecting their intellectual property?", "4b019b57-09a1-4397-af47-7cf29ee23a69": "What are the legal implications of data privacy breaches on intellectual property?", "1411f491-e75f-4111-a9e7-b927c26b8c3b": "How does GDPR impact intellectual property rights?", "7caa53ec-adf7-4bd5-b0d6-16e1ec9de7af": "What are some best practices for maintaining data privacy in intellectual property management?", "724b5a82-32f3-49fd-8271-505c3b2a5866": "Can intellectual property be compromised through data privacy violations?", "1bba92c8-2cf6-481c-a916-e2b90863ada9": "How do data privacy laws affect the sharing of intellectual property across borders?", "b57dc1d4-b29e-4d1f-81d7-93ab353d2990": "What role does encryption play in protecting both data privacy and intellectual property?", "241527a6-6383-42f6-80ad-d47448a4e5de": "How can businesses balance the need for data privacy with the need to share intellectual property for collaboration?", "03c92229-db55-4afa-8ac5-34de6b0a3b79": "What are the consequences of failing to protect data privacy in the context of intellectual property?", "abaa83aa-555a-4d21-acf1-7c73b0d057ff": "What is the risk-based pricing rule in lending?", "073ea355-2455-462d-8d7b-c26913cb9ef8": "How does the risk-based pricing rule affect borrowers?", "17d41936-ead2-475b-b785-6dd826930ae7": "What rights do consumers have if their credit application is denied?", "f99de0df-3a30-4c03-be26-e66bce9f385a": "What information must lenders provide to borrowers under the risk-based pricing rule?", "7b39da4e-33ab-41c7-826b-5d251d01cc39": "How does the CFPB protect consumers' rights regarding credit applications?", "4c661a4d-46b0-4361-881d-a2e3bfffc226": "What are the requirements for warehousing employers in California regarding quotas?", "f95da55e-afa4-49de-aabe-bcc697b0c48a": "How do algorithmic monitoring systems impact warehouse employees in California?", "7654615f-4130-42ba-ab22-d08da8a0d3b5": "What must California warehousing employers include in the written 
description of quotas?", "154ad3bc-8ed0-45e4-9156-62d7e04da73f": "Why is it important for certain decisions to be explained to consumers?", "c8c67888-5617-4df8-aa80-597113c355ee": "How does California law ensure transparency in quota systems for warehouse employees?", "e593b5e5-80a5-4368-8044-4401253e2242": "What are the unique risks associated with Generative Artificial Intelligence (GAI)?", "58567b4a-dad8-4564-bb4b-78ed4e066585": "How can organizations manage the risks posed by GAI?", "980286b9-57b1-41cb-be5b-1c92d2cade95": "What actions are suggested to mitigate GAI risks?", "c4dd04c4-3824-49be-bbb2-28e519e680e2": "What are the primary considerations for GAI mentioned in the appendix?", "710a2983-1bea-4763-b367-819eea4ef3bc": "Where can I find references related to GAI risks and management?", "91a4617d-ae27-46d6-967d-0120e9f36b4d": "What is covered in the introduction of the document on GAI risks?", "33034f48-184c-4594-80d2-8996aa2b9d20": "How does the document suggest handling risks that are exacerbated by GAI?", "fa471afa-5001-4d53-b1fc-23f57e65bb99": "What are some examples of risks unique to GAI?", "71a166e6-1296-4706-9677-80a0a0a51341": "What page contains the suggested actions to manage GAI risks?", "70bebbf1-bde6-4150-b4ef-a9a63f9f8a4c": "How comprehensive is the overview of GAI risks in the document?", "d01efaa2-b7e6-48c4-9117-00d808d80d03": "How can organizations ensure that notices and explanations are accessible to users with disabilities?", "c50f5c77-e898-4823-a5d9-6ead49445ebe": "What are the best practices for making notices available in multiple forms, such as on paper, physical signs, or online?", "7216a1fd-492e-4455-9b1c-87127423efe4": "Why is it important for notices and explanations to be available in multiple languages and reading levels?", "c4b916ed-6f69-42b4-a36d-b4b84b300ab8": "How should explanations be tailored to the specific purpose for which they are intended?", "e7b46646-126f-461b-8c4a-db9cd8cff6c2": "What are the key differences between an informational explanation and one provided for recourse or appeal?", "d67307ed-cd5b-4202-b68c-269b3cb7e195": "How can automated systems provide clear explanations for their decisions or actions?", "8185582a-dc07-4b2e-b832-9e3abf2e271b": "What steps can be taken to ensure that explanations are understandable and actionable by the intended audience?", "655c7548-ad98-400b-a65a-0d9b4aebe77d": "Why is it necessary to provide explanations in the context of a dispute or contestation process?", "8024511f-e7d3-4a95-83f6-e77cedb7f0f5": "How can organizations balance the need for detailed explanations with the need for simplicity and clarity?", "a6cf7580-b17b-496c-a7cf-7bdb4ad5cec8": "What are the challenges in making notices and explanations accessible to the American public, and how can they be addressed?", "1b8b06c4-65a8-4291-8125-da58b1a2fcf2": "What are the main flaws identified in the tool meant to help low-risk federal prisoners win early release?", "86c6a713-18f3-4788-94d7-798e37572f76": "How is the Justice Department working to curb racial bias in deciding who gets released from prison?", "3b3ac587-1908-4993-ad69-1d204707f0ba": "What were the key findings of the 2021 Review and Revalidation of the First Step Act Risk Assessment Tool by the National Institute of Justice?", "161b4640-9926-4d54-b332-3ae24fe928a1": "How does Google's Sentiment Analyzer exhibit bias against the LGBTQ+ community?", "5ef48cd2-e223-4cc4-935f-7b59b03953aa": "What are the implications of Google's Sentiment Analyzer thinking that being gay is bad?", 
"36ac78ac-c399-4cc2-bba8-b4c88049ba0b": "What is the Jigsaw Unintended Bias in Toxicity Classification project on Kaggle about?", "5e6dc05e-c94a-440c-badd-059d28a98ecf": "How does the First Step Act Risk Assessment Tool impact federal prisoners?", "b801b3ed-2645-4ab7-8eab-5f44c53f7cbf": "What steps are being taken to address the flaws in the tool for early release of low-risk federal prisoners?", "953c7a65-0054-4509-b437-2ee2f041e51e": "How significant is the issue of racial bias in the context of prison releases according to the NPR articles?", "48e496f1-8380-4006-9052-8e63ea4fdc38": "What are the potential consequences of bias in artificial intelligence tools like Google's Sentiment Analyzer?", "2bd65018-80f1-4a1e-8ff0-c03d11d2f9ac": "How can I opt-out of an automated system and what are the steps involved?", "4eafde78-6428-426b-85a5-fd83ddfc8a8e": "What are the criteria for assessing the clarity and accessibility of opt-out instructions?", "123f10a6-afad-4ad0-990c-17f252217093": "Are there any user experience research studies on the effectiveness of opt-out notices?", "f3cfe9a7-ea97-4243-a8c0-5cb436fb9198": "What are some examples of human alternatives provided when opting out of automated systems?", "3b9d6877-d127-4076-bbd6-390afed34038": "How quickly should a human alternative be provided after opting out of an automated system?", "f239aa99-3443-48c5-8df4-e7d753f4d1f7": "What makes an opt-out process unreasonably burdensome?", "b7ddcdb9-2b6d-4546-a92a-711d76916a7f": "How can organizations ensure that their opt-out instructions are easily findable?", "7baa6242-7aa9-410d-998c-d8d113ee3ed9": "What are the common challenges faced by users when opting out of automated systems?", "5bcd97f0-208e-48bf-b9c3-85ecfdd844d9": "How can the brevity of opt-out notices impact user decisions?", "ffaea519-7960-4f2b-ade8-290a2a8dfeee": "What are the best practices for providing timely human-driven alternatives in automated systems?", "7f2aa8e1-d759-4d16-835a-7d25726466be": "What is the main argument presented by Angela Chen in her article on The Verge about life insurance and online presence?", "5f1f09e0-afde-4833-8819-12492a28f526": "How might social media activity impact life insurance policies according to the 2019 article by Angela Chen?", "7e7ae934-b104-42e1-a4ec-88b2b1c55cd0": "What are the potential risks of using big data and algorithms in life insurance as discussed by Angela Chen?", "4c2801a4-a960-4bb9-bb30-91106cebe0b1": "How does Angela Chen suggest life insurance companies might use online records in the future?", "2bc6c92b-c94f-406c-97c3-94ff5221a4a7": "What are some concerns about discrimination in life insurance based on online presence mentioned in the article?", "f15e8ee8-6df9-4c38-a7a7-724b35d7266e": "How could life insurance companies benefit from analyzing social media data, according to Angela Chen?", "1a097e10-917a-47d2-bbd0-f888b755d76e": "What examples does Angela Chen provide to illustrate the impact of online presence on life insurance?", "83ce9f99-21b4-47f0-bf69-a1bdc6c6943f": "How does the use of big data in life insurance raise ethical questions, as per Angela Chen's article?", "c0058a46-7dfe-4578-a952-21be6f9e70dc": "What are the implications of Angela Chen's findings for consumers of life insurance?", "6d0329dc-668d-490c-8c24-42c4b1e1311b": "How might regulatory bodies respond to the use of online data in life insurance, based on Angela Chen's insights?", "2f8589f2-71e9-45f2-ae32-f28ec7bf2c4b": "What is the purpose of the technical companion in relation to automated 
systems?", "8cbfa2ab-0832-4346-ac20-86d72deba93e": "Who can benefit from using the technical companion as a reference?", "0424821d-48cd-42b3-a821-45f2d9a02ee4": "What are the three supplemental sections that accompany each principle in the technical companion?", "fc1f5d65-4b48-44ba-bbc9-d567cad9bf4e": "Why is the \"WHY THIS PRINCIPLE IS IMPORTANT\" section included in the technical companion?", "2bac89ba-56e9-4237-bbb9-2b5879e79287": "What kind of problems does the \"WHY THIS PRINCIPLE IS IMPORTANT\" section aim to address?", "dbd6b39e-eb46-4dcc-ae55-b3268513bb63": "What is the goal of the \"WHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\" section?", "2a6d8358-e27e-4c93-986f-95adb301ee63": "How can the expectations for automated systems serve as a blueprint for development?", "8cdabbb1-82d4-4201-914a-3a873fbcf68b": "What practical steps are outlined in the \"WHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\" section?", "95cd7988-3d70-4b9a-9079-383a78c28a45": "How do the expectations for automated systems mirror existing technology development practices?", "679bc28a-263c-454f-b2f4-3c5ccf1ae00f": "What is the vision of the Blueprint for an AI Bill of Rights?", "e4229b12-3425-4bac-9532-22a0fd43652e": "What are the key capabilities and competencies required for effective risk measurement and management in an enterprise?", "03a02bfa-6e55-4dd0-a51f-edce76b48834": "How can diverse demographic groups contribute to better risk management functions?", "825dd8bf-7c6c-4446-8011-c25d6f557389": "Why is it important to consider educational backgrounds in risk measurement and management?", "e2909c32-1244-4a0a-a3fd-cf8797be04ac": "How do lived experiences influence risk management practices?", "af541196-0d8c-497e-a398-c3c9253cfec4": "What role do different professions play in the risk management process?", "2855bf3c-9555-42a9-9e98-d7eda34003a7": "How can domain expertise enhance the effectiveness of risk measurement and management?", "5ac3f009-0665-4ce4-bcca-3c6faa028116": "What are the potential risks of harmful bias and homogenization in AI configurations?", "ba3a4a1d-711b-402c-bc07-9ec87c5389cb": "How can enterprises ensure that their data and benchmarks are representative of diverse user populations?", "a75d2170-8371-41de-9317-9b80e2823d28": "What are the best practices for involving diverse participants in structured GAI public feedback exercises?", "eefc16a2-a6f8-499e-a2f7-12e4df65614d": "What tasks are involved in AI deployment to mitigate harmful bias and ensure diverse representation?", "f5398e05-e48c-4721-b625-1d00761faddf": "What are the best practices for ensuring information integrity in data systems?", "7683e79a-d55d-446d-b33d-e51f78b017da": "How does human oversight contribute to the evaluation of information integrity?", "ed4ea430-7625-4ff5-96fb-d43600713d35": "What are some proven cryptographic techniques used to maintain information integrity?", "77d696c3-d291-4500-99d5-f940800f82f6": "How can automated evaluation methods help in maintaining information integrity?", "a3938f64-6ba3-4182-ad9a-f0a734f44349": "What role does the review of content inputs play in ensuring information integrity?", "65e0271e-8df4-4d79-b667-eb66dbe7884a": "Can you provide examples of automated tools used for evaluating information integrity?", "64ee0887-641a-423f-b769-1ceb31077cfc": "How effective is human oversight compared to automated evaluation in maintaining information integrity?", "8ca7525d-cf25-49cf-b7f2-3c8e0c68c8fd": "What are the challenges associated with using cryptographic techniques for information 
integrity?", "fbce740c-e2f8-4a6a-b5cb-b8e609d9628b": "How do organizations balance between human oversight and automated evaluation for information integrity?", "a19e1eab-0f2e-4afa-8fe7-df69b9bee656": "What are the key components of a robust information integrity evaluation framework?", "95c335f7-1eae-4090-bbd3-d200657a199b": "What are the implications of the data leak involving 235 million social media profiles exposed by a major data broker?", "d4722ec4-cde6-4b3f-a742-a10c5e340440": "How did the exposure of 12 billion records online occur, and what were the consequences?", "f313296f-4197-4b95-a34f-da79b135a5ab": "What are the main concerns surrounding the use of facial recognition technology in public housing?", "8a1416c4-b1bc-4eec-84af-787a472e11f5": "How have Amazon and Walmart used surveillance technology in their efforts to prevent unionization?", "c1dddd25-fdcc-449b-8e21-263bfadff302": "What enforcement actions has the FTC taken against the photo storage app Everalbaum?", "0ddce7bb-a5c7-4fa6-bea6-da3737420a35": "What measures can social media users take to protect their profiles from being scraped without permission?", "7acd2768-ac2c-4dbe-9c92-a27dcf7be867": "How has the public responded to the use of facial recognition technology in public housing?", "0c799549-781b-482b-9074-635d45590cea": "What are the potential privacy risks associated with large-scale data leaks like the one involving 12 billion records?", "24353569-a8b5-4fbb-b2d8-0e04fd061aa6": "In what ways have companies like Amazon and Walmart been accused of spying on their employees?", "cbda8aeb-bccf-46df-ba9f-12a9fc01b0b8": "What are the legal and ethical considerations of using surveillance technology to monitor union activities?", "1e6a21b9-e745-404b-be65-8c7b9e4b0bd7": "What are the key performance expectations for the system?", "a0c122d2-8188-4702-8b56-6d423c5f74d3": "How are human-run procedures integrated into the system?", "a2d4766b-5e37-4f46-adac-a62b1c427c78": "What types of data were used to train the machine learning models?", "1e2f5c68-7e0e-479e-a952-e3d1c6caf4f7": "How were the data sources processed and interpreted for the system?", "63d84cc6-9adb-40a1-896f-b2e6034c339b": "What steps are taken to identify and manage risks in the system?", "b66070c1-0ec6-46ae-9fab-b4445a76fc1c": "How does the organization address missing, incomplete, or erroneous data?", "fbe83be2-10ab-40ca-806f-d957f742ed90": "What concerns were raised during public consultation, and how were they addressed?", "24efaac2-edc5-4a7e-a8a9-2786e4c966e4": "What are the results of the performance testing in terms of accuracy and error rates?", "e9460513-d9f4-4ee8-a89e-9acdf8fdf716": "How does the system's performance compare to previously deployed systems?", "6fff7468-40e8-4590-9f72-615e77d43bc3": "What are the ongoing monitoring procedures and how frequently is performance tested?", "deabd138-adc2-44f5-98ee-322a39c17a98": "What are the best practices for re-evaluating risks when adapting GAI models to new domains?", "ee75ad83-a0d7-4713-99c1-7720e8e912b6": "How can warning systems be established to detect the use of GAI systems in new domains?", "39d5c426-28b0-44e6-9fff-8fb668122c5c": "What are the key risks to consider when adapting GAI models to new domains?", "70c28c2d-9d3b-4641-974e-a1804938ca0c": "How can one ensure that previous assumptions about security and safety hold in new domains for GAI systems?", "adf119f1-b069-4a65-87cd-ad53e5c43a5a": "What methods can be used to detect the presence of PII in generated output text?", 
"dd282118-49ce-47dd-87c2-d4156e944523": "How can sensitive data be identified in generated images, videos, or audio?", "d771683d-c686-4a52-81ee-292ea31f22d4": "What are the potential risks of harmful bias and homogenization in GAI models?", "f49bb512-05ef-4e2c-b6a6-091cbb905329": "How can GAI systems be protected from generating dangerous, violent, or hateful content?", "f863d5ef-78f1-4960-a14d-66a66caa16f6": "What strategies can be employed to ensure data privacy in GAI-generated outputs?", "828a7539-87cf-49c4-a17e-7be3ed25bfea": "How can intellectual property be safeguarded when using GAI models?", "56a2f1d9-fc30-46e6-9705-569c76ebb6c7": "What are the main ethical and social risks of harm from language models as discussed by Weidinger et al (2021)?", "ae85135f-051c-4806-95a0-2cfaeb1b3924": "How do trolls use AI to create fake images of celebrities like Taylor Swift, according to Weatherbed (2024)?", "6ee2f972-193a-4bfb-bd19-604bde142dc1": "What is the focus of the paper \"Long Form Factuality in Large Language Models\" by Wei et al (2024)?", "941d29d9-1e5b-4e48-add3-1024903e747a": "Can you explain the concept of \"Information Disorder\" as outlined by Wardle et al (2017)?", "cd19f434-9802-45cd-9417-bf530750bf85": "What are the key points in the \"Sociotechnical Safety Evaluation of Generative AI Systems\" by Weidinger et al (2023)?", "17234b23-05e3-4796-9357-c3255713355b": "How does the \"Taxonomy of Risks posed by Language Models\" by Weidinger et al (2022) categorize different risks?", "2d3d1d60-0258-4016-b2e7-21895ee3da0d": "What interdisciplinary framework for research and policy making is proposed by Wardle et al (2017) in their study on information disorder?", "b95decfd-0e22-427d-9a93-294412eab257": "What are the implications of the findings in \"Ethical and social risks of harm from Language Models\" by Weidinger et al (2021)?", "858d0487-9ef0-41bd-beb3-28e4bf4dbd52": "How does the paper by Wei et al (2024) address the issue of factuality in large language models?", "58dd02ef-48f3-4312-83e5-1fb54e25c2cc": "What are the recommendations for mitigating the risks posed by language models as discussed in Weidinger et al (2022)?", "2d3012f3-7feb-48d4-b54c-5107353c03d0": "What are the best practices for documenting AI model details, including data provenance and quality?", "93acef7c-f3cf-4bda-848d-c79f08fefdcb": "How can counterfactual prompts be used to explain machine learning models?", "efa2b789-a244-4680-af19-b891e4c72987": "What are gradient-based attributions, and how do they help in interpreting AI models?", "7173f456-fedc-440d-b22f-efa9cf4e85f1": "Why is it important to document the assumptions and limitations of an AI model?", "5d764367-0f8d-4efd-976f-e89b2c8f684c": "How does model compression or surrogate models contribute to AI model explanation?", "95643527-800b-404a-a62c-a3f7390fbdf9": "What are the ethical considerations to keep in mind when documenting AI models?", "694a90b5-31cd-4f46-b221-2bbccbf2ea0d": "How can occlusion or term reduction techniques be applied to explain AI models?", "0697ed1f-3d26-439c-9818-2971e3df5158": "What role do RLHF approaches play in the training of AI models?", "171d3b23-4f45-49d6-a317-a5fe6cf4016a": "How can organizations ensure the responsible use and governance of AI systems?", "dc38fdd2-c4f6-490f-a7e5-5c3751e542f2": "What are the potential risks of confabulation in AI models, and how can they be mitigated?", "13d44da0-2fef-4c2f-b4ff-19599f9b2afb": "What are the main issues that the principle of safe and effective systems aims to address?", 
"fe155026-d872-40e9-8736-9e9a69d08e1f": "How can reliance on unproven technology lead to problems?", "d4c4faa8-502a-4bf2-b182-7aa689ad6ae4": "What are some examples of technologies causing substantial and unjustified harm?", "46c0dbae-a959-405b-b3f9-8c2e83761656": "How can historical data negatively impact automated decision-making systems?", "116fa31d-f588-49de-8ed4-d15b3ba118e5": "What are the risks associated with using technology that has not been proven to work within an acceptable range of error?", "fc759a44-cbe0-404c-83b8-dcb5b364028c": "How can technologies be designed to violate the safety of others?", "4ee837f4-d5a5-4d90-a40a-019addf10b09": "What are some unintended harms that can result from the use of certain technologies?", "c824ae9c-741c-4730-a945-d591f13e0967": "Why is it important to ensure that technologies work as intended and as promised?", "472d7e59-379f-4fa7-9356-fe2608a2ee59": "How can irrelevant information from past decisions affect current decision-making in automated systems?", "079cadfe-b827-4d8b-8847-ce41fba6094c": "What measures can be taken to prevent technologies from causing unjustified harm?", "01e765c9-5aef-4c7c-bca4-d9b4b94e54fd": "What is the purpose of the Technical Companion in the context of developing technical standards and practices?", "33df19f9-df9a-4d75-b8cf-7b262bf4f8b3": "How does the Blueprint for an AI Bill of Rights relate to existing laws?", "c702da49-cb6a-498c-b6c2-a377e4973503": "What sectors and contexts should the technical standards and practices be tailored for?", "2f16d6ca-6f2d-4da2-8d77-9da96198f0aa": "How does the framework aim to impact rights, opportunities, or access through automated systems?", "c5ff5f27-e556-4207-8151-5519b2fe84a6": "Does the framework provide detailed analysis of existing laws?", "9ec597d8-9559-44a5-b175-db525d6f9e63": "What is the broad vision shared by the framework for automated system development and use?", "f6dcae70-8e0a-4f47-9c30-589ffa82d968": "How does the framework inform private and public involvement with automated systems?", "d6cc4e55-78e6-4d3b-9425-9f0e1ffa648c": "Does the framework take a position on legislative and regulatory proposals?", "7ac7defe-9d7c-4996-bda6-56a4fe59a5c9": "Are there examples of existing protective measures mentioned in the framework?", "bb7c4021-0d4e-4b15-b28e-1d9d6664c577": "What is the significance of not analyzing legislative and regulatory proposals in the framework?", "e1aec18d-e71a-4d28-be59-e5be3881d70e": "What are the key components of effective monitoring and governance structures for automated systems?", "b2ab71bb-0a72-4068-b1c1-505a308496ea": "How can unmet needs for change in automated systems be identified and addressed?", "eb859b33-b604-4d38-9b9c-36c1c453a82b": "What are the expectations for reporting by entities developing or using automated systems?", "057efbeb-09ad-4580-a430-acb50a9681a6": "Who should the resulting reports from automated systems be provided to?", "94f7d7a1-6947-4bd2-b4fa-6d8db3896c41": "Why is it important for reports on automated systems to be made public?", "c4bcd7fc-8a32-4295-a4f2-b6da221f7546": "What considerations might prevent the public release of reports on automated systems?", "4d3140c8-cffd-4c71-ab31-dfbc34b26f50": "How should information be handled when public reports on automated systems are not possible?", "7b4e0c55-fcda-4bcb-ab4a-e2333b46c568": "What role do oversight bodies and ethics officers play in safeguarding individuals' rights in the context of automated systems?", "fbd293d5-2f7d-46ba-8a9d-ba56d3e2690b": "Why is 
transparency important in the reporting of automated systems?", "82a4bddf-bde1-4da4-8597-af2b5f75319f": "How can the American people benefit from transparency in automated systems reporting?", "c8815a76-b3d3-4150-9fc2-ffeca9946282": "How do GAI systems contribute to the spread of misinformation?", "e6706517-19c4-47c4-9d33-e88aaa79be85": "What is the difference between misinformation and disinformation in the context of GAI systems?", "c1f9af22-5965-4be1-86a3-e4a301b6af2f": "How can subtle changes in text or images by GAI systems manipulate perception?", "551ffc98-e117-4c8c-aabd-84102ffaa8f7": "What are deepfakes and how are they related to GAI systems?", "5b9a4026-4170-4739-aa59-a82371bcde95": "How can GAI systems be used to target specific demographics with disinformation?", "3a57b342-f32d-4571-bf11-0fd968d2f12b": "What are the potential future threats posed by GAI models trained on new data modalities?", "bdc49d2f-f75f-409c-a019-6bd6e720673b": "How do multimodal models enhance the capabilities of GAI systems in generating disinformation?", "e354dc4e-3651-464a-85e7-a02d6e8c6fe5": "What measures can be taken to mitigate the risks of GAI systems spreading false information?", "e0faed04-d3ac-4ffd-847b-0eb7570099b0": "How do malicious actors use GAI systems to create sophisticated disinformation?", "25e2dfeb-bd2e-4183-bd34-d7a9fb889f4b": "What role do confabulations play in the production of misinformation by GAI systems?", "9d781fd4-80b1-415f-bdc0-cb99a0ad57aa": "What are the key policies and procedures to address AI risks associated with third-party entities?", "5ab4424c-a307-47e8-9524-720e346f1103": "How can organizations categorize different types of GAI content with associated third-party rights?", "3749d629-316f-4418-9f81-884d7050c768": "What are the best practices for managing GAI risks in collaboration with third parties?", "76664a25-080a-4041-947e-a87349ea3aa4": "How can joint educational activities with third parties help in managing AI risks?", "a3d40842-deb4-459e-b156-f9ef4c243572": "What approaches can be developed to measure the success of content provenance management efforts with third parties?", "9ec62f10-d33b-4bcd-a708-9a6b8f4f52b7": "How can organizations ensure data privacy and intellectual property rights when dealing with third-party AI content?", "0e343257-054b-4912-83af-419e09f10542": "What are the common risks of infringement of third-party intellectual property in AI projects?", "8d5d823e-8af6-4d8b-b8f1-c68b7d439a22": "How can value chain and component integration be managed to mitigate AI risks?", "2521694f-ca6a-4805-97d6-166beb459f2c": "What metrics can be used to validate the effectiveness of content provenance management with third parties?", "5985031f-49b1-44cb-9355-1cbdf90a9ec8": "How can incidents related to AI content provenance be detected and responded to efficiently?", "c5db5bcc-a2bf-498d-92ab-5bb0db131203": "What should be included in a report about data collected from individuals?", "46b110ea-13a4-41c5-be85-368d573b2111": "How quickly should an entity respond to a public request for data usage information?", "634c0a75-d94b-4dc9-9e9c-ecb9cfb11231": "What format should the data report be in to ensure it is understandable by most users?", "08fdf655-f40d-4a7d-8eec-5c6ca9a4754b": "What information about data collection should be included in the report?", "c63e76ef-0b53-439a-871d-3b04064a5530": "Who should have access to the data and metadata collected from individuals?", "9bdf0a7c-cc20-4518-b935-616a80516ebd": "What steps should be taken if a user login is 
not available for identity verification?", "3fd33ba3-95e9-4be2-87dd-1dbcf4afee88": "What is the purpose of summary reporting in the context of data usage?", "04331b6c-1532-4bcc-9a9a-f12352764d7f": "How should summary reporting be made available to the public?", "18215d4a-212f-4664-a6f7-f25e3fa46f80": "What are the legal limitations on the data and metadata that can be included in the report?", "c9685d0b-7fe3-47f6-a4b5-99d6cf0b3123": "Why is it important to include time limitations in the data report?", "313a9213-5dd4-4fd0-b6cc-5649c4da50f7": "What is algorithmic discrimination in the context of AI systems?", "36918793-5d0e-48bd-b045-f8ff83ea055d": "How does algorithmic discrimination impact different protected classes?", "664af71b-506c-4536-aa63-5a4c95942aa3": "What legal protections exist against algorithmic discrimination?", "0f5c4986-db15-4307-9880-e51f3bd60751": "Can algorithmic discrimination occur based on gender identity or sexual orientation?", "5dd96edd-ff11-42e1-8ea9-f87e9ae4b2de": "How do automated systems contribute to algorithmic discrimination?", "a14927ac-5614-423d-a8f9-5c0853bab534": "What are some examples of algorithmic discrimination in automated systems?", "67b571d8-1ebc-4a74-bc63-de217aebedd1": "How can organizations prevent algorithmic discrimination in their AI systems?", "176842e4-195d-47ea-bb47-4747e05ce154": "What role does computation play in automated systems?", "d7798ca6-fc5a-4819-813f-1d68a7715e00": "How can algorithmic discrimination affect policy implementation?", "12f7ff31-5e11-4d2c-bedc-26c5cc7322d9": "What measures can be taken to ensure automated systems do not violate legal protections?", "c21ceaec-63ff-45ce-a1c8-4c954a4d0cba": "What are the risks associated with GAI models synthesizing NCII and CSAM?", "c6cf6391-dc4d-4c46-b3b5-fb5a40f28726": "How have websites and mobile apps that generate synthetic NCII evolved over time?", "01e4a6f5-a4f4-4957-9cdc-2aaba3491a36": "What are the characteristics of trustworthy AI in managing harmful bias and ensuring safety?", "241aa1b9-216b-43a4-9f90-6610fe5c28b2": "How do third-party components in GAI value chains affect transparency and accountability?", "2a49cfa9-6bbd-43ad-bea4-28427449269d": "Why is the risk of improperly vetted components higher for GAI compared to traditional AI systems?", "72445f3e-2877-4f4b-9192-ba572bcdb530": "What challenges are associated with vetting large-scale training data for GAI models?", "422aafdb-ec57-4340-bd89-2fc20e3b0da0": "How do pre-trained models and software libraries contribute to the risks in GAI value chains?", "30ac0cae-4e18-4ad7-bb21-0e34d9b27082": "What measures can be taken to enhance privacy in GAI systems?", "d14ef51f-6a7d-4b9b-8c2c-7ba562b3d774": "How does the difficulty of training foundation models impact the development of GAI?", "d716e7c9-5860-48e4-914a-8c72625fdbbf": "What are the implications of automated and scaled online businesses generating synthetic NCII?", "407a7ba6-d65a-4a11-afa4-c6ea6e4c2736": "What are TEVV metrics and processes in the context of AI deployment?", "436567b2-4a53-418b-8002-08cb30c28f31": "How can measurement error models improve the validity of pre-deployment metrics?", "d6bd80dd-1e24-488d-a6e5-eb5aa8895943": "What is the importance of documenting biases or statistical variance in applied metrics?", "84eeb2b5-483b-4794-8d69-a1fb7ca5b57a": "How can domain expertise be leveraged when modeling complex societal constructs like hateful content?", "1149e1f0-19fe-4af1-b052-9c5b0bf7b453": "What are the risks associated with confabulation, 
information integrity, harmful bias, and homogenization in AI?", "3e72a6bd-6a5a-4b12-8156-069a0f9382be": "What tasks are involved in AI deployment, operation, and monitoring according to the provided context?", "1e55727a-0e04-4def-9388-239c06d052dc": "Why is it important to evaluate and document the effectiveness of TEVV metrics and processes?", "a76bd5b4-7983-42f7-98f1-6cf2180a7448": "What challenges exist in assessing AI risks using currently available measurement techniques?", "5bcfbe76-4c4e-4bf9-8a09-cd1dc9eb77c3": "How can risk tracking approaches be improved for settings where AI risks are difficult to assess?", "f8f78806-13b8-4acb-9826-c4408a3c523c": "What are some suggested actions for improving the measurement and evaluation of AI metrics?", "0e48d737-5d25-4c14-94d4-f0be919145c3": "What are the best practices for conducting post-mortem analyses of AI incidents?", "ee67a49d-fa40-4f72-bc61-a39c2144d958": "How can visualizations help non-technical stakeholders understand GAI system functionality?", "963ca3e1-2fd0-4a20-85c7-d1bfe31c0bd9": "What methods can be used to represent GAI model behavior effectively?", "133585cd-97fd-4343-9627-bfa3bdcaee9b": "Who are considered relevant AI Actors in the context of AI deployment and development?", "37f44acd-e83a-4327-9d26-ac7ce250879c": "How should incidents and errors be communicated to affected communities in AI systems?", "44946c07-c978-408b-9b84-a5fd16194ca2": "What processes should be followed for tracking and responding to AI system incidents?", "20bc157a-c8bb-4ff2-9003-6f475b0d145a": "How can after-action assessments verify the effectiveness of incident response and recovery processes in AI systems?", "bc817926-284f-402f-a8b4-8016544b7436": "What are the key components of a Human-AI configuration for managing dangerous or hateful content?", "757d49ae-2c4c-4dda-b834-35d68feba859": "How can AI deployment and development tasks be effectively managed to prevent future incidents?", "ec4a9dd4-cf97-4b49-8736-19a432697eef": "What are the suggested actions for managing GAI risks in AI systems?", "5dc1404b-b9d5-4165-bfba-5ae18b4ef0a8": "What is the Blueprint for an AI Bill of Rights?", "1b4835f2-59c4-4255-a964-61969ea8863e": "How does the Blueprint for an AI Bill of Rights address law enforcement activities?", "8f94848b-91d1-4bd4-9095-8fab3fe85de3": "Why might notice not be appropriate in certain law enforcement contexts according to the Blueprint for an AI Bill of Rights?", "be1037b8-1682-4b8b-ab5b-6bad9c9018d7": "What are some examples of automated systems that might require sector-specific guidance?", "adb12b37-a721-476a-817d-6ae9d45f34bd": "How does Executive Order 13960 relate to the use of automated systems in federal agencies?", "d9941951-34e0-4599-ba08-82c1823f7557": "What are the potential challenges of using AI systems in school building security?", "800426c1-0a93-4185-b638-fc754e6a3feb": "What safeguards are in place for federal departments and agencies using automated systems?", "6cf609a1-e1ab-4388-9eee-76603f895da4": "How does the Blueprint for an AI Bill of Rights balance the protection of sensitive law enforcement information with the principle of notice?", "03ffb16c-be24-46fd-a594-205d9e68e49f": "Why is future sector-specific guidance important for the use of automated systems?", "6ff903dc-0e95-45b1-b7ba-5147dcdd7088": "What oversight mechanisms exist for federal agencies using automated systems?", "90935253-ff29-43e9-9473-f71036e4e5f8": "What are the main security concerns associated with generative AI as discussed by Burgess in 
WIRED?", "a87294ca-3f18-46f9-921e-10e6fa956d49": "How does prompt injection hacking pose a threat to generative AI systems?", "16605b19-6407-427d-8ad6-0bd123fef5ef": "What are the key takeaways from the Canadian Centre for Cyber Security's guidance on generative AI?", "5cd233c2-fb54-486b-bec3-6c406a216562": "How can training data be extracted from large language models according to Carlini et al (2021)?", "799db38e-e7df-45c2-af2e-701b12bdbee5": "What methods are suggested for securing AI pipelines as per the Mandiant blog?", "7b243fdf-3ecd-4747-87fd-c6c9860cb3ae": "What is the significance of next word prediction in large language models as explained by Burtell et al?", "d623ee79-0c8b-4ee4-a9ce-edbf633b20de": "How does the memorization of data in neural language models impact their security, based on Carlini et al (2023)?", "fe664b26-b505-4d49-b30f-fe252750d38f": "What are the potential risks of generative AI highlighted by the Canadian Centre for Cyber Security?", "d5d0438c-36f6-4328-90cb-4baa96b6a1bc": "How does the Georgetown Center for Security and Emerging Technology explain the power of next word prediction in AI?", "466b29fa-ac89-4e2a-8791-cfe170592641": "What strategies are recommended to mitigate the security flaws in generative AI systems?", "39725f9a-68c2-4fe9-aa8d-9a4f08d023ae": "How are AI systems being used to limit access to equal opportunities in education?", "ca7c2204-b6f5-4781-bf3b-8d9f02540ffd": "What are the concerns related to the use of remote proctoring systems in education?", "f43c197c-7302-4f65-a850-8d1592d44eee": "How does facial recognition tracking impact students in educational settings?", "6d8e2c0e-e4cf-4ebb-a417-815a64668c79": "What are the potential issues with automated tenant background screening in housing?", "14c91abd-8d01-4cf0-8523-0812a5c1c840": "How does facial recognition-based control affect residents in housing complexes?", "68d008be-5997-4d96-96e0-731c60a6c09e": "What are the risks associated with discrimination in automated hiring screening?", "cc4bb020-b92c-4f19-adc2-06b98ff1da33": "How does workplace surveillance impact employee privacy and opportunities?", "e63cb0d8-46ce-4928-aef8-ccd1f1693044": "What limitations of existing privacy laws were discussed by the panelists?", "6f3c8f9d-9bbc-44cd-92a3-95215c3cfee5": "Why is it important for students to have the ability to reinvent themselves in terms of privacy?", "2f47fea5-c195-4bb6-889a-3d498268e761": "What are the concerns related to the use of robot teachers in education?", "b23c2ac0-3c8d-418f-9d3a-4d355510c475": "What are the key components of an incident response plan for third-party GAI technologies?", "e341d43e-c5cf-470a-9937-1a0522709924": "How often should third-party GAI incident response plans be rehearsed?", "d32de1da-1496-480e-bfcb-a256b5aa2656": "What are the best practices for aligning incident response plans with relevant breach reporting and data protection laws?", "0fc878ed-8fa8-46f3-a457-943aac86deb4": "How can organizations ensure effective communication of third-party GAI incident response plans to all relevant AI actors?", "62ea75e9-a1f9-482e-b748-93e00e24299b": "What steps should be taken to define ownership of GAI incident response functions?", "d326718b-3c54-47fe-b90b-e026762b440d": "How can retrospective learning be used to improve incident response plans for third-party GAI technologies?", "8113c5c3-1121-4595-b984-3595f621efb3": "What policies and procedures are recommended for continuous monitoring of third-party GAI systems in deployment?", 
"1f685537-4d22-4888-90a8-cf32f7774031": "How can organizations address data redundancy in GAI systems, including model weights and other system artifacts?", "32b71e7d-ef8b-49de-a3f8-f906cd313fe2": "What are the potential impacts of harmful bias and homogenization in GAI systems, and how can they be mitigated?", "0ee66a25-f2c4-434c-b514-7affa3493429": "How should organizations integrate value chain and component integration considerations into their GAI incident response plans?", "f150a1c8-dad3-432e-91e2-c40098a5ebd7": "How can LLMs deceive humans into believing they are speaking with another human?", "1d4e22f5-8de7-4f07-9526-61c7936f9459": "What are the potential risks of adversarial prompting in LLMs?", "7c6bd923-b691-426e-beb9-67b46c185a03": "How do researchers study the extent to which humans can be deceived by LLMs?", "04d3f9c8-515a-4f01-8cae-62fb636fd1cc": "What are the characteristics of trustworthy AI?", "b4f47764-9f91-49f1-88a2-221cc0e06715": "How can GAI systems produce inciting or radicalizing content?", "3e394353-4b52-4a52-af48-562eea9f2152": "What mechanisms allow LLMs to generate dangerous or violent recommendations?", "36aee960-dcad-4e5c-97d0-05f4fc62b815": "How can harmful bias in AI be managed to ensure fairness?", "a4b69757-7dcd-4eff-90eb-a30ced99f719": "What makes AI systems safe, valid, and reliable?", "72048f3a-c23d-42a8-8194-a14c109a5af2": "Why is it important for AI to be explainable and interpretable?", "2047cc7b-4522-4ef9-93de-8bc09531f843": "What are the potential downstream impacts of confabulations in GAI systems?", "ab3b72f8-672c-486e-bc6b-9461baa761da": "What is a pre-deployment assessment in the context of surveillance?", "8e05f5b4-738f-4015-9698-0d841d8727cf": "How is a disparity assessment conducted in real-world surveillance deployment?", "e41871e4-fd71-4858-9f31-90e4bce717ba": "What are the specific goals of data collection in surveillance systems?", "0aee33e7-9f19-42ba-9f6f-020edc4551d5": "How can one ensure that only the minimum required data is collected during surveillance?", "780e0590-0606-443d-a7e8-2891d967ee16": "What are scope limit assessments in surveillance data collection?", "85dfacbf-8094-47ad-80dc-7bbcc46712ca": "How are data retention timelines determined and justified in surveillance systems?", "6c98830e-c446-42a8-ac57-f8f46ee0a255": "What is the impact of surveillance on individual rights and opportunities?", "7abcd464-ee77-4ce1-8d41-57315a78fed9": "Why is it important to have an independent party assess the impact of surveillance?", "cc012aac-5ce3-434c-8e51-35c1e8338ce4": "How should surveillance reporting be provided to ensure clarity and machine-readability?", "51e0fc29-06be-4a85-a0ae-1fa144bff1cc": "What are the best practices for documenting the impact of surveillance on access and rights?", "b74eff6f-0348-4f4d-8455-ec94c409abc3": "What are the best practices for obtaining user consent for data collection?", "0db718f0-8251-47d3-83c0-f9a203c06084": "How can companies make consent requests more understandable for users?", "84f786f2-b45d-4028-8864-a9c521374ba4": "What are the current issues with notice-and-choice practices for data use?", "39695c7a-9723-424f-a040-717c4cadc993": "What enhanced protections should be in place for data related to health and finance?", "48492acb-3e38-45a7-bcde-061e9fcbb695": "How should data pertaining to youth be handled to ensure their protection?", "64e6cef4-2fdf-4121-b410-fc7168f68289": "What are the necessary functions for which sensitive data can be used?", "71ec2ac9-cc52-4831-a314-2f92000494e4": "What 
ethical reviews should be conducted before using sensitive data?", "8dd38cfb-46a6-4d54-9560-fa7036a393c5": "How can communities protect themselves from unchecked surveillance?", "40a38d86-7774-4a05-b3ec-46ee736efe3f": "What kind of oversight should surveillance technologies be subject to?", "5303236e-9b24-4300-a6eb-71fd2660ba8e": "What are the potential harms of continuous surveillance and monitoring?", "68145309-ec97-431b-a2e4-ff7c84ae2b92": "How can I opt out of an automated system in favor of a human alternative?", "bc2c2f00-164e-4fb2-b04f-0a8e11a127b2": "What are the criteria for determining the appropriateness of opting out of an automated system?", "99c78782-3873-4806-b67a-5057aee1fe94": "Are there any laws that require a human alternative to automated systems?", "f1218cc9-8cf6-492a-87b4-e7e2b34939a8": "What should I do if an automated system produces an error?", "550182c7-921f-4b50-a93a-089c2d1e4f06": "How can I appeal or contest the impacts of an automated system on me?", "38276fcb-783d-4a66-b4ab-96d3654c9707": "What is the process for accessing human consideration and remedy if an automated system fails?", "64350a23-d7dc-4a12-b082-a57617fd4d14": "What kind of training should operators of fallback and escalation processes receive?", "db717bad-ccdd-4097-a630-6459902c89c3": "How can I ensure that the fallback and escalation process is equitable and effective?", "bb183df4-5c49-4847-b777-9b605daaec7d": "What are the reasonable expectations for opting out of automated systems in different contexts?", "126df14a-0e79-4f7b-9ba1-5cb3197cef81": "How can I access timely human consideration if I encounter problems with an automated system?", "bb600a12-cf3e-430f-aeac-ffb55080bc75": "What are biological design tools?", "ab0afd54-b8f3-4255-bcea-00d7f469fa4d": "How can I access the paper on biological design tools on arXiv?", "4b17bc0d-84c9-44ce-b287-5873628bcb88": "What is the significance of the paper \"2306.13952\" on arXiv?", "d8711a1f-0375-405e-88fb-47084c2dd456": "How do biological design tools impact scientific research?", "4e3d1faf-aaf7-4382-bfdb-f07986da3774": "Can you explain the main findings of the paper \"2306.13952\" on arXiv?", "01016449-dd51-43e2-8064-6163d76960ca": "What are some examples of biological design tools mentioned in the arXiv paper?", "96010c49-fead-4764-a1cf-52f08f825c94": "How do I cite the paper \"2306.13952\" from arXiv in my research?", "1a7dc66f-7d3c-449a-91a8-730ae4fbf5e0": "What are the applications of biological design tools in biotechnology?", "618088f5-7d87-423c-94f7-092dad08e9de": "How do biological design tools contribute to advancements in synthetic biology?", "7a5bd3f1-0117-49f5-847a-c98c3e1ed923": "Are there any limitations discussed in the arXiv paper \"2306.13952\" regarding biological design tools?", "396fdf65-f8c8-44af-b354-a6a3a8080cf4": "What are some effective mitigation procedures to address algorithmic discrimination?", "53b2c06d-8db2-4193-bad2-2cabf8dd64af": "How can organizations ensure ongoing monitoring of automated systems to prevent algorithmic discrimination?", "33223652-33b9-477b-a3f2-76a8fd138783": "What are the best practices for performing disparity assessments on automated systems?", "669ee0bf-cd38-407d-a36e-b02d7efaeba6": "How often should automated systems be monitored for algorithmic discrimination?", "ba748242-63e0-4e5f-9536-5e37095f34d7": "What approaches can be used to assess whether an automated system has led to algorithmic discrimination?", "369d0faf-1db8-4137-8253-078bff29bf59": "How can changes to the context of use or 
associated data impact algorithmic discrimination in automated systems?", "177fe09a-b7d7-45cf-a228-70fe1d4708d3": "What role does demographic information play in monitoring and mitigating algorithmic discrimination?", "74ec3300-8c0b-4be9-9382-88a845824eef": "How can organizations identify patterns of unusual results in automated systems?", "5cf0dfa4-8cc2-49d6-a074-c9d97d87b51e": "What are the challenges in performing regular assessments of algorithmic discrimination?", "77d5bb4a-5a5b-46c2-987e-911e1f0d76e7": "How can testing with a sample of users help in identifying algorithmic discrimination in automated systems?", "269c41f4-2524-4d35-9147-09e322f2504d": "What is model collapse in the context of AI and synthetic data?", "53226a81-7a32-4f91-a589-46450a0b7283": "How can overly homogenized outputs lead to unreliable decision-making?", "0d9724ae-bfe2-47f4-a021-1296db5dd676": "What are the risks associated with foundation models acting as bottlenecks?", "db9015f0-bda2-4f91-9808-19738ae88078": "How does model collapse threaten the robustness of AI models?", "b87fc7dd-8992-4529-85cb-066cbde37dcc": "What does it mean for AI to be \"Fair with Harmful Bias Managed\"?", "eecaaa8a-3251-4874-9a51-cd62286c5095": "How can synthetic data contribute to model collapse?", "bfb13ae7-473a-4caa-968a-c783c333b1e6": "What are the potential consequences of overly homogenized content in AI systems?", "030514da-a5fd-4a1f-ac10-cf8922a6150f": "How can foundation models amplify harmful biases in downstream systems?", "43d3ae5b-1657-46fb-920d-87bbf081d086": "What are the characteristics of trustworthy AI?", "40e129bc-068b-44de-942b-641c29a2983a": "What risks are involved in human-AI configuration and interactions?", "3eda549a-f514-4a5a-8cef-68367b00516e": "What are some real-life examples of laws and policies that protect rights and opportunities in AI systems?", "4359e62b-013c-45a6-b540-f16600b3d7c0": "How does the Department of Energy's AI Advancement Council contribute to ethical AI use?", "f8696290-800f-4952-81bc-3020e6f11c09": "What are the key components of the Department of Defense's Artificial Intelligence Ethical Principles?", "38c06f0b-84d5-48fa-ab9f-4a53a84e8916": "How does the US Intelligence Community ensure ethical use of AI systems?", "b81f442e-1b5f-43da-8261-9b2c1363ea3d": "What practical technical approaches are being used to protect rights in AI systems?", "dba6ad78-0e3e-4068-a3b5-38c5ab583a94": "How do sociotechnical approaches help in the ethical development of AI systems?", "4b849fa7-2c5a-41a5-b197-62210fadaf66": "What issues does the DOE AI Advancement Council address regarding AI systems?", "6f3964c4-6694-4977-976d-770e0175af87": "How do the AI ethical principles of the Department of Defense differ from those of other agencies?", "34e35d11-aa65-49a1-92b2-ab8eb6f362e6": "What role do US government agencies play in the ethical implementation of AI systems?", "77bca3a1-9687-4e84-8144-d0e76454c55c": "How can policies ensure the responsible use of AI in national security and defense activities?", "adb044f3-b06c-4147-b270-388aa7ce28a1": "How can AI potentially enable malicious actors to access CBRN weapons?", "2cc96dd4-379c-4ea0-8aab-bf6fcb0ee94c": "What are the risks associated with LLMs in the context of CBRN information?", "6b0d4619-0b82-4e50-9d25-50215d1b4223": "How might LLMs facilitate the misuse of biological and chemical threat knowledge?", "7f729371-7c4c-40c9-9651-c84ceaf4ab59": "What did recent research find about LLM outputs related to biological threat creation?", 
"4bcda5c2-bc2c-4f9b-a31e-c90c1bffe24b": "Are LLMs more effective than traditional search engines in providing information on CBRN threats?", "eab83c61-d25e-47e3-bf55-cec2cf22dd7d": "What are the implications of LLMs for individuals without formal scientific training?", "80e574c2-a56c-4ade-b455-0eca52117b22": "How can transparency and accountability be maintained in the AI lifecycle concerning CBRN threats?", "7af58ef3-a1c9-420e-9eec-b2eecfc7ba18": "What measures can be taken to prevent the misuse of AI in the development of CBRN weapons?", "cb512b7c-e79e-479d-9557-8757c4fda4ed": "How does the accessibility of biological and chemical threat knowledge impact security?", "16c95e95-db9f-48d8-9c3f-fbf32e49bf92": "What are the potential dangers of AI in the context of CBRN information and capabilities?", "34bbf002-cd8d-4886-a398-74853f3697c0": "What are the regulatory safety requirements for medical devices in relation to AI?", "332c9a40-a1fb-448d-9067-d81497b47a04": "How do sector-specific privacy protections impact the implementation of the AI Bill of Rights?", "05415269-22c9-4836-aceb-97f1c11d2051": "What new laws might be needed to ensure the protections proposed in the AI Bill of Rights?", "ac4195ce-e0d5-4720-a4ce-5bcc8078b64c": "Are there any exceptions to the principles in the AI Bill of Rights for law enforcement?", "05f22b03-2ba9-4910-8467-4cb79c9a8206": "How can existing laws be balanced with the new principles in the AI Bill of Rights?", "d202eb6e-03df-49f4-88d4-b160a4626fcf": "What are the practical challenges in applying the AI Bill of Rights to specific use cases?", "02abe32e-eeca-4b60-9b32-22ae7ce787a5": "How does the AI Bill of Rights propose to protect civil rights and civil liberties?", "8afa840c-a2a0-42cb-95b1-8f1b404515e3": "What role does the private sector play in implementing the AI Bill of Rights?", "a0f7a50f-5987-4c5c-98a6-36316797bea8": "How might new policies be adopted to support the AI Bill of Rights?", "817d708c-fe29-43ce-9478-5bc616838321": "What are the competing public interests that might affect the application of the AI Bill of Rights?", "9cb6e467-ad95-44c0-841b-777832cfa4aa": "What are the safety implications of automated traffic control systems in smart cities?", "eb391602-3d68-4cb9-a04a-29b9595203c3": "How do electrical grid controls contribute to community safety?", "c3b9539e-277f-4737-9e5e-e386eeebd7a7": "What role do industrial emissions control algorithms play in environmental protection?", "73302da7-3bef-402f-9bf6-f12df15c536e": "How do smart city technologies impact the safety and efficiency of urban areas?", "58e6a606-3cd6-48fc-9dad-9c6fdc181f83": "What are the ethical considerations of using algorithms to adjudicate benefits and penalties?", "2d6ccf02-270d-43e3-9410-be3e588f9b6d": "How effective are fraud detection algorithms in preventing fraudulent activities?", "22b01cb1-5a1a-4789-8b56-4d7baa49fcc7": "What are the privacy concerns associated with biometric systems used for access control?", "dbb74eb6-3fde-40d5-8df3-bcc934e6a158": "How do access control algorithms determine eligibility for services or benefits?", "7b7be751-057b-4a7f-88ad-cfe0a6255aca": "What are the potential risks of using fully autonomous systems to make benefits-related decisions?", "0a96f62f-6fcc-42d1-a86d-2d07836fa4ad": "How do environmental impact control algorithms help in reducing industrial pollution?", "c12924b2-38c5-459b-8743-4004840152f9": "What is data memorization in the context of machine learning models?", "1ee45afd-2181-4a4b-8885-4e243fe3c9ba": "How do adversarial 
attacks on LLMs reveal sensitive information?", "064348ce-09cb-4cf5-9b13-efb66fb26273": "Why is it important for model developers to disclose specific data sources used for training?", "07faf7be-a125-488a-8fc9-95d2a01f7983": "What are the privacy risks associated with data memorization in AI models?", "0f2299ca-5f65-45fa-bec4-3d8a5dd6667d": "How can AI models infer sensitive information that was not part of their training data?", "082dc681-2582-47ff-9159-652af70f6d28": "What are the potential negative impacts of AI models inferring PII or sensitive data?", "8476f9de-1c46-484a-9d42-63c47d4e3ddf": "How can users be made aware of whether PII was used in training AI models?", "1930f79d-9ceb-4935-a643-c1d80c961fee": "What measures can be taken to prevent AI models from leaking sensitive information?", "d28999ed-71ad-4d55-afa6-e2d551dffcee": "How does the collection of PII for training data pose privacy risks?", "968af77b-e22e-4bd8-b951-9a663a94877d": "What are the challenges in ensuring that AI models do not generate or infer sensitive information?", "9c573068-8e35-41b2-879e-161babef96ed": "What are the capabilities and limitations of monitoring systems in deployment through TEVV processes?", "ead604ce-72c9-4dc9-a2ec-008dc710ab0e": "How do humans engage with GAI content in decision-making tasks?", "e690c10f-4933-40e7-835f-6034cc551627": "What are the benefits of using overt disclosures in GAI systems?", "ffc79ee0-1f3c-4956-b746-ac94aae246e9": "How can organizations document and delineate GAI system objectives and limitations?", "bf06a17c-7eb3-4ac4-9260-bea0f24eedfd": "Why is provenance data important for GAI systems used in content creation?", "97d2ebc4-8668-4f50-9e9d-4dec70f48766": "What are robust watermarking techniques for GAI systems?", "87372dbb-28be-4288-810b-cdec1b6c9afd": "How can metadata recording techniques help trace content origins and modifications in GAI systems?", "2423bd23-5f6b-4730-a392-af960a751c0b": "What tools and repositories are available for metadata management in GAI systems?", "fd2d6cb9-81af-4234-a95b-65534c17a395": "How can narrowing GAI task definitions to include provenance data benefit organizations?", "845cfb86-2eb6-43f6-bbc3-bfbd925cd21a": "What is the role of structured public feedback in enhancing content provenance?", "4bdb3f89-538a-40e3-9935-8e433ab9e6e2": "What is the purpose of the Generative AI Public Working Group (GAI PWG)?", "6905b587-39e3-4b09-ac42-6cded7de2b9c": "How does the GAI PWG obtain multistakeholder input on GAI risk management?", "13355254-5606-49a9-a3eb-4769a610066b": "What are the four primary considerations relevant to GAI discussed by the GAI PWG?", "e2b55aad-14c7-43c8-bdad-c0dcd2877575": "How does NIST plan to update the AI RMF subcategories and suggested actions in the future?", "ad739be8-7f90-4ee1-817e-e3f5a425b4f2": "What is the role of public feedback in the GAI PWG process?", "ea9cb785-05ef-4f79-8053-0816fff5eb8d": "How does the GAI PWG ensure transparency and collaboration in its process?", "0e7c873d-f587-4cfe-bb7e-5b4877644666": "What is the significance of governance in the context of GAI risk management?", "329c3806-ac7f-48ea-8945-74a6f91b5d0f": "Why is content provenance important in the context of generative AI?", "5a1e1ec3-fda4-453e-ab8e-ce1d9faad797": "What is the importance of pre-deployment testing for generative AI systems?", "5e8537e8-4e6c-41f2-9b53-2a9692305988": "How does incident disclosure contribute to the management of GAI risks?", "5c7e3648-de65-4b1e-b755-1e099598c19f": "What are the key strategies proposed by 
the Information Technology Industry Council for authenticating AI-generated content?", "d119441d-bce2-47b9-9924-dbf87e4cc817": "How does the concept of algorithmic pluralism aim to promote equal opportunity according to Jain et al (2023)?", "22519aa4-4650-4092-b1b5-e3ac5d8f725f": "What are the main findings of Ji et al (2023) regarding hallucination in natural language generation?", "48b1844d-583d-4e62-908d-889b415b5c31": "How do people typically react to AI failures, based on the study by Jones-Jang et al (2022)?", "cfb67ae3-43b1-4450-9bb3-d7f29aa0e86f": "What factors contribute to algorithm aversion as discussed by Jussupow et al (2020)?", "2c4bf45c-f7e6-46e1-9e67-30c91c0444c8": "Why do Kalai et al (2024) argue that calibrated language models must hallucinate?", "ab2fee39-2a46-49d1-8cb6-55b4167b1b63": "What are the implications of automation bias and algorithmic aversion for the adoption of AI technologies?", "dc7ea38d-faed-4f3d-a7b0-8c1ae79788dc": "How does perceived controllability influence people's reactions to AI failures?", "7ec7f6e0-8490-4dab-ad43-7031aaff916f": "What are the potential solutions to mitigate hallucination in natural language generation as surveyed by Ji et al (2023)?", "3835091e-5a77-48ed-b786-16833c6dad03": "How does the literature review by Jussupow et al (2020) explain the reasons behind people's aversion to algorithms?", "14ce14f2-5924-440f-9f82-1d9518db6f9e": "What are the best practices for documenting GAI risks associated with the system value chain?", "c102bbce-23e6-4b6e-a359-2de7b683d7f0": "How can organizations identify over-reliance on third-party data in their AI systems?", "1bd749b4-cb8c-4f68-951e-4dc05f918076": "What are the key components of a contingency process for handling failures in high-risk third-party AI systems?", "4ba5ee61-6497-4e28-86f2-603d556d088f": "How should incidents involving third-party GAI data and systems be documented?", "17f3b4d5-ed49-486d-af7c-5c8150c925ef": "What are the common risks associated with using open-data and open-source software in AI systems?", "9bf8ec79-8c71-49e3-9cf1-bfcf6e900eac": "How can companies integrate value chain and component integration to mitigate GAI risks?", "d4c8b3c6-aaa0-44ad-89d8-826c0e200e82": "What steps should be taken to ensure intellectual property is protected when using third-party AI systems?", "5724c74c-76af-4944-89d9-d6e91234c261": "How can procurement processes be optimized to reduce risks associated with third-party AI entities?", "a22ca24b-e8c8-49a3-9ddf-5f838e432323": "What are the roles of contractors and consultants in managing third-party AI risks?", "b00e06f3-0822-47bc-8d94-e046c58311d6": "How can organizations monitor and operate AI systems to ensure they are not overly reliant on third-party data?", "2f3330e7-4aca-4708-bbc6-29c775344e94": "What is an algorithmic impact assessment?", "24490295-2bcb-4b3a-b834-033104ba2420": "How can entities ensure accountability in automated systems?", "b0e2eb81-c76f-407d-87a7-bf82bdc5c994": "Why is it important for notices to be timely and up-to-date in automated systems?", "2dca1178-1147-434b-b575-69e710673993": "What should be included in notices about automated systems?", "93bf2a5e-1a9f-4aa4-a500-5dde06657e7c": "How can user testing improve the clarity of notices and explanations in automated systems?", "f79d35e1-9c4c-471c-b976-8c3dae58e572": "What are the key components of Algorithmic Discrimination Protections?", "62517b56-3063-4614-8fd7-d987ee4bf4e2": "How should entities notify users about changes in the use case or functionality of 
automated systems?", "c53f1315-615a-4dfa-a417-1ea3f2c92e3b": "What are the expectations for reporting in the context of automated systems?", "bd8dd202-37fc-4f0e-9788-e8deb33eabdb": "How can entities ensure that users understand the notices and explanations provided for automated systems?", "fbf6c048-26ac-4ec7-84a1-ae7cc41ebfb7": "What role does research on user experiences play in the design of notices for automated systems?", "38534eb5-4881-4f58-be97-63238c6c479b": "What are the current limitations of existing GAI tools in facilitating CBRN weapons planning?", "b27e8b4f-0fcc-41d9-a4e8-94ac31878ba1": "How can ongoing assessments improve the monitoring of AI tools' capabilities in CBRN contexts?", "991a0d40-0a1c-44ba-b955-3de8eb4bd067": "What measures can be taken to ensure GAI systems do not access sensitive data related to CBRN weapons?", "e84751a4-90f8-413b-b3b6-e91f28305dcc": "How important is it for AI systems to be explainable and interpretable in the context of CBRN risk assessments?", "89c9205b-de28-4888-8c7f-79f9900b731b": "What are the key characteristics of trustworthy AI in the context of safety and security?", "8c67ad80-5cd3-4842-88e6-fc5e71086935": "How can we enhance the safety of AI tools used in high-risk areas like CBRN weapons planning?", "8b30e5d6-0420-4224-b85d-052d8d01a5b4": "What role does explainability play in the trustworthiness of AI systems?", "766d94d6-ff7e-4281-9d69-ba577c2b2228": "How can we ensure that AI systems remain interpretable as they become more advanced?", "0d7ff6a0-f0ab-42d2-bc69-3d81e2146c1a": "What are the potential risks of GAI systems having access to CBRN-related data and tools?", "03c6100c-5fe4-4dff-9b62-da870a412d36": "How can we balance the advancement of AI capabilities with the need for safety and security in CBRN contexts?", "0f4996c3-02e1-4284-94a7-c3695bba5538": "What are the rights of the American public when it comes to opting out of automated systems?", "070d096a-c121-4062-9907-63cb49651bb5": "How can people with disabilities ensure they receive reasonable accommodations instead of automated systems?", "5b9ce146-9406-4f12-8eae-194503dd5c93": "Why is it important for the American public to have a human fallback system for automated services?", "77c334c1-312b-4323-9cf6-971bedb52b19": "What protections are in place for the American public if an automated system fails or causes harm?", "93b3a12a-1c0a-4a3d-8a80-a250055ab49f": "Are there any laws that require a human alternative to automated systems?", "0fcc07cd-f226-4132-b9bb-62b83ccc8c18": "How can the American public conveniently opt out of an automated system without being disadvantaged?", "e6ab4ce6-4166-46a9-8f52-a88ce88d15f0": "What are the potential risks of relying solely on automated systems for critical services?", "61dd4dc8-b98c-4c08-b83e-b7c8eb005bd6": "How does human review provide protection against unexpected scenarios in automated systems?", "28b5c99a-f4e0-41c7-b3c0-57d98c306d61": "What measures are in place to ensure that time-critical systems have a human fallback option?", "15bda80e-6fb3-4e9e-b3aa-343fbfc57016": "Why might an automated system fail despite rigorous testing?", "b17b2a91-c2f2-421a-9161-f577f4f49902": "What is the AI Bill of Rights released by the White House Office of Science and Technology Policy?", "1c591402-7c09-4342-9a4c-3e447029fa73": "When was the Blueprint for an AI Bill of Rights published?", "3f4d1548-89ee-4941-adf0-70889cee50a0": "What is the purpose of the AI Bill of Rights framework?", "3451f2b4-c2cf-4c53-979c-50973acbb1bd": "How can I access 
the Blueprint for an AI Bill of Rights online?", "d0bc240e-3df8-4e3c-91e6-826ec2f75c9a": "What is the role of the Office of Science and Technology Policy (OSTP)?", "7edebb20-1da2-49a3-b7c1-035d6f51e95c": "When was the Office of Science and Technology Policy (OSTP) established?", "8aeea347-cc31-433e-b950-82f344182a53": "What prompted the development of the AI Bill of Rights?", "e6649708-a5e7-4253-873b-745f594a79a0": "How did public engagement contribute to the AI Bill of Rights initiative?", "550ed3d0-68d5-4c20-9af9-61b217f9c8ec": "What are the main goals of the National Science and Technology Policy, Organization, and Priorities Act of 1976?", "a1df848c-b03a-406d-a467-61ee5c89957f": "Who benefits from the advice provided by the Office of Science and Technology Policy?", "50365036-5406-482d-a4f4-64f43b779b3a": "What is continuous monitoring in the context of GAI systems?", "fe6f7ccc-7cd6-4b50-8a46-8aba632ecc48": "How can feedback from affected communities improve GAI outputs?", "608884ac-731d-4bfa-b339-8c8d39749dd9": "What are some techniques used to evaluate the quality and integrity of data in AI training?", "2372552e-4f0d-4f62-8715-f7845287595c": "What is chaos engineering and how is it applied in AI systems?", "b404ae73-34eb-4434-82b9-0e2c6ab3bf2e": "Why is stakeholder feedback important in evaluating AI-generated content?", "fdc418c4-4bca-46d6-95b4-c7d346c634ca": "What are the benefits of structured human feedback exercises like GAI red-teaming?", "7e203668-296d-4740-807b-996e28110ecf": "How can GAI red-teaming help in managing risks associated with AI?", "fb5db5ae-01cb-4cf0-8c7b-9193091f22ff": "What are the potential negative impacts of GAI systems that need to be monitored?", "4f49fc89-1623-43ba-9f22-18c09e38f9ef": "How can harmful bias and homogenization be mitigated in GAI systems?", "5f86869b-e204-4551-8742-3ac0a5c93510": "What are the key components of tracking and documenting risks in GAI systems?", "92689af0-19a9-4930-8cc2-d7dcf81cda7a": "What are the common issues users face when conversing with chatbots?", "44aae3aa-24ad-4059-9e44-4c37b8d088b4": "How do users typically react to unhelpful responses from chatbots during distressing situations?", "cef22ba7-26f8-4a69-8040-adb5bf18fa6f": "What risks are associated with the creation and public exposure of offensive or hateful language by AI?", "cd099f8e-41d5-45f2-9799-b0ffd6d8b77c": "How can AI-generated denigrating or stereotypical content contribute to dangerous or violent behaviors?", "63ef9f64-d9a2-4b88-9c2f-0c99870fc0e5": "In what ways can the spread of denigrating or stereotypical content exacerbate representational harms?", "ab431b98-c209-4dcd-a050-4a14e1bdb05b": "What characteristics make AI systems trustworthy, particularly in terms of safety, security, and resilience?", "802de2ca-5fa0-4365-94c2-2f48d2551dc3": "What are the privacy risks associated with the training of GAI systems?", "a7780055-4593-4371-baf2-0854c10b4668": "How does the use of personal data in GAI training conflict with widely accepted privacy principles?", "2eb881cb-2d98-4856-b5d4-062d6a883283": "What are the key privacy principles that might be at risk due to GAI system training?", "1835f2f8-1619-4637-a8a9-fb620ed7cb26": "How important is transparency and individual participation (including consent) in the context of GAI system training?", "a5a1e993-48c9-46ea-8808-a32d93da94aa": "What are the ethical considerations for using sensitive data in automated decision-making processes?", "69dbc047-26cf-443a-83ec-63b327f9646c": "How should health insurance 
companies handle sensitive data provided for payment facilitation?", "1bc73bf1-7e48-46ac-a612-80196cb803e7": "What is the role of an independent ethics committee in reviewing the use of sensitive data?", "c084d257-5a73-46df-8538-5073ed7a01c4": "Why is it important to have periodic reviews of decisions based on sensitive data?", "91cd1699-977f-4d73-8327-5dc62cff847d": "How can we ensure that necessary functions remain optional and not coerced?", "2f1e2c40-8abe-446e-9ab4-5eb6690a678e": "What are the potential risks of using dynamically developing algorithms in sensitive data contexts?", "6ed87438-4cb5-4a0e-8495-7378ba140033": "How should entities handle data that might limit rights, opportunities, or access?", "7cf57cf0-ea86-4802-a27d-d8900db77529": "What are the guidelines for ethical review of sensitive data usage?", "80b69831-2305-4880-94da-f11163809d19": "Why might an ethical review determine that data should not be used even with consent?", "8d50eaaf-61f5-49ac-a319-1c3d968c61a0": "What are the challenges in monitoring the ethical use of sensitive data in automated systems?", "623d8b3e-7499-4bf1-b87b-e7a81b50ee4a": "What is GAI red-teaming and how is it used in AI evaluations?", "b45f8960-4fbb-403a-8415-36afec2cd758": "Why is it important to engage representative AI Actors in structured human feedback exercises?", "64b731e3-b3d6-480b-9718-5237e4c7595a": "How can harmful bias and homogenization be mitigated in AI systems?", "c2a47cbb-9233-4979-8506-436f11a090b3": "What are the key considerations for conducting impact assessments in AI?", "6ce7fe74-d363-4677-b304-583120b4f062": "Why should those conducting human feedback exercises not be involved in system development?", "c44d6e5d-92e9-47d4-a053-1feb8c4ea992": "What roles do AI Actors play in AI deployment and development?", "272d11b8-e3c8-4c56-8cd2-61cec900ff82": "How does data privacy factor into human-AI configuration?", "3cca6e56-8ef0-4ae7-9605-f6f461bb0b87": "What are the responsibilities of domain experts in AI impact assessment?", "b49893d8-5d05-416a-962b-90597b72602d": "How can AI systems be monitored effectively to ensure they meet evaluation standards?", "6e23f927-8119-4bf8-823e-e3ef9d26c60a": "What is the significance of TEVV in the context of AI operations and monitoring?", "723651fb-9c2d-409b-b320-816fae7dec56": "What are the Access Board\u2019s Section 508 regulations?", "75f53b59-e2b8-4540-adea-9e72c300aab8": "What are the technical standards for federal information communication technology in the United States?", "660d930f-69d6-4768-b0a1-44bb673dfacf": "What is the International Organization for Standardization's role in technology design processes?", "ca06fff7-b717-465a-91c0-f20cc32b3902": "What are the World Wide Web Consortium Web Content Accessibility Guidelines?", "0d33d01e-495f-4254-8b8e-d7393c8fa678": "What is NIST Special Publication 1270 about?", "43a76af4-542c-499e-81e3-23edeecb1692": "How does bias in artificial intelligence affect public trust?", "f2963ebd-4268-48bc-99ef-b385c395b833": "What are the three categories of bias in AI identified by NIST?", "6007e7be-7afe-475c-8f2c-d189608a35f8": "How do systemic biases contribute to harms in AI?", "873995b6-97c4-4fbf-9d84-aafae525c908": "What are some examples of statistical bias in artificial intelligence?", "6106aef5-150e-4d9f-b12d-839a1bc64d5e": "How can human bias impact artificial intelligence systems?", "5d31f548-e180-4961-b02c-1026476307a5": "What is provenance data tracking and how does it help in detecting synthetic content?", 
"41737fda-7f5e-4526-8ecb-d8a560682739": "How can provenance data tracking improve the trustworthiness of AI systems?", "759a70d7-6d44-4ebd-b51f-264e7fd500c6": "What types of metadata are included in provenance data tracking for GAI content?", "10bbc39b-cae2-4ff4-b61a-2c7fa87c5fd4": "How does synthetic content detection contribute to GAI risk management efforts?", "cf616e2b-26ef-43e4-9dc4-b7b9569b9f8a": "What are the benefits of combining digital content transparency with organizational accountability mechanisms?", "9b2af444-94eb-4943-808d-8a01c74a7860": "How can provenance data tracking help trace negative outcomes back to their source?", "110eb68c-1767-4f4d-ba87-2028a2a1105c": "What role does provenance metadata play in improving information integrity?", "1a06806d-fb35-4c87-8809-da5a08874714": "How can provenance data tracking be implemented for different types of content like text, images, and videos?", "5296dde2-bf5c-4f7d-b712-62c552801ef5": "What information can provenance metadata provide about GAI model developers or content creators?", "5b028681-df5b-4e75-a745-ef99535810a3": "How does digital content transparency uphold public trust in AI systems?", "273f9105-5fd9-4850-ad10-dd9a9b8cc778": "What are some potential risks unique to the development and use of GAI systems?", "f2015027-8be4-41af-9324-6b28bc14ba10": "How can organizations identify and manage risks associated with GAI?", "1c39f437-06bb-4fe4-9973-5e2bdc4a8cb2": "What does it mean for a risk to be labeled according to the outcome, object, or source of the risk in GAI?", "1e85d184-b2c1-4ff6-af08-8ea57ec19ad9": "How are the risks of GAI mapped to Trustworthy AI Characteristics in the AI RMF?", "e0add861-37cb-4915-902c-04c52bc85356": "What are some examples of risks \"to\" a subject or domain in the context of GAI?", "b2c9c8be-32f2-487a-8dcc-b0edbbaec345": "What are some examples of risks \"of\" or \"from\" an issue or theme in the context of GAI?", "553df791-945d-4f86-8985-dd794f43f520": "How can future updates help in managing the risks associated with GAI?", "39d40845-848b-4f07-ac52-444d6f6dc6e4": "What role do Trustworthy AI Characteristics play in managing GAI risks?", "d8eb671f-52dc-4612-9671-cf9c05b5beae": "How can organizations categorize risks based on their unique approaches to risk definition?", "6d2fc2bb-39a1-4e50-b2f9-1579f87f3239": "What is the significance of Appendix B in the context of GAI risk management?", "295453ca-009a-489b-9a3b-7f39d0aede8a": "What is the NIST Privacy Framework and how does it help organizations manage privacy risks?", "8a36fef4-811b-4fdd-80eb-a3d79e7b5155": "How can organizations use the NIST Privacy Framework to support ethical decision-making?", "417512e6-a1fb-48c7-b125-859ffc865ba6": "What are some benefits of adopting the NIST Privacy Framework for managing privacy risks?", "39ea0fdf-a276-42b5-911b-4ec55c6e14c9": "Why did the school board's surveillance attempt in Lockport, New York, lead to a state-wide biometrics moratorium?", "2d51294e-3f63-42ce-b8e7-80d8791bacfc": "What are the implications of the biometrics moratorium in New York schools?", "28211d1f-6063-4fe9-8a8f-56d2e0c9bd80": "How does the NIST Privacy Framework help organizations demonstrate compliance with privacy laws and regulations?", "ffd0d540-b753-4764-918d-866af54215a0": "What sectors have voluntarily adopted the NIST Privacy Framework?", "a4e0cabf-38aa-4b49-920c-c6d52520a8fa": "What measures are included in the NIST Privacy Framework to identify and communicate privacy risks?", "c63477fe-7ddc-4df4-8341-f70bcccc16f6": 
"What was the community's reaction to the school board's surveillance plan in Lockport, New York?", "916eda70-3efa-4870-9261-7badb6422454": "What is the significance of the July 1, 2022, deadline in the New York biometrics moratorium law?", "5928ad96-89e5-4214-a148-421437e5e16a": "What does it mean when a document does not supersede existing statutes or regulations?", "ccf1c7b4-aec1-4f63-b6b5-ee4c5681bfdd": "How does this white paper affect the interpretation of international instruments?", "3320cd92-d86b-448f-aca0-3a32e873afe7": "Are Federal agencies required to comply with the principles described in this document?", "eee8169c-4314-45e2-88bc-1d4fe146f56b": "Can the principles in this white paper influence the US government's position in international negotiations?", "8a46d200-e514-4172-a7b4-199a50cfb153": "What are the implications if the principles in this document do not meet existing regulatory requirements?", "4e95d3b7-08c9-4644-87d2-e0c03b0312ea": "How do these principles impact lawful activities of government agencies, such as law enforcement or national security?", "de79da97-0834-4ee6-b1f4-eb364ebbe1ac": "In what ways does the context of automated systems usage affect the application of these principles?", "ef810a8b-abbe-45ac-b82f-47d20dc71885": "What should be considered when applying the principles of this white paper to automated systems?", "b0f0f94e-aa95-48c9-8e54-5d27b11c1ddd": "How might the principles in this document interact with existing policies and regulations?", "a5863708-81c6-4a01-a207-62001c6f482f": "What are the limitations of the principles outlined in this white paper regarding government agency activities?", "b613cee8-31a4-4979-8cd1-7547aef01a05": "What are the benefits of using technology to push benefits to those in need?", "93e8f04f-3a3f-4daa-8c55-9a697403cf18": "How can automated decision-making systems ensure positive outcomes in benefit distribution?", "4d63f90f-ffcf-461e-b0b2-d7443e9e0e53": "Why is it important to provide clear notice and explanations about the use of technology in benefit systems?", "87ebbea1-b1c2-4928-8cc9-72bec3599f01": "What are the potential risks of using technology to take supports away from people who need them?", "383af2c3-7350-4f63-b41b-480703a2f318": "How can human-driven mechanisms ensure the timeliness of benefit payments?", "db698d0d-61cc-4055-82ba-9e9f070b90a0": "What emerging technologies are being used in the healthcare system?", "864c0429-b586-4dfb-9fd9-1122c038f71f": "How can consumer products related to health benefit from current technological advancements?", "512bfcf4-61ac-46a5-bc16-91f0073ecef3": "What role does the White House Office of Science and Technology Policy play in healthcare technology?", "cf6c1f99-aa2f-4487-bf62-4d84073869b1": "How does the Center for American Progress contribute to the discussion on healthcare technology?", "3ab8040e-18b6-4400-94bc-7f7d0d4b1b2b": "What are the responsibilities of the National Coordinator for Health Information Technology in the US Department of Health and Human Services?", "789a4e6b-1b5f-4a2e-be70-4c60b2e169ef": "What are the key expectations for automated systems in terms of technical standards and practices?", "d904c6a8-b7ae-43d7-8287-35c070601ea2": "How should derived data sources be tracked and reviewed in automated systems?", "b604912b-4408-4b2b-8eb6-c1c90a9637c8": "Why is it important to identify and track data derived from algorithms in automated systems?", "17717bc2-52ea-4d65-b273-956e440e43ef": "What are the potential risks associated with derived data in 
automated systems?", "26da822d-31fe-48b3-9ca8-50155d6fa977": "How can feedback loops and compounded harm be prevented in the use of derived data?", "c5d39949-dbb6-4c5e-b00a-b6fd7788ae1d": "What measures should be taken to validate derived data against collateral consequences?", "cfea209a-9830-4e58-b251-1f70f42f93f1": "Why are data reuse limits important in sensitive domains like criminal justice?", "7a516f8b-a3c1-4579-a094-1302e810e841": "How can data reuse in new contexts lead to the spreading and scaling of harms?", "acc90406-7fa4-487f-a601-6e3dc23145a9": "What are some examples of sensitive domains where data reuse should be limited?", "3a91cd21-aa78-4c15-a216-a24057643605": "What practices should be implemented to ensure the safe and effective use of automated systems?", "8363b99c-85ab-4773-86a2-f63cac3d051a": "What are the key responsibilities defined for periodic review of content provenance in GAI systems?", "f87db50c-422f-4529-a21c-78abd2d10fab": "How often should the periodic review of the risk management process be conducted for GAI systems?", "34ed1fca-1d3e-4fdc-b3a4-9281c24dd99e": "What organizational roles are involved in the incident monitoring for GAI systems?", "4d916dd9-762b-4031-b912-5fc4222557c2": "What are the suggested actions for improving information integrity in GAI systems?", "cb1a51e1-502e-4c12-b98b-10353458c6e6": "How can organizations establish effective policies for after-action reviews of GAI system incidents?", "09aabd10-bf20-4e36-a1af-b2389c95fefe": "What is the importance of maintaining a document retention policy for TEVV in GAI systems?", "16ef8d11-a13c-44d4-a88e-868f36377f22": "How do after-action reviews help in identifying gaps in GAI system incident response?", "abb1af0f-005f-408f-aa68-27d64a96a71e": "What are the procedures for updating incident response and disclosure processes in GAI systems?", "681af4b6-c008-47ac-87ac-859d9849d77d": "Why is it important to have digital content transparency methods for GAI systems?", "c32d437f-b5b4-4d28-b7f8-1a2de95eb23b": "What tasks are associated with governance and oversight in the context of GAI systems?", "2792f4ff-31f6-48c7-9b05-a903e1fa4fdf": "What is the NIST AI Risk Management Framework?", "b4374d8c-f461-4c57-8c4e-4d9975359384": "How does the NIST framework aim to manage AI risks?", "cd79ee78-f93f-4750-875f-33a59251f574": "What are the key characteristics of trustworthiness in AI according to the NIST framework?", "0135028f-ffa7-4189-92d1-0888059657c4": "How is the NIST AI Risk Management Framework being developed?", "7c54c8a4-972c-48f7-9388-9640bbc920ce": "What role does Congress play in the development of the NIST AI Risk Management Framework?", "7ab73fee-d39b-41c6-8cd8-222b44049d82": "How can organizations incorporate the NIST framework into their AI systems?", "579d7a98-c43c-4d11-b24e-6d7275e820fa": "What are the benefits of using the NIST AI Risk Management Framework?", "427a0cc8-5c14-4b43-bcd0-372317b5a8e7": "How does the NIST framework address privacy concerns in AI?", "e7555614-9fb9-4861-b164-85033e7aeace": "What opportunities are available for providing input into the NIST AI Risk Management Framework?", "5c5b27b8-e80f-4e87-80a8-dda89bca7b12": "How does the NIST framework ensure the explainability and interpretability of AI systems?", "4436c96e-b176-4846-b3be-120e574bbaae": "What are some examples of unintended harms caused by automated systems?", "53134aa2-f21c-4ea7-be6f-6b32937c51f0": "How can companies ensure that their development decisions are ethically vetted?", 
"89297186-beb5-4d42-b727-46821b71a4e0": "What are the benefits of pre-deployment testing and ongoing monitoring for automated systems?", "ae608ad7-1d56-4091-a89f-36c7c99c6c23": "How do public consultation processes help in the deployment of new automated systems?", "666e61ac-70db-4dc3-8657-e4c29d144123": "What existing practices protect the American public from potential harms of new technologies?", "98137aae-d585-4b93-b931-6c0f0a022772": "Why are proactive protections important for the use of automated systems?", "18d231da-3925-4aa0-8358-ad67f4ce8c1e": "How can expanded protections increase public confidence in automated systems?", "9f500df2-ad6b-42ed-8645-8a9e917bdf80": "What role do ethics reviews play in preventing harm from automated systems?", "8d4c69ae-abc4-4f4b-961e-5d0f8194de75": "How can governments improve their consultation processes for new technologies?", "4097b122-a3b3-4718-8577-48d550199b62": "What are the challenges in implementing safeguards for automated systems?", "1a442f2d-3986-4d5f-ab68-0ad589163a65": "What is AI-enabled \"nudification\" technology?", "d1108cc8-95d4-4e06-83da-0a5667e1679b": "How does AI-enabled \"nudification\" technology impact women disproportionately?", "0141b7f8-c7ab-497a-af5e-f0ad4e4ff34e": "Why is it difficult for both humans and AI to detect inauthentic images created by \"nudification\" technology?", "04c27c24-9db1-48ea-9b81-876c36344898": "What are the potential personal and professional consequences for victims of non-consensual intimate images?", "e6564ab8-5109-4a91-9546-08eef6406338": "How does the proliferation of AI-enabled \"nudification\" technology contribute to image-based abuse?", "6c438bca-254b-4856-9607-ce37450c6154": "What measures can be taken to protect individuals from AI-enabled \"nudification\" technology?", "b754ed6a-d92e-4edb-a8bc-d0eea1d29626": "How does the experience of harm from non-consensual intimate images affect victims' mental health?", "56c9eed6-f23c-499e-8bc0-52b33a7b3e1b": "What are some examples of apps that enable non-technical users to create or alter images without consent?", "6f321352-c8fc-4909-9996-33dc8382f6aa": "How can the authenticity of altered images be verified to prevent image-based abuse?", "5398f151-8b5f-42f5-b601-0b47b2d7ba49": "What steps can be taken to make AI systems safer and more effective in preventing image-based abuse?", "57eb4cf6-01d6-4cae-b638-975a2a0f2656": "What are the key findings of Virginia Doellgast and Sean O'Brady's report on call center jobs and worker stress?", "8a32a722-7f7e-40c1-967a-6c5bd05a1c88": "How do management practices impact worker stress in call centers according to the CWA report?", "e1b53808-6b10-44e0-882d-ba92d299d675": "What recommendations does the Federal Trade Commission make in their 2014 report on data brokers?", "cad08b72-46c0-40b7-bbf7-613ff0ab5f7f": "What are the main arguments presented in Cathy O'Neil's \"Weapons of Math Destruction\"?", "4da03091-d495-4661-9c76-eca9b1c37a74": "How does the US government use social media surveillance according to the Brennan Center for Justice report?", "53d8542f-d4e3-4349-bb91-b058abf20736": "What are the implications of Shoshana Zuboff's \"The Age of Surveillance Capitalism\" for privacy and human rights?", "08b49066-3db9-4bb5-97a1-46a2adb0324f": "How does the Brennan Center for Justice report describe the extent of social media surveillance by the US government?", "ded819f0-0467-4497-a257-4b727219d0dd": "What are the potential risks associated with data brokers as highlighted by the Federal Trade Commission?", 
"51e0e9cc-e3b9-47a7-8bf8-405f02fa04e6": "How does \"Weapons of Math Destruction\" address the issue of algorithmic bias?", "aec903ac-6dcb-450f-ad0d-c664a413a4b9": "What solutions does Shoshana Zuboff propose to combat surveillance capitalism?", "8d838d90-aa87-4ecc-9abc-0fcfa5a4d35d": "How can we protect the public from algorithmic discrimination in a proactive manner?", "a4c16234-c64d-454b-98d2-55d8906e1040": "What is a proactive equity assessment in the design phase of technology?", "33c734e9-1fa0-4bd1-b225-487ee53c2b33": "Why is it important to review potential input data and historical context during the development of automated systems?", "da018beb-b83d-4eaf-8c14-30d735aac789": "How can we ensure accessibility for people with disabilities in automated systems?", "93044246-0649-4ba3-b8b1-355b186025a1": "What societal goals should be considered to identify potential discrimination in technology?", "df01e2f4-1283-4220-9353-4573fe7a1203": "Who should be included in the equity assessments for automated systems?", "98fd7a77-ecb9-40ff-b731-37710588af27": "How can we address potential discrimination against Black, Latino, and Indigenous communities in technology?", "37017d6c-9757-463e-b5e4-7df7f5a6a2cc": "What steps can be taken to ensure equity for Asian Americans and Pacific Islanders in automated systems?", "cc45a9c2-e146-4637-ae51-762e5cf9a713": "How can we support members of religious minorities in the design of automated systems?", "a04f1bd4-fb2f-4020-a56f-07c5d12ee11c": "What measures can be implemented to protect women, girls, and non-binary people from algorithmic discrimination?", "21b9f512-20cd-4b96-a8e5-824c5d788b9e": "How can continuous surveillance systems impact civil liberties and rights?", "cbc08ff9-844c-48d2-bd2f-d965463662ff": "What are the risks of using identity-related information in surveillance systems?", "62b31c74-49b3-45d8-958c-175dffe5e8da": "Why should continuous surveillance not be used in public educational institutions?", "2eba3ccc-3d6a-46ae-bebc-6cf52aea05da": "How can algorithmic discrimination be avoided in surveillance systems?", "a81db3fd-a4bb-4dd0-a446-4b3c34313263": "What are the potential consequences of using biometrics in surveillance?", "cc016098-a84b-4347-a791-d173694b1fd8": "Why is it important to limit the use of location-based inferences in surveillance?", "61262c75-27f1-4742-a14d-7dfbb43c1f6b": "How does continuous monitoring in workplaces affect employees' rights?", "48be4dc1-8648-4ae2-b813-f08d7321d697": "What measures can be taken to prevent surveillance from limiting access to critical resources?", "d3c26eb4-5404-4906-8f26-12a930b32e4b": "How can social network data be misused in surveillance systems?", "5cd15272-af41-4e9c-aa26-42043ce6088c": "What are the ethical considerations of using surveillance in public accommodations?", "7869bc66-d4ef-4fc7-9897-351302b9eae5": "How can automated systems impact the eligibility for Medicaid-funded home health-care assistance?", "c13ee199-2840-46a1-ab92-c45e02f2f6b6": "What are the potential issues with using algorithms to determine eligibility for social services?", "d612ed72-1fb8-4bca-9b74-b5243bf640ca": "How can the lack of transparency in algorithmic decisions affect legal cases?", "bbecd72d-7534-478d-a686-94b4ed9cb7c8": "What are the challenges faced by lawyers when contesting decisions made by automated systems?", "68914712-215e-4582-8129-f1f7db119cc4": "How can individuals be informed about data collection and algorithmic assessments in child welfare cases?", "5f684140-09b8-47a2-8459-aa9c40072ce2": 
"What are the ethical concerns surrounding the use of algorithms in child maltreatment risk assessments?", "b0deab21-1ef7-4bb8-bb66-bd0567448753": "How can the use of algorithms in social services be made more transparent and understandable?", "e6417fe3-22a8-4536-89de-555e6a9a171f": "What steps can be taken to ensure fairness in algorithmic decision-making for vulnerable populations?", "12de6c9e-e24e-4576-9d3c-1fcbd6ccc54b": "How can parents contest decisions made by algorithms in child welfare investigations?", "522de666-2af7-4927-bf75-9d3efd25b34d": "What are the legal implications of using algorithms without notifying the affected individuals?", "fe8e6e24-ac4f-4570-bb59-e53149f51c49": "What is the purpose of the overlapping framework of rights mentioned in the context?", "07b9522e-0eac-4459-85e1-b511e6b00056": "How does the Blueprint for an AI Bill of Rights aim to protect the American public?", "55277100-414c-49c6-84ac-e6a011795719": "What are some examples of protections already required by the US Constitution mentioned in the context?", "d6a87b9f-e5f6-44ac-bc18-b4f1bc5515a9": "How should measures be taken to realize the vision of the rights framework?", "fc2524b6-ed78-43d5-b6fe-a01bdd53e031": "What is the relationship between the Blueprint for an AI Bill of Rights and existing US laws?", "fd764fa7-268d-48fb-9ec7-3be9aa2eb183": "How does the context describe the role of judicial oversight in government surveillance?", "202fc607-7128-470f-aed2-e848351cae24": "What does the context suggest about the proportionality of measures taken to protect rights?", "e5097d86-a6bc-49d4-86a4-d058c5b193a1": "How does the Blueprint for an AI Bill of Rights envision the future use of automated systems?", "e633d508-d253-4b7f-bec5-75b11962e65e": "What are the principles described in the Blueprint for an AI Bill of Rights meant to ensure?", "742f04aa-415c-4e14-98b9-5298de43c47d": "How does the context relate to the protection of opportunities and access for people?", "e1fd4aa1-c10d-4f71-bf77-b4cca9a6e0ed": "What are the best practices for regularly assessing and verifying the effectiveness of security measures in AI systems?", "9fbe4be3-06da-42bf-a241-95fc342604db": "How can organizations ensure that their AI security measures have not been compromised?", "f1082088-e78b-42d5-9285-ae15731354be": "What methods can be used to compile statistics on policy violations and intellectual property infringement in AI systems?", "e6f71c20-417c-46ac-8a42-10c97c50760f": "How should transparency reports be analyzed across different demographic and language groups?", "2f6a4b04-9c59-4bad-8823-c165db75adf3": "What are the key components to document when giving instructions to data annotators or AI red-teamers?", "a64475ac-8c2d-48af-b278-eb91c933268d": "What digital content transparency solutions are recommended for documenting instances of content generation, modification, or sharing?", "0d873b42-d24d-4da1-89ff-6ccc15e84b6b": "How can organizations address risks associated with transparency and accountability in AI systems?", "97de1dbe-9a2c-4a3a-9a0d-842bf1c1ad55": "What are the common risks related to intellectual property and harmful bias in AI systems?", "7e629132-bcc2-46fa-9815-95ccd889c845": "How can the effectiveness of AI deployment and impact assessment be monitored?", "070a1044-27d3-4b31-aa33-23e39882c9ab": "What role do domain experts play in the operation and monitoring of AI systems?", "10ecba85-50b5-466c-92fc-326759a2a5ee": "What are formal methods in the context of automated systems and machine learning?", 
"43072003-14fe-4f66-94c2-9ac121ef7717": "How does the Designing Accountable Software Systems program ensure legal and regulatory compliance?", "98486804-fb18-4583-ab43-67b07b08efa6": "What are the transparency and validity requirements placed by state legislatures on pretrial risk assessments?", "54358f22-1102-42f3-b0f2-7227b0a0f0c2": "Why are civil rights groups concerned about the use of algorithmic pretrial risk assessments?", "ffba1d9d-ebb6-4955-a4fa-839e180301ef": "What does Idaho Code Section 19-1910 require for pretrial risk assessments before they can be used in the state?", "99d80794-a2ea-449b-9af4-62646f316051": "How can a pretrial risk assessment be shown to be free of bias according to Idaho Code Section 19-1910?", "07f00fa3-c821-41ec-9ff1-a781bd1bd55d": "What steps must a locality take to validate a pretrial risk assessment as free of bias?", "e9dee9ec-012d-4ba0-a878-5dec4184c9a2": "What are the implications of the requirement for all documents and records related to pretrial risk assessments to be transparent?", "b922e48a-8d8e-4ae8-807f-5bb7477dd82d": "How do formal verification and analysis contribute to the reliability of automated systems?", "75050f5f-7cfc-4adf-a91a-1d7fb4185c7c": "What methodologies are considered rigorous and reproducible for developing accountable software systems?", "6b983f53-78fb-4bd3-be9a-cb6deb436d63": "What are the key components of a certification program for managing GAI risks and interpreting content provenance?", "550b57aa-5cc7-44ec-b07d-c62cd7dd4668": "How can existing training programs be adapted to include modules on digital content transparency?", "65a3901b-9b76-4dcd-b3ba-d04968302b14": "What methods can be used to evaluate whether GAI operators and end-users understand content lineage and origin?", "fe52b3aa-3245-48cc-86da-8f3f3225b4cf": "Why is it important to delineate human proficiency tests from tests of GAI capabilities?", "b3b5db46-0009-42b6-9465-4b5f30fbe624": "What systems can be implemented to monitor and track the outcomes of human-GAI configurations?", "530d41df-1d98-428a-9449-1e199472e781": "How can end-users, practitioners, and operators be effectively involved in the prototyping of GAI systems?", "68889efa-4505-424f-ae65-dc462fc55044": "What are the risks associated with human-AI configuration and information integrity in GAI systems?", "5149446a-0fef-433d-a2ae-bfa7865ae0d0": "How can proficiency in managing GAI risks be tested and certified for specific industries?", "0fba5c84-2bcf-4be8-9684-d1904eb861bc": "What are the benefits of continually refining and improving human-GAI configurations?", "5634f37b-9702-4b53-8dce-bfb21a101d03": "How can information integrity be maintained in the context of GAI systems?", "8eb9d5d5-af51-4376-9b9d-1297edd11496": "What are the potential risks of using biometric markers in school exams?", "eaf23276-45c4-465b-a0a1-c347a8fc5b76": "How might biometric systems in schools affect students with disabilities?", "451adf54-6336-4465-b4de-b6390b383b02": "Can location data from data brokers be used to track visits to sensitive places like abortion clinics?", "f2489a19-b9ce-4941-a3e9-154a0ecad928": "What kind of student data do companies collect to forecast student success?", "317d1c65-7185-452c-878a-1b16e14d3e4d": "Why are parents concerned about the collection of sensitive student data without consent?", "474c5dc3-a92f-40c7-b290-7529f751d77b": "What are the transparency issues related to the use of collected student data?", "2e297de2-2657-4560-9b39-b6316e39d5b4": "How can the collection of demographic 
information and other sensitive data lead to discrimination in schools?", "38c93c3c-8f95-4361-80e3-d401cdfb7a38": "What are the implications of employers transferring employee data to third-party job verification services?", "7a8d09ca-630b-49f2-921f-eb1e2dffc47c": "How do schools use data on free or reduced lunch status in their assessments?", "2b74ec7f-f8f1-40ba-b970-2b4fbc944395": "What are the ethical concerns surrounding the use of student data for predictive analytics?", "839d62aa-6521-47db-b195-5f55f3715e07": "What mechanisms can be used to sustain the value of deployed AI systems?", "8c59f600-8f81-40e2-8f76-cad08254ac3a": "How can organizations compare AI system outputs against predefined risk tolerance guidelines?", "5a461ec3-611e-43f6-af2a-7a7f88454170": "What are the risks associated with AI-generated content, such as CBRN information or capabilities?", "16e67bb6-1d80-4b1d-b60d-cda158c2d378": "Why is it important to document training data sources for AI systems?", "ddf72159-1801-49ab-a221-531088d69221": "How can feedback loops between AI content provenance and human reviewers be evaluated?", "f24bf2cd-d91c-4662-bfd9-31b03791b58e": "What are the benefits of implementing real-time monitoring systems for AI content provenance?", "c0e0ee49-2ab6-4eab-bafe-fec66c3023af": "How can organizations ensure that AI-generated content does not include obscene, degrading, or abusive material?", "c7548d81-fe0d-4316-a260-2d342818a5c6": "What steps can be taken to prevent harmful bias and homogenization in AI systems?", "91e539e0-ee4e-4e3b-bc8d-b3794ca3798a": "How can dangerous, violent, or hateful content generated by AI be managed?", "4c18144e-d6a3-4d13-a836-e55c43d2c5e2": "What is the role of information integrity in the context of AI development and deployment?", "18472313-2f95-4239-88f3-4a75df9cf246": "What are Model Cards for Model Reporting and why are they important?", "18e201b7-192f-4ea2-934b-df1184862535": "How do adverse action notice requirements under the ECOA and the FCRA impact consumers?", "07cfac24-a2ea-4bef-b577-15ed4935a4bd": "What information is provided in the FTC's guidance on using consumer reports for credit decisions?", "86d77516-8a49-4005-97e2-fef5020a40b3": "How does the CFPB act to protect the public from black-box credit models using complex algorithms?", "b2ac04a5-d89c-4fc8-8023-6bb6b8a814ab": "What are the key takeaways from the Conference on Fairness, Accountability, and Transparency (FAT* '19)?", "03bc6211-f0ad-4a75-b7aa-92f96064ce4b": "What are the adverse action notice requirements under the ECOA and the FCRA?", "75513281-57cf-4e3f-8495-cafc4e79d242": "How does risk-based pricing affect consumers according to the FTC?", "28e98921-df31-486e-a4bf-cd2a8f018ee4": "What measures has the CFPB implemented to address issues with black-box credit models?", "1942efe7-0de7-42dd-9f0b-23cdddc6969d": "Why is transparency important in credit decision algorithms?", "ba2751db-a092-4455-89ce-60ce0f2e5ff0": "How can businesses ensure compliance with adverse action notice requirements?", "43ab2e49-8484-42a0-b907-a326016b5fba": "What are the potential risks during the design stage of the AI lifecycle?", "71c80195-c590-4a6a-8455-cc14b7d76775": "How can risks during the deployment stage of AI be mitigated?", "bac1fcc0-33c2-4bd1-bcae-206282b1aebf": "What is meant by \"algorithmic monocultures\" in the context of AI risks?", "ded49efb-8ec5-43c7-90a3-414f486b61aa": "How do risks at the ecosystem level differ from those at the individual model level?", 
"aa5a5372-d095-4583-8dd2-04b30b8d377b": "What are some examples of risks that can arise during the operation of AI systems?", "d364a35e-d9b1-4892-aeee-562d6fe3fb16": "How can human behavior contribute to the risks associated with AI systems?", "8e058945-6592-48f2-a7c0-b29f59239765": "What are the implications of repeated use of the same AI model on labor markets?", "0586fc63-c513-4afc-a447-0f010dfa2c07": "How can the decommissioning stage of AI systems pose risks?", "5723f355-fc6d-4188-a997-6fc9df87d7e0": "What are the potential impacts of AI on access to opportunity and the creative economies?", "b1450fe5-2a1b-49c3-8181-b0908b22ec94": "How can risks from GAI system outputs be identified and managed?", "d6dc8fa5-06dd-4cb6-b9e1-3d99509fd36f": "What are the standard risk controls that can be applied to proprietary GAI technologies?", "43f41ff8-54b8-405b-97e6-387ac8c232c9": "How can organizations ensure third-party transparency when using GAI systems?", "def9231f-04c4-40b3-a87d-4a5d13f4664e": "What is the role of software bills of materials (SBOMs) in managing GAI system risks?", "0abe2ec1-662d-4cd9-8f9a-28ce9be69bbc": "How do service level agreements (SLAs) contribute to risk management in GAI systems?", "36dbeaca-841b-42b4-a5fc-2f1795f02564": "What is the importance of statement on standards for attestation engagement (SSAE) reports in GAI systems?", "e49097b7-7ffa-4581-8515-1b30cfa7d7b0": "What are the challenges in risk mapping and pre-deployment measurement for GAI systems?", "f51fa6cc-e9f9-4425-b85d-44db74827ce4": "How can robust test, evaluation, validation, and verification (TEVV) processes be applied to GAI systems?", "b85af260-3cfa-4a9c-bc21-802cd691f871": "Why is it important to document TEVV processes in the early stages of the AI lifecycle?", "7444a8c8-575f-4345-84f3-0124561379dd": "How can acquisition and procurement due diligence help in managing risks associated with GAI technologies?", "84e38c59-60d0-4319-81ad-3888d9b12506": "What are the benefits of involving representative AI Actors in the TEVV processes for GAI systems?", "80255fc3-d877-468b-97e9-7be818ed20da": "What is the AI Risk Management Framework (AI RMF) 10 for Generative AI?", "9516ed5a-2087-413e-925b-26135b0f5d06": "How does the AI RMF 10 aim to improve the trustworthiness of AI products and services?", "6fab3d01-f156-429d-831e-4ea194ff08bf": "What is the purpose of President Biden\u2019s Executive Order 14110 on AI?", "91adbba6-9d02-4f12-8ca2-25bf46b3776b": "When was the AI RMF 10 released?", "bbdcadfc-3db1-46fa-85d2-e443a126d455": "Is the use of the AI RMF 10 mandatory for organizations?", "36c3329b-2a66-4361-b5f7-7ce8ade47cec": "How can organizations benefit from using the AI RMF 10?", "aae224f2-ef20-4cd2-8f53-3e748c2dac1e": "What are the main components of an AI RMF profile?", "150d6339-723a-443d-bc9f-4b1be5ba3fb1": "How does the AI RMF 10 help in managing AI risks?", "3779173f-0285-4630-b333-78420055bd18": "What considerations are taken into account in the AI RMF 10 for Generative AI?", "2a0ab6da-3f10-4297-bb6a-887732c4d6be": "How does the AI RMF 10 align with legal and regulatory requirements?", "66823427-2ffd-4c77-909f-5289272bdb4c": "What is algorithmic discrimination and why is it a concern?", "0ab73255-60d0-4d92-ac1c-3d0c7a6949a9": "How can automated systems amplify existing inequities?", "feeecc56-069d-45a3-88eb-3f4d74890fcb": "What are some examples of algorithmic discrimination in facial recognition technology?", "88d366bd-b700-4c98-a848-aedce1be01ee": "How do hiring algorithms contribute to 
discriminatory decisions?", "00fc2bf2-fbad-4580-bd4a-05669d18fc7c": "In what ways can healthcare algorithms be biased against Black Americans?", "2844838c-8c42-4c48-a44f-63715f685e1d": "What are the potential consequences of data that fails to account for systemic biases?", "36ef20ee-eb6e-4f4e-9d65-e279ee7920d6": "How prevalent is algorithmic discrimination across different industries?", "3302a720-0035-40c3-a9b6-017c0952d77d": "What protections are in place to address algorithmic discrimination?", "1a75a965-db4c-48cc-a1ac-7e4bcb22e7a6": "How can we ensure that automated systems do not perpetuate existing biases?", "0e9714b8-1ef7-4875-9d9f-858297cad936": "What steps can be taken to mitigate the risks of algorithmic discrimination?", "69b6d24d-4f77-4453-bd79-e8af8137c7e1": "What are healthcare navigators and how do they assist people in obtaining health coverage?", "d3ffb77a-2b83-4c99-9923-bd42f0684d90": "How did the Biden-Harris Administration support healthcare navigators in 2022?", "575075f0-9aa1-4135-87b7-2b392d2cda2b": "What kind of training do healthcare navigators receive to help consumers with health coverage?", "c53668a5-710d-4526-98b9-af2788d4b125": "How do healthcare navigators help small businesses and their employees with health coverage options?", "84bee82d-ff9b-4d9e-8a7a-b6cbcb4db569": "What are some real-life examples of laws and policies that protect rights and access to healthcare?", "d74ae40a-97e3-4c00-bef1-b09032380ffd": "How do practical technical approaches help in protecting opportunities and access to healthcare?", "7b48a310-6aed-4f43-a279-f37e09939bba": "What is the role of sociotechnical approaches in ensuring healthcare access?", "3db05dac-a97a-4b1e-9652-d76803a23c04": "How can increased funding for healthcare navigators impact uninsured consumers?", "984009d5-34ae-416b-b936-3f783743a025": "What are the benefits of having more trained and certified healthcare navigators?", "1fcf65a3-0d6b-4145-99cc-0bf2c3235ad1": "How do healthcare navigators assist with completing eligibility and enrollment forms for health coverage?", "66fd1727-6e57-49a1-ae65-9fdb471abd5a": "What is the role of the National Science Foundation (NSF) in AI research?", "4d10f264-ec1c-496e-8dbb-69f985ae5cf1": "How does the National AI Research Institutes contribute to the development of safe and trustworthy AI?", "4f34a0ae-db65-487e-8242-a6f5458fa9de": "What are the key principles of the AI Ethics Framework for the Intelligence Community?", "4c4c9829-d71f-4438-a202-eaef5533887c": "How does the Cyber Physical Systems program support AI research?", "90a56106-7089-4755-973a-da4912ed88d1": "What kind of research is funded by the Secure and Trustworthy Cyberspace program?", "0a37c463-1465-453d-bbbe-74e8a11fe10f": "How does the AI Ethics Framework help implement ethical principles in AI development?", "bc9e1c96-6a89-4d9b-b96c-71acb36031e0": "What are the main goals of the National AI Research Institutes?", "037f5eee-4dd0-4bed-9151-ef0be3018604": "How does the NSF ensure the safety and security of AI systems?", "06f79190-3996-4bfd-86e5-6065a4dc96ee": "What is the importance of explainable AI algorithms in the context of the Intelligence Community?", "3df54901-e507-45fe-bd82-154e2d31aae0": "How do privacy-enhancing technologies contribute to the security of automated systems?", "c4da0ee6-b97a-4cac-839c-667bdb97a9d0": "What are the key expectations for automated systems to ensure they remain safe and effective?", "37843ed7-8a1c-4d60-818a-72b161e25abf": "How important is ongoing monitoring for the performance 
of automated systems?", "0944ea5d-dbdd-4f95-829a-6caacde3bed0": "What procedures should be in place for recalibrating automated systems?", "12d5b6aa-803b-4df4-8a6d-4fce86fdf65e": "How can automated systems be continuously evaluated for performance metrics and harm assessments?", "643c631e-a495-4f76-ac44-fc0226061601": "What steps should be taken to update and retrain machine learning models in automated systems?", "6fbfb445-cd6f-4999-b0b3-48d65d58c0e6": "Why is it necessary to have fallback mechanisms in automated systems?", "9c0537b0-0ed3-4084-aefd-70d1001d512a": "How do changing real-world conditions affect the performance of automated systems?", "379808b9-e51a-42f1-b658-7f085dd496a5": "What are the best practices for post-deployment modification of automated systems?", "f9ec3773-dc0b-474f-8e19-10f71c543467": "How can unexpected conditions be managed in the deployment of automated systems?", "caf8112e-e9f3-4acd-81fb-c3b49486c424": "What role does continuous evaluation play in maintaining the effectiveness of automated systems?", "581a5da7-213e-4a7d-9f8d-d4133dd57842": "How does harmful bias in GAI models affect different racial and gender groups?", "9d4abb67-4cfb-4342-9757-6880500e5378": "What are the potential consequences of GAI systems performing poorly for non-English languages?", "6c77fa09-63b8-42b1-ab8c-8a0878fe99b4": "How can GAI models perpetuate or exacerbate existing societal biases?", "a46e5b14-4ca5-47eb-b330-0d94b8f9f326": "Why might GAI systems be inappropriately trusted to perform equally across all subgroups?", "7fabb17d-bead-4be0-a042-a82244952276": "What are representational harms in the context of GAI models?", "b6bddb5b-9792-4d2b-8ddc-6bd3455e5a52": "How can disparities in GAI model performance lead to discriminatory decision-making?", "93e73653-3778-45ec-8b88-83ef0cee390f": "What are the risks of using GAI systems for lower-resource languages?", "0ce84b04-c919-4d13-a642-1c811d94eb54": "How can training data contribute to harmful bias in GAI models?", "8b538c35-c3ca-4876-93d6-3a9de1887248": "What steps can be taken to mitigate bias in GAI systems?", "07203f30-4f7f-4aca-8eca-7c309b4986f2": "How does the underperformance of GAI systems for certain subgroups compare to not using GAI systems at all?", "b3dc9b16-8ecb-4c62-ab68-b96bed0dfc8d": "What are the challenges faced by people without smartphones in accessing unemployment benefits?", "02ced4ba-2675-4cba-9819-e8e00d3cd558": "How does the digital divide impact the ability to claim unemployment benefits?", "98bb75ed-7a8c-4dce-86df-07d671be2a37": "What are the implications of the UIA lawsuit on the perception of unemployment in the state?", "da95bf2a-175f-49d9-b8a6-280808d008cc": "How does the state criminalize the unemployed according to the Detroit Metro-Times article?", "f6187e72-cd60-47bf-8a65-87c6ff917a35": "Why are doctors turning away patients with unbearable pain, as discussed in the Wired article?", "14dd793d-1318-45e0-bf65-ae5c770cde2a": "What role do algorithms play in the treatment of chronic pain and opioid addiction?", "d3a655b7-63e1-43d6-a477-2241c65e6d0a": "How does Amazon's use of bots for firing employees affect workers?", "777e4f21-26bb-4162-81e1-25f943df9293": "What are the consequences of being fired by a bot at Amazon?", "d4ec38ed-55a8-425e-af0d-8589faeb46ba": "How do machine managers at Amazon impact the worker experience?", "2a40a8e1-c0b7-4eac-a84a-5d63fbdb6c9a": "Where can I find the definitions of \u2018equity\u2019 and \u2018underserved communities\u2019?", "c54055bd-b859-4846-856f-3208fb311b0f": 
"What are the specific criteria for deactivating a GAI system according to MG-24-002?", "6f0ae6ac-c093-4863-8cf2-4ada7f9d42e7": "How should organizations escalate GAI system incidents to the risk management authority?", "b4852e25-ec60-482f-a671-b99fc5379d18": "What procedures should be established for the remediation of issues triggering incident response processes in GAI systems?", "0d20124d-250f-42b7-a215-c52597d6ff03": "How often should the criteria for deactivating GAI systems be reviewed according to MG-24-004?", "477cd099-57f2-473f-9e76-397ae38e415a": "What are the key tasks involved in AI Deployment, Governance and Oversight, Operation and Monitoring?", "dcf15838-d29f-4e4a-843a-165bf83a1bfe": "How can organizations ensure that AI risks and benefits from third-party resources are regularly monitored?", "9d4de1d9-a66b-4c82-935a-f7e97333ed5c": "What are the risk controls that should be applied and documented for third-party AI resources?", "070ee859-7379-4692-a8af-44336782308e": "What timelines should be provided to stakeholders for the remediation plan of GAI system issues?", "639643b4-60e7-474d-aee6-3ee349564306": "What is the role of the organizational risk management authority in managing GAI system incidents?", "acb51a42-9248-4132-bcd5-aa4101f783dd": "How should organizations document the risk controls applied to third-party AI resources?", "507836ec-d203-47f8-a680-09f83fa2a01f": "What is the Responsible AI Resource Center (AIRC)?", "0712927b-b14b-48bd-8873-1773a4748151": "What is the purpose of The Language of Trustworthy AI: An In-Depth Glossary of Terms?", "5a1bdd93-7277-481d-9646-c6981146f8d6": "How were public comments and consultations used in the creation of the document?", "4ce4e9f4-b9b5-4632-a08b-c5e41c2f4b8a": "What does risk refer to in the context of the AI RMF?", "d4e55ebe-4671-4b95-9b07-c9e5e246be26": "How can AI risks differ from traditional software risks?", "8b4d8ad8-933b-4290-9fb2-14ecdde49db9": "What are some examples of risks that are likely to materialize in a given context?", "04d3166e-c3f8-442a-a5a4-36726190ef49": "What are some examples of risks that are more speculative and uncertain?", "da1e7ff4-4699-42f3-b32e-97305e4b357a": "How can GAI exacerbate existing AI risks?", "81f01fdb-f899-4475-8c04-9e7fe197e698": "What are some unique risks created by GAI?", "88a166d0-e31c-43cf-8f58-baefce9d284f": "How can the magnitude or degree of the consequences of an event be measured in AI risk assessment?", "9ed882bf-45cd-4218-a475-bb18625ade4c": "What are the main ways hiring algorithms can introduce bias according to Miranda Bogen's article in the Harvard Business Review?", "56adf753-40ac-4593-bb8a-832feec13d31": "How is the TSA making flying easier for transgender people, as discussed by Arli Christian in the ACLU article?", "19cd8cd2-a1f6-479e-8713-95e8683eee7f": "What specific measures has the TSA implemented to accommodate transgender, non-binary, and gender nonconforming passengers?", "3e165023-0718-43ed-99b9-10cbcee9f4e1": "What are the concerns raised by the National Disabled Law Students Association regarding the online administration of bar exams?", "dfad7b64-3277-4f06-af37-d85af7857c4e": "How does automated test proctoring software discriminate against disabled students, according to Lydia X Z Brown?", "a41d0161-4967-4046-9c48-e87cea7d5bf8": "What are some examples of bias in hiring algorithms mentioned in the Harvard Business Review article by Miranda Bogen?", "bf872985-a9f6-4c68-ac13-c3442a951f5a": "Can you provide a summary of the four ways the TSA is 
improving the flying experience for transgender individuals?", "f53a3ba9-3917-4f06-a722-e4d54d8119c2": "What resources are available on the TSA website for transgender passengers?", "6fa39a37-eeaf-4520-b9bd-162fce45ab8c": "What are the key points in the NDLSA's report on online bar exam administration concerns?", "f4bf60dd-02c3-4248-bed1-4831c9772821": "How does the Center for Democracy and Technology describe the impact of automated test proctoring on disabled students?", "63eb9983-de78-4519-b2c8-f22b454ee053": "What are the legal and regulatory requirements for AI development and deployment?", "267043c7-c798-4c11-b2d1-9c33ddd4fb4e": "How can AI development be aligned with data privacy laws?", "d30f4bc2-2f5f-41a6-b0ca-38e7382ef444": "What are the implications of copyright and intellectual property laws on AI?", "fc798bdd-e26e-4ed1-8142-e17cd09368df": "How can organizations manage and document legal requirements for AI?", "1fc3dca0-6426-432e-bf6d-db436aa4dfd0": "What are the risks associated with data privacy in AI systems?", "3eb7529c-01dd-46f1-ba24-c428f4b98cbf": "How can harmful bias in AI be mitigated according to regulatory requirements?", "10147097-3b5f-4390-8fdb-61a55029b730": "What is the role of governance and oversight in AI development?", "b73e2ec4-11cf-4e83-9ef8-b0612a7df8b7": "Who are considered AI Actors according to the OECD?", "3f2795a8-623f-4759-be58-bac1f301777a": "What tasks are involved in the governance and oversight of AI systems?", "1dacc26f-f637-4f67-b91a-d11102d1bb85": "How can organizations ensure compliance with intellectual property laws in AI development?", "b5373d9d-ce4e-40bc-bcbc-4f675f59d1a8": "What are the disproportionate risks that AI poses to women according to the Brookings article by D. West?", "029ed6d6-5b8c-45f2-8690-d47dc48f8232": "How do large language models (LLMs) perform in citing relevant medical references as discussed by Wu et al.?", "3dd37534-24da-494e-b97b-5e3890f7f13e": "What evaluation framework did Wu et al. propose for assessing the citation accuracy of LLMs in medical contexts?", "a89ead0f-a8bc-4db8-adae-b72aa1d2c3c5": "What evidence of racial bias was found in OpenAI\u2019s GPT when used as a recruitment tool, according to Yin et al.?", "3babea8f-5928-47f3-962e-9b283207ab3e": "How can jailbreak prompts affect the behavior of large language models, based on the research by Yu et al.?", "0af422b3-b85e-4b51-94fa-71fc9ab1d8fa": "What are some examples of digitally-disadvantaged languages mentioned in the Policy Review by Zaugg et al.?", "1c9b32d4-4ae7-4a96-a889-035f67276bb2": "What are the potential implications of AI-induced racial bias in hiring practices as highlighted by Yin et al.?", "9b7725c9-7b92-4fdd-a10f-8ec1530b228d": "How does the article by D. West suggest mitigating the risks AI poses to women?", "649f7414-376a-4e58-8d1e-23d5e7ed9d6a": "What methods did Yu et al. use to explore and understand jailbreak prompts in large language models?", "e78a0693-7005-4bea-89f2-c3ea112bce08": "What policy recommendations are made by Zaugg et al. to support digitally-disadvantaged languages?", "9db21a04-c438-4476-ac0a-82338eae49b2": "What are participatory engagement methods and how are they used in AI development?", "97081408-da30-4645-9afe-f3307ed6cdc7": "How can field testing improve the usability of AI-generated information?", "289db7b2-4d79-467d-a5d9-9d5b231a0264": "What is AI red-teaming and why is it important for identifying system vulnerabilities?", "2d57ed17-a6f7-46f9-81ca-95cb462059cb": "How does feedback from civil society groups influence 
AI system design and implementation?", "4922c3b7-23a1-4ecb-9156-b04943f93fab": "What are the benefits of using focus groups in participatory engagement methods?", "f2e88045-e05b-41dc-8cec-46bbf532cb91": "How do structured, randomized experiments contribute to understanding AI interactions?", "8e4237f7-4dab-4b3e-872c-636b750a8551": "In what ways can AI red-teaming help in preventing discriminatory outputs?", "6a0cc42b-ef86-4bfe-8697-1dafa8d40ca3": "How can surveys be effectively used to gather feedback from affected communities regarding AI systems?", "a27db742-1147-4e73-b8f8-fb28f863182a": "What role does public feedback play in the maintenance or decommissioning of AI systems?", "1da6cbec-681b-41b5-8fdd-7e46b4b39a69": "How can insights from field testing be used to improve data quality and preprocessing in AI systems?", "dc7b408c-d2a4-440a-8bd3-382b37949adc": "How can I find out if my personal information is stored in a federal system of records?", "12c4f43b-f79c-40bf-89c5-5de9f0101a4f": "What steps do I need to take to contest the contents of a federal record about me?", "928f7047-c52c-49a0-aad2-2d36efcef6dc": "Are there any exemptions under the Privacy Act that would prevent me from accessing my records?", "8ee048c2-ac9d-46c0-8558-0d5e5e8a8743": "What legal actions can I take if a federal agency does not comply with the Privacy Act?", "7785c38a-a968-421a-b0d8-c15fe4003fa3": "How can I request a federal agency to amend or correct my personal information in their records?", "5af5e28f-c346-483e-8b52-dabdb08f98cf": "What kind of monetary damages can I seek if an inaccurate federal record affects my opportunities or benefits?", "6eec9174-86c8-46fc-84dc-0a9dadffd18c": "What are the procedures for accessing my individual information stored in a federal system of records?", "d9e3443e-a2be-4b7f-9ead-f1b68a8decec": "How does the Privacy Act protect my personal information stored by federal agencies?", "ed5ff628-1277-45a4-a100-1c1d064b76a6": "What qualifies as an adverse determination under the Privacy Act?", "29499a6f-b07f-4cba-b4bf-90e40d038c36": "Can I seek legal relief if a federal agency maintains an incomplete or untimely record about me?", "dd201fcb-d5b4-47aa-9fa7-584fdc9a91d0": "How do GAI systems impact the preservation of endangered languages?", "f95cd300-7368-4898-875c-8a16e2699308": "What challenges do GAI systems present to model adoption and inclusion?", "08be409c-62b9-4046-a4e5-41f7aef94b1e": "How can the use of GAI systems lead to undesired homogenization in outputs?", "165031b8-01ee-4b7e-a254-95b7729cbac2": "In what ways might GAI systems make it more difficult to use endangered languages in everyday processes?", "77d2f46d-9e77-41fd-8367-3d123120a90e": "What are the potential biases associated with GAI systems?", "41496cd8-8df9-4ece-9b75-799e48764e3d": "How can repetitive aesthetic styles in GAI outputs affect cultural diversity?", "8e9ba476-d233-4ed8-8772-0fbd52872bb3": "What steps can be taken to ensure GAI systems do not contribute to the loss of endangered languages?", "10c840b0-30ab-46e2-9109-d9b669bd57ac": "How does bias in GAI systems reinforce the problem of homogenization?", "6e44c18a-5777-4822-9e66-8bc1a5a2f194": "What are the implications of overly uniform outputs produced by GAI systems?", "85eab450-b260-459c-acfc-4403ea487fee": "How can we address the challenges of inclusion and accessibility in GAI systems?", "93694198-599c-4796-8cf1-d528b4f3fb10": "How can organizations ensure that automated systems are kept up-to-date?", "2c321c09-c163-4b0c-81f8-f536580d0caa": 
"What are the best practices for notifying people about significant changes in automated systems?", "65b87922-c4d3-45bc-b744-c2b89f7b4788": "Why is it important to understand how an outcome was determined by an automated system?", "cffd7b3d-0081-4a33-80e6-28b3fbf26823": "How can automated systems provide explanations that are technically valid and meaningful?", "20e03c70-f24d-4dba-9cb5-042b4cba722d": "What factors should be considered when calibrating explanations based on the level of risk?", "760cfb81-f3c9-4194-a7d3-602f41fab3e8": "How can summary information about automated systems be effectively communicated in plain language?", "f709685b-47a6-44a7-8f9e-3aa0df73870e": "What methods can be used to assess the clarity and quality of notices and explanations from automated systems?", "16141df8-9867-4de3-bcb0-a2b9fac32680": "Why should assessments of automated systems' notices and explanations be made public?", "a44f2530-9479-450c-9cfd-272c1f2071d6": "How can operators and others who need to understand automated systems be supported?", "38270bdb-0dd7-458c-b93a-daebec80be45": "What are the challenges in ensuring that automated systems provide useful explanations to users?", "c699565e-04ef-419e-94cb-1b796a737007": "What is the AI Bill of Rights and how does it relate to Executive Order 13985?", "547dfa62-9140-4639-82df-a9142ce5286b": "How do the Fair Information Practice Principles (FIPPs) influence data privacy laws globally?", "9af0586d-74db-446b-8e26-38be4dd9afc6": "What are the core principles of the Fair Information Practice Principles (FIPPs)?", "eccf5725-f424-4268-b727-1f1a40978434": "How does the Blueprint for an AI Bill of Rights incorporate elements of the FIPPs?", "abeee4d7-e9fd-4575-a169-f5d27f7e36bb": "What is the significance of the 1973 report by the advisory committee to the US Department of Health, Education, and Welfare?", "66addcfc-c316-415e-b410-381db4715a69": "How does the AI Bill of Rights support racial equity and underserved communities?", "a172a3a2-d810-4b13-909a-903ececb2cd1": "What are the key elements of the Fair Information Practice Principles (FIPPs) relevant to automated systems?", "a5209e31-d0db-40ff-9bb4-1750d45a7717": "How do the principles of the AI Bill of Rights align with civil rights and civil liberties?", "8ed364a4-97b6-4794-a155-78239f56aa6b": "What role does the Federal Government play in advancing racial equity through the AI Bill of Rights?", "cf81588a-da58-41b7-8f19-e0ed9fa5cd06": "How are the Fair Information Practice Principles (FIPPs) applied in different domains like privacy and civil liberties?", "20cc838f-302b-4a97-b9c0-048b821e21d7": "What is an algorithmic impact assessment?", "11441d7e-f3c5-47eb-9661-dc9501b61db8": "Who is responsible for performing an algorithmic impact assessment?", "96391b37-e002-4444-bc88-af07e80d1be7": "How are corrective actions taken in response to an algorithmic impact assessment?", "f173dfc5-537c-4934-943f-6a4a8209556a": "What should be included in an algorithmic impact assessment?", "adb72817-95df-4fe7-be09-43be351339a0": "Why is it important to make algorithmic impact assessments public?", "98945bdc-d7f5-48af-828b-72fdff9b0735": "How can algorithmic impact assessments help in addressing algorithmic discrimination?", "ce1d92ca-dd4d-4c46-a861-5f738828da2a": "What is the role of disparity testing in an algorithmic impact assessment?", "6739f71c-fbe1-4126-86a2-75deefb42217": "How should the results of an algorithmic impact assessment be reported?", "b3952c44-c80c-4aa5-b364-771c28f8d963": "What are design stage 
equity assessments in the context of algorithmic impact assessments?", "ab8785d8-fdea-42dd-9207-1331f4d206dd": "Why is it important for algorithmic impact assessments to be machine-readable and in plain language?", "ad91ce86-6dec-4abb-97c6-044f2e6af46d": "What are some examples of issues caused by the lack of human alternatives in automated systems?", "2a47278f-8456-435f-a92f-ad1569f822a9": "How did the unemployment benefits system in Colorado fail applicants without smartphones?", "4baeb7cc-0f63-4432-a61e-2cca08742d2c": "What problems can arise from not having a human review option in fraud detection systems?", "7229c62f-af34-49e6-a1b2-244381296eaa": "Why is it important to have human alternatives in systems that distribute unemployment insurance?", "a551e275-7aaf-4936-8b4a-335333fd8c65": "How can automated systems incorrectly flag entries as fraudulent?", "3185d577-2897-470b-b66d-2b878c1635f3": "What are the consequences of not providing a human fallback in automated systems?", "6b956523-9518-4fff-b4c4-3cb46fe34560": "How did the hospital software error affect a patient's access to pain medication?", "f829362b-3144-4e70-9964-54ff3cf5f3c8": "What are the risks of relying solely on automated systems for critical services?", "04775f45-fc8c-47ae-a74f-788e790dbb7f": "How can the lack of human intervention in automated systems impact people's lives?", "ff3d43c5-1076-4f90-9595-d7507024df5f": "What measures can be taken to ensure that automated systems have adequate human alternatives?", "7a87b77e-ba2b-4a75-85d3-eff3e7a4f1b3": "What are the potential harms of inaccurate inferences made by AI models?", "2af440c3-e3bc-417b-9229-b989c418f726": "How can wrong inferences of PII lead to secondary harmful impacts?", "f9ad7469-3e3c-4909-8679-251103726804": "What is the impact of predictive inferences based on PII or protected attributes?", "94623f99-aa39-458d-bf0c-452bdeae335c": "How can AI models contribute to representational or allocative harms?", "a18f484d-0c10-474e-b56a-fc1b92977406": "What are some examples of dignitary harm caused by information exposure?", "caabe18b-3881-404c-bba9-113c918db0f3": "How can extortion result from the exposure of sensitive information?", "6cb41015-478f-4102-8f71-ca101130bef4": "What are the risks associated with confabulations in AI inferences?", "b45acd2b-d605-4e83-8246-a99cd8d2ffff": "How can adverse decisions arise from predictive inferences made by AI models?", "4f89b591-6f2b-4813-bd57-b6ee9e7cd684": "What is the relationship between harmful bias and homogenization in AI models?", "259e3515-ab8c-4f3d-8cbe-178ee51a7ff6": "How can inappropriate inferences of PII disadvantage individuals or groups?", "f7f7c66a-12a1-4a76-88e9-6719de9f8272": "What are the main problems that the principle seeks to address regarding automated systems?", "0404c1f3-7897-4222-838e-d468ed48b862": "How do automated systems impact employment opportunities?", "da09c33c-a2d1-4eba-b8bb-9cadf83a607a": "In what ways do automated systems shape experiences in the courtroom?", "84e11e40-3503-4afa-9c0b-3bd17751dc5f": "Why is the impact of automated systems often not visible to the public?", "fb6baf2b-0c39-43c6-8a23-5a6286b57d3c": "How can an applicant determine if a hiring algorithm affected their job application?", "5ae8a14f-7af5-4fe1-ad8d-daa3b64b7412": "What are the potential consequences of a judge using an automated system to make bail decisions?", "b86dc33e-c44f-40f8-9201-6e77c52b2d3e": "How can individuals correct errors made by automated systems?", "44365a55-f06d-44c0-86f8-5e766884f298": "What 
steps can people take to contest decisions made by automated systems?", "cb8c33b7-5edf-4f02-827c-e51d99fe8c20": "Why is it important for people to have knowledge about the impact of automated systems on their lives?", "57d78d01-d80c-4a0d-bffe-90e8c5435146": "What are some illustrative examples of the issues caused by automated systems in various sectors?", "a7190971-83cc-4e5b-ad51-d097c2d2416f": "What is automation bias and how does it affect human interaction with GAI systems?", "31534a0f-52cf-452d-9a29-f8ac561198f7": "How can human experts overcome their aversion to GAI systems?", "2c413bc7-6f0c-41fa-ac50-d2f146083ecb": "What are the potential psychological impacts of emotional entanglement with GAI systems?", "f493ae8c-e533-406a-8fc5-5c6bdaad070b": "How can over-reliance on GAI systems be prevented?", "589fdab2-8712-4a25-9c59-8f26e281a6dc": "What are the risks associated with confabulation in GAI systems?", "3f801419-15e7-46cf-8d7a-d2709b6b5792": "How does automation bias contribute to the homogenization of information?", "2acf1a52-6ed2-4c87-9d62-518df86733d3": "What strategies can be implemented to mitigate the risks of bias in GAI systems?", "68b6149d-8a90-4b40-b32e-6e844ff70543": "How can humans balance their expertise with the use of GAI systems?", "bfe3017a-f8b1-49b4-8022-1d6c6b18b595": "What are the benefits of using GAI systems despite the potential risks?", "66d721dd-e056-41a0-9177-35ec1a02f2a3": "How can emotional entanglement with GAI systems be managed to avoid negative impacts?", "7994cad7-21e2-4e09-aca5-f56ef1ae8a5a": "What are the ethical considerations for developing automated systems that could potentially violate safety?", "47e3ea93-fd88-4971-8203-e36fc3545d64": "How can unintended safety violations in automated systems be identified and mitigated?", "de90f1a6-2df7-4240-944b-87dad8ddb4c3": "What steps should be taken if an automated system is found to have safety violations after its launch?", "b0f19328-390f-4526-9bd5-58e3217d2939": "Are there any guidelines for rolling back or modifying automated systems with safety risks?", "3a98c1d4-e98a-4339-a8d8-666a62f395d1": "How can developers ensure that automated systems do not have unintended harmful consequences?", "3e20c7ee-70ab-4ad1-b14f-32dfe44d0c85": "What are the best practices for ongoing risk mitigation in automated systems?", "a13b0ccd-e3a9-4ab3-afc9-d9c863fbeb66": "How should companies handle automated systems that have been found to violate safety standards?", "d904e7ea-ad3b-4610-ab7e-0e02f95f7b78": "What are the potential risks of using automated systems without proper safety checks?", "2fbedd50-8ea8-4da8-a425-802da3096391": "How can the safety of automated systems be continuously monitored after deployment?", "1ec42e53-7159-49a1-9293-810d7a184a7f": "What measures can be taken to prevent the development of automated systems intended to violate safety?", "a0842cf3-00a4-4746-87d9-6ac45ada3ebd": "What are built-in protections for data privacy?", "b76e72b6-358d-4943-98de-cad3c4c392ea": "How can I ensure that my data is only used in ways I approve?", "4f6747b8-9ad2-45ed-8979-f605d5765654": "What are some examples of abusive data practices?", "8effaebc-2db1-4d37-852d-5b080b55344a": "How do design choices impact data privacy?", "ba664a46-2556-466b-94a9-e6c9881f6bc5": "What does \"privacy by design\" mean in the context of data collection?", "412a6863-0745-4d66-8b48-137fc5c03a6e": "How can I verify that a system respects my data privacy decisions?", "a96cda8b-8d4a-4085-b297-fbf4fc69d244": "What are alternative privacy 
safeguards if consent is not possible?", "c4c103d2-5129-4be7-bf42-4971da92e49a": "Why is it important for data collection to conform to reasonable expectations?", "4a2850be-c31b-461e-9165-f82e606b10ff": "How can I identify if a system is using privacy-invasive defaults?", "398d3a34-f7cf-44c4-81ea-d314e9f92196": "What should I do if I feel my data privacy has been violated?", "9b1e7e8a-3e78-4c98-a0df-24b37ca791cf": "What techniques can be used to mitigate biases in GAI content and data?", "0a69cf57-ac9d-423f-896d-b9e4e02f762a": "How does re-sampling help in reducing representational biases in generated content?", "164c840b-8cfa-4c20-a441-123e95824ffb": "What is adversarial training, and how does it mitigate biases in GAI content?", "e20bd84c-d838-4049-a2df-fba4b9d06ea1": "Why is it important to evaluate GAI content for representational biases?", "387e47d4-c086-4a07-aee6-28a9fa60cad7": "What are the potential risks of not addressing harmful biases in GAI output?", "3700f1e4-cf35-4c27-b2c3-9101bf5d8dfd": "How can re-ranking be employed to address biases in generated content?", "8c76e76d-83c9-4a57-a7df-b1c0e035ef8b": "What steps should be taken to analyze GAI output for harmful content?", "7d2386c3-a371-45d9-965f-2c7a9c11567e": "What types of harmful content should be checked for in GAI output?", "badd4da0-e5f0-426d-ab2a-1019b483cc97": "How can due diligence help in identifying potential misinformation in GAI content?", "4da36b86-9edb-4192-abf8-385aa94c36e8": "What are CBRN-related or NCII content, and why is it important to monitor them in GAI output?", "f377e766-01de-4a90-9986-ecb3ee8d61a9": "What are the best practices for designing consent requests that are easy to understand?", "4490f651-668a-4e60-9931-3c6e393d09b8": "How can user experience research improve the readability of consent requests?", "68e419bb-fa35-4890-98c7-4c8a11d9c78e": "What methods can be used to ensure consent requests are accessible to users with disabilities?", "838be5c8-7e39-41a0-8cc2-5d54526523b5": "Why is it important to avoid \"dark patterns\" in user experience design for consent requests?", "8540243f-924a-413c-bdad-29d5f1240c58": "How can consent requests be tailored to different languages and reading levels?", "c05097c0-b8be-4d9f-94ad-f28a74c0bbf6": "What are \"dark patterns\" in user experience design, and why should they be avoided?", "ed30f33c-f72f-4ca9-a986-e78ea5a84f34": "How can we ensure that users understand the contexts, time span, and entities involved in their data consent?", "078872ce-c620-44bc-b9d9-715ca524c82e": "What are some effective ways to test the comprehension of consent requests among users?", "c9c8531a-c33d-47fb-9c20-0a74872c9cd3": "How can user experience design improve the transparency of data and metadata consent requests?", "bd0d1036-d121-46a7-a63d-633b1fe52539": "What role does user experience research play in making consent requests more accessible and understandable?", "64ded336-d26a-4bba-9264-29a782f46b17": "What are the potential risks associated with the misuse of AI systems by humans?", "a1524a0a-e5c7-4b05-9291-a9f012c2f8a7": "How can the distribution of harmful deepfake images impact physical safety?", "69cb94c7-5d81-4222-9d53-052f0edd2720": "What are the long-term effects of disinformation on societal trust in public institutions?", "10493f76-3a15-41b8-a590-28e28ca5d22f": "How do the characteristics of a GAI model influence the presence of risks?", "0aa175b1-cf6c-4db9-95ae-790c02ed0420": "What factors should organizations consider when measuring GAI risks?", 
"e8a86a82-58ef-4852-8a9f-46998fe000e6": "How can the architecture of a GAI system affect its risk profile?", "e0c9a867-2ae3-4d5e-b74c-b2145e48b693": "What role do training mechanisms and libraries play in the safety of GAI systems?", "f497ff9f-1f6b-49ff-92b7-b97bbf4e0d33": "How does the availability of model weights impact the security of GAI systems?", "82c14235-06d7-4a04-8d16-eefd798350a9": "What are the implications of using different data types for training GAI models?", "6e3592cd-fc1e-495d-9976-3cea02adf35a": "How can organizations tailor their risk measurement strategies for GAI systems?", "bb8fd71c-8305-452b-bcbf-823fa999f419": "What are the Department of Defense (DOD) AI Ethical Principles?", "84ddfcae-5a84-4ca2-b23f-9f2993a78864": "How does the Intelligence Community (IC) AI Ethics Principles and Framework guide AI use in national security?", "6afc1f62-ea09-46aa-a9b2-a2231efa41db": "What is the Blueprint for an AI Bill of Rights?", "50dbd6b6-2f67-425a-8549-e938e1e443c4": "How can the Blueprint for an AI Bill of Rights inform national security and defense activities?", "33c03e38-7064-41dd-8c15-2570eff220f3": "What are the special requirements for AI in national security and defense activities?", "c062971a-0cf8-4ef1-93b3-5fca436d15d4": "How does the federal government ensure the use of trustworthy AI in law enforcement?", "3664af6f-9a6b-4750-9157-5b8c10c049bf": "What safeguards are in place for AI systems handling classified information?", "57f12af2-eace-4916-bf6c-caa521375c02": "How does the Responsible AI Implementation Pathway impact defense activities?", "3421dbb5-bc3b-4a71-9aba-9794e6c02c61": "What role does the Blueprint for an AI Bill of Rights play in the implementation of AI policies?", "1ea5b81c-aa5d-4fad-851a-f32df353f699": "How are existing policies governing automated systems applied to national security activities?", "bb9c137b-e957-4d1b-b681-1150450070b8": "What are the potential harms of using AI to generate non-consensual intimate imagery (NCII)?", "b0543347-b171-470d-ad58-452023d32540": "How can AI-generated content be managed to prevent the creation of child sexual abuse material (CSAM)?", "420598ea-3a86-4086-a5a1-8ca8b6a218a1": "What are the characteristics of trustworthy AI in terms of accountability and transparency?", "b2067f9b-b092-45f0-9fef-ed8c776f2ec2": "How does AI-generated obscene content impact privacy and psychological well-being?", "9d318383-e177-487e-b4cc-8d2e8e8fcc13": "What measures can be taken to ensure AI-generated content is fair and free from harmful bias?", "78e29c0f-c807-43a5-87c1-f4c5adab2595": "How can the spread of AI-generated deepfakes be controlled to protect individuals' likeness and voice?", "1d9240a9-0d09-4243-869a-45ed6bd3e9db": "What are the legal implications of generating explicit or obscene AI content?", "0dd67cb2-1fd7-4f2a-8672-8384a5b08a73": "How can AI be used responsibly to avoid creating degrading or abusive content?", "558abc03-0ada-41ab-b040-b6a193e5b238": "What are the downstream negative consequences of AI-generated CSAM on law enforcement efforts?", "a03bd0ad-5366-46e0-b712-e2748c71adf5": "How can privacy be enhanced in the development and deployment of AI technologies?", "7d190ccc-08eb-4795-975a-b70b5b5615ec": "How does surveillance impact student expression in educational settings?", "36548052-71c8-4515-ac0b-ad22a66ff05f": "What are the potential negative effects of surveillance on tenants?", "9ac8b717-1ae9-40dd-a3f6-5d158e342089": "In what ways does surveillance blur the boundary between work and personal life for 
workers?", "6b5a119e-29e8-4818-8d39-1e9518975f3e": "How can surveillance exert damaging control over workers' lives?", "5a0058ad-c9e9-4177-8811-35405c083ed1": "Why is the misapplication of data from criminal justice settings problematic for housing access?", "a1134775-18f2-4e09-af57-88092617afdc": "How does surveillance technology shift the burden of oversight from employers to workers?", "1dc6b0e5-60ff-4939-b09e-5d09b8373385": "What are the concerns regarding the use of surveillance in schools?", "b0b4a730-7836-419d-9d06-8a4e167aa24e": "How does surveillance technology affect equality of opportunity for tenants?", "3e475a7b-f183-48f1-9831-ecd371bfa399": "What are the chilling effects of surveillance on student behavior?", "91cdf914-e675-47bf-93d5-c257b52a9a51": "How should the assessment of surveillance technologies be conducted to ensure fairness?", "2e1a0ef5-cf28-488d-8c12-76d6037e5541": "What is the process for a human review of criminal investigative matters?", "ea3e3a56-e8b5-4d12-9369-363db39673a1": "What are the statutory requirements for judicial review in criminal cases?", "dfaba32e-19b0-4171-9fff-cc08fe20d751": "How do civil rights laws protect against discrimination?", "399b8802-887c-4184-b0f2-1a2bb5d07543": "What role does judicial review play in protecting civil rights?", "9dd9b476-9945-476b-9194-d8c83c4f9121": "What are the key elements of civil rights laws in the United States?", "3ed61f5b-fb54-4c4c-b6e5-0883b23b939d": "How does the judicial system ensure compliance with civil rights laws?", "dc4b76d1-b12c-4871-9175-8b56b5ad43ec": "What are the common types of discrimination covered under civil rights laws?", "a2334897-6fd9-497a-98b9-6686d453c2ad": "How can individuals seek judicial review if they believe their civil rights have been violated?", "0c5ef9a5-ec71-4016-8ae6-d82d2f95a1c5": "What is the importance of human review in criminal investigations?", "7a2b2ba9-a488-4c90-b169-70168efcc780": "How do statutory requirements influence the judicial review process?", "3fd5f3df-b1eb-4d8b-ab8d-07dddcc7fb48": "What are some effective strategies for testing AI in crisis situations?", "3be1b1c1-3098-49a5-b204-34bfa97d73b3": "How can we ensure information integrity in AI systems?", "7a009e96-556c-4842-92a2-a7b32f8146b7": "What methods can be used to identify and mitigate harmful bias in AI?", "25fac591-48bb-4ed2-bded-de52900f6c77": "How do we address the issue of homogenization in AI content?", "ec3564d2-3ce8-4426-b4d7-b66f34d730e8": "What are the best practices for handling dangerous, violent, or hateful content in AI?", "8e35ecf3-062b-4a03-810e-8eae31b35e67": "What roles do domain experts play in AI design and development?", "a0f073dc-3521-48db-9264-58f3bd671c3a": "How can end-users be effectively involved in the AI development process?", "29b2f6cc-206a-4bd6-98c7-e2c851e3a8b2": "What human factors should be considered during AI operation and monitoring?", "a86dc0bf-80f8-4a7b-969d-cf58b6502c4d": "How can AI systems be configured to handle ethically sensitive contexts?", "481d542a-46dc-4b5e-8bbc-a6e1094820a1": "What are the key tasks for AI actors in ensuring safe and ethical AI deployment?", "4a6050b2-f6b6-4ac9-939c-2e1188e92e89": "What are the key principles for designing ethical automated systems?", "72d17156-9c97-4463-a90b-1619a6608c3d": "How can automated systems impact public rights and opportunities?", "85b5625a-d5ea-45b5-b7b3-e66d4556da73": "What steps should be taken to ensure automated systems are fair and unbiased?", "8b7eeeff-ae14-458b-978e-5c1fc9573219": "How do you 
integrate ethical principles into the technological design process?", "7b875bc5-cd9a-4069-bcfa-6de556eb6548": "What are some examples of automated systems affecting access to critical needs?", "ee228074-989d-4a9b-bb46-df0a26dfda27": "How can designers ensure that automated systems do not infringe on public rights?", "89b66459-d623-4a30-a195-8da50e72a096": "What role do ethical guidelines play in the development of automated technologies?", "9a2bb4f3-2d1a-4689-9dfd-70c1a4fcfade": "How can we measure the impact of automated systems on public opportunities?", "ea968134-16f4-4d5c-810d-6049e27309d5": "What are the challenges in implementing ethical principles in technology design?", "a62f4790-c903-4ecf-bd87-b92068d5138a": "How can automated systems be designed to enhance public access to critical needs?", "9e50e870-f706-492e-b58e-93871354422c": "What are aggression detectors and how are they being used in schools?", "56433327-6403-42b5-bf0e-0d4ac4b47722": "How effective are aggression detectors in monitoring student behavior?", "bfb3f439-fb7d-4a61-b36f-ce7e06ea95fa": "What are the privacy concerns associated with using aggression detectors in schools?", "2e31e7e4-1966-455f-9c93-2f0416756f97": "How did cheating-detection companies profit during the pandemic?", "552e3fce-d322-41d0-a8d1-3af4d9b6d239": "What are the main arguments students have against cheating-detection companies?", "9f9d57e8-6574-422f-a98d-9523410c912d": "How does virtual testing disadvantage disabled students?", "6f686792-3271-44f2-962a-d0c914f0a618": "What are some examples of ableism and disability discrimination in new surveillance technologies?", "608ada78-40e7-49ac-a75a-e38f46e3d296": "How are new surveillance technologies impacting education for disabled students?", "44ee99fa-4fa1-4199-a49e-31e3f890d5de": "What measures can be taken to ensure virtual testing is fair for disabled students?", "b9dd8dbc-8dcc-4aa5-ae17-13bc77130913": "What are the ethical implications of using invasive surveillance technology in schools?", "f49391ff-b450-458f-8e1d-f4b2b768a48b": "What role did the American Civil Liberties Union (ACLU) play in the development of the Blueprint for an AI Bill of Rights?", "3c010abe-44a4-4a61-9b06-c9c60c98d68b": "How did the Aspen Commission on Information Disorder contribute to the discussions on AI oversight possibilities?", "815c1373-abe0-4296-ac84-38501b0c77cd": "What insights did the Australian Human Rights Commission provide regarding the potential harms of AI technologies?", "9eec3afd-f85a-4f17-a4d9-2c0989096d84": "In what ways did the Brookings Institute influence the positive use cases of AI discussed in the OSTP meetings?", "b795e850-79da-4183-9560-e011ed68a0a6": "How did the Center for Democracy and Technology participate in the development of the AI Bill of Rights?", "0ed96dda-2f6e-4335-86b8-3a5d06011bbd": "What specific ideas did Deepmind offer during the OSTP meetings on AI technologies?", "2a6ac783-bbc1-4474-9864-2029c75aea04": "How did the Center on Privacy and Technology at Georgetown Law address privacy concerns related to AI?", "3e13f6fd-312b-432b-9012-933f8b0a169a": "What contributions did the Data and Society Research Institute make to the discussions on AI oversight?", "7b11f5c5-827e-4e70-85f7-d83417de66c7": "How did the Electronic Privacy Information Center (EPIC) engage in the conversations about AI harms and benefits?", "fc17d202-010f-4306-916d-724cc681a109": "What was the focus of the EdSAFE AI Alliance's participation in the OSTP meetings?", "690220f2-52f5-463a-a2bc-b46c7e1aa79c": "What 
are the key privacy risks to consider during the development life cycle of an automated system?", "e2b24491-dd1a-45d0-a381-86ca5ec207df": "How can privacy risks from reidentification be mitigated in machine learning models?", "56f6feff-327b-4075-ac32-9d237ff6468f": "What are some potential harms to non-users of an automated system due to inferred data?", "425a7779-bf1b-4757-8833-e612c9b1ea05": "How should data collection be communicated to individuals whose data is being collected?", "e03f414b-1fb3-4067-99fc-cd4b122f509d": "What legal considerations should be taken into account when collecting data for training machine learning models?", "0d06fec9-3c74-49df-bc13-1e3ac9679a8d": "How can user experience research help in ensuring that data collection practices align with user expectations?", "b13e829f-73e8-4b70-9adf-6ea6b7b298e8": "What are some effective technical measures to minimize privacy risks in automated systems?", "ad29fd63-96d1-4788-8c4e-794dd0d3f326": "How can policy measures help in mitigating privacy risks associated with community surveillance?", "cefe533c-58c1-4179-b1f7-9134e20f1d19": "Why is it important to minimize data collection in the development of automated systems?", "61db5427-3644-439f-9a42-012708bb8917": "What steps can be taken to ensure that data collection for machine learning is consistent with user expectations and desires?", "7d332e8c-9943-4cdf-8bb4-1a3b498766ea": "What is the importance of tailoring explanations to specific audiences?", "b8324cd6-d521-45f3-8ba9-08ce631df758": "How can user experience research help in assessing the effectiveness of tailored explanations?", "05f09f2b-477a-4dbf-aaec-b5af1923c6b1": "Why might an explanation to a decision subject differ from one given to an advocate or domain expert?", "f022ec7a-8358-48d6-824c-0c26e658b35a": "What mechanisms can be used to build understanding and intuitions for a stated purpose?", "469f8389-8d8d-4b27-a69a-f16514c85c43": "How can explanations be effectively targeted to different audiences?", "5e39df0c-36a5-4afa-a2f9-6c5e341815f9": "What are some methods to assess the tailoring of explanations?", "8d3feca0-94b3-4b4a-a785-23a3b9b9a23c": "Why is it important to clearly state the audience for an explanation?", "3a3c3db2-4132-4478-a847-b36068c56e01": "How does the role of the recipient (eg, subject, advocate, expert) influence the type of explanation provided?", "15467e60-3777-472b-bd0b-47bf9f467a78": "What are the benefits of providing tailored explanations in decision-making processes?", "9a7b141e-920a-487a-a055-e1e9f4f1b2e6": "How can plain-language statements about causality differ from other types of explanations?", "ab11ab5b-818a-4880-8e91-ed269bd1af2c": "What does it mean when a document states that mentioning commercial partners is for information only?", "df1199d7-da58-4fef-8f9f-07735134932e": "Why do some documents include a disclaimer about not implying endorsement by a US Government agency?", "5958036c-3258-4303-8b1a-1622e068421a": "How should I interpret references to academic partners in a government document?", "5a32b7cd-ddd5-4513-abdf-563e5cd76816": "What is the significance of stating that materials or equipment are not necessarily the best available?", "fee5f65d-cb22-46ee-8464-222e373ff06b": "Why might a document specify that it does not intend to imply recommendation of any products?", "4a58ee08-f3de-45fd-907f-e153459dc81e": "What is the purpose of including a disclaimer about non-profit partners in official documents?", "d42d824c-1120-43e8-9235-7572b76c40a4": "How can I determine if a 
product mentioned in a government document is endorsed by the agency?", "4a312aa6-5cb4-4c06-8984-88e91ac45dc4": "What should I consider when a document mentions commercial entities but includes a disclaimer?", "d04730cf-a6d4-47bc-a699-0e3df94761de": "Why do government documents often include disclaimers about the quality of mentioned entities or materials?", "14564a8a-cdac-40c0-a780-78be519b65dd": "How does a disclaimer about not implying endorsement affect the credibility of the information provided?", "569527ac-7ac6-431b-875e-d434eb402c21": "What is AI red-teaming and how does it help in assessing privacy risks in AI systems?", "8389f03a-5744-4818-89fe-c077ff7f056c": "How can AI systems output training data samples and what are the associated risks?", "1ccccd94-2c43-4041-a4bc-08e24d811d35": "What are model extraction and membership inference risks in AI systems?", "93b0c907-004d-4f32-8b53-a052b75beeff": "How can AI systems reveal biometric or confidential information, and what measures can be taken to prevent this?", "a7f6c8ab-1b04-4dfa-9f57-4235258646b6": "What is the importance of engaging with end-users to understand their concerns about content provenance in AI systems?", "526267ce-38f4-4e58-b51d-6ae5ff0cf0dd": "How can AI systems track or reveal the location information of users or members of training datasets?", "b79cbec1-977d-4eb4-83ae-3d2d9ac42e08": "What are some techniques for designing provenance data-tracking in AI systems?", "9fa37ff2-7f29-4880-94c1-06fb3c6e92d5": "How does the Human-AI Configuration impact the privacy risks of AI systems?", "93413343-a62f-4859-8ee9-a611536c2b93": "What role do domain experts play in the operation and monitoring of AI systems to ensure privacy?", "48834fa9-9abe-4fae-a27c-636139f88c50": "How can intellectual property be protected in the deployment of AI systems?", "65958d5c-13fc-4718-8038-77c445872b42": "What are the key expectations for automated systems in terms of data privacy?", "ffe21033-700d-4f6d-aace-0e85d2620745": "Why are traditional terms of service considered inadequate for protecting privacy?", "8c46db62-ddab-49e2-b983-32432a14f583": "How can built-in privacy protections enhance data privacy in automated systems?", "4a376bd0-31f5-4955-8cfe-1e0e65d8f7e0": "What is meant by \"data minimization\" in the context of automated systems?", "7e329714-1d9f-4b04-926d-a8d79ddfdd9d": "How can transparency be ensured in the use and collection of personal data by automated systems?", "7d478e4e-d640-4a81-a971-a2d88d655d78": "What mechanisms should be in place to allow users to control access to their data?", "61151ee6-a28e-4a40-97ad-d1edeee36eb6": "How does \"privacy by design and by default\" differ from traditional privacy measures?", "b3576af0-4c36-49f5-a0e8-247d2ec6f65d": "What are the benefits of having clear mechanisms for users to manage their metadata?", "b624d6cb-faa1-4468-96a8-136321416574": "How should automated systems handle the sharing and storing of personal data to meet privacy expectations?", "8ecd7751-64ef-421e-a44c-3231886cdc17": "What role do technical standards and practices play in the development of privacy protections for automated systems?", "632ebbb0-13c7-4a44-b49f-bb277513d09c": "What is the importance of verifying deduplication in GAI training data samples?", "e06e7091-ac74-4abc-bc2b-da388c1149b9": "How does synthetic data impact the deduplication process in AI training?", "64518426-3d37-4769-9d8d-826b0577f97a": "What are the potential risks of not verifying deduplication in AI training data?", 
"dc8b4a4c-213f-4f2e-932c-a9a6f8cdadcc": "How can harmful bias and homogenization affect AI deployment?", "1bcb29e6-07a9-43ff-a455-16802ef28195": "What roles do domain experts play in AI impact assessment?", "19e66fef-7521-471f-82e2-f6884868acaa": "Why is it crucial to involve end-users in the operation and monitoring of AI systems?", "488f2216-2ff2-4ee4-9851-37299ad2c6c7": "What are the best practices for ensuring information integrity in AI systems?", "64edf8a2-3462-4be0-976d-1fddeea48012": "How can AI actors mitigate the risks of harmful bias in AI systems?", "b841ebb5-559d-4fe8-881f-c79ff7004ee3": "What is the role of TEVV in AI deployment and monitoring?", "5a0bd7f7-a52e-418f-9ada-e1fe84848613": "How does deduplication of training data samples contribute to the overall performance of AI models?", "64a056c2-8ae4-4280-874c-4a01461cc3e4": "What are some examples of state and local governments responding to AI-related problems with legislation?", "d69e8d1e-a600-475f-9486-61b3c75a2df4": "How have courts extended longstanding statutory protections to new and emerging technologies?", "2dbbbadc-b880-41d0-bb6c-e682dacb3df7": "What companies are known for incorporating additional protections in their automated systems?", "2827fc62-14af-4e2c-b663-ba391d6d25ba": "What innovative guardrails are researchers developing for the ethical use of AI?", "7fb6fed7-6aae-4ccc-b597-ab7e62e9b21f": "What are the key principles proposed by advocates and researchers for the ethical use of AI?", "44938ddc-9358-4c82-94f2-840413742d55": "What is the OECD\u2019s 2019 Recommendation on Artificial Intelligence?", "61da46c4-d66a-4522-bc97-af4fc2456222": "How has the United States adopted the OECD\u2019s principles for trustworthy AI?", "3f063cef-02a0-408b-98c7-56446e0fc13b": "What are the main principles outlined in Executive Order 13960 on AI?", "b4c11f6f-dc78-45df-9b02-6c9e97e63441": "How does Executive Order 13960 aim to promote the use of trustworthy AI in the federal government?", "a88f3780-0edd-49b8-89f3-212ba3ce8b3a": "What role do government organizations play in proposing principles for the ethical use of AI?", "ba2105e2-73d4-4990-9944-6d240e81707f": "How can automated systems be designed to prevent discrimination in loan approvals?", "17ebc163-d3e6-449c-980f-7ed26f839df6": "What are the basic safeguards against bias in automated systems?", "aab856e9-8ff2-46d9-893b-b6d07ab393c6": "How do automated systems impact underserved communities?", "cbad6951-7c8a-46df-bb70-c92a3f311a56": "What measures can be taken to ensure fairness in automated hiring processes?", "79e16b79-9d36-4087-8659-146694974dfc": "How can we ensure that automated systems treat all people fairly in the criminal justice system?", "1b05d0a8-49d2-47be-ab48-25a75c6e77f6": "What are the potential biases in using educational attainment in loan underwriting models?", "a980423a-f92e-407d-8954-c365cd973ffd": "How can we protect against abuse in automated medical treatment and payment systems?", "70182aa9-6598-41d8-94c5-c86d69052289": "What proactive protections can support underserved communities against automated system biases?", "c0cdbc54-4086-43e2-9a8b-0e45b61ecc4e": "How can nontraditional factors in automated systems lead to discrimination?", "732b7212-2022-429b-9d2d-ea2f1ff7d3f2": "What are the implications of higher loan prices for HBCU graduates in automated loan pricing models?", "9875b4af-3367-4c2e-9a31-faf2702adfa5": "What are the best practices for reviewing and documenting the accuracy and relevance of data used in different stages of the AI 
lifecycle?", "6fe66997-2216-429a-9ef4-7fe53eb306b3": "How can harmful bias and homogenization be mitigated in AI systems?", "36ec6e8f-35e5-4186-b6d7-f265dcdef11f": "What techniques are effective for fact-checking information generated by GAI systems?", "7d63e0e7-2c2e-4330-bdf2-4c7e114b7805": "How can the accuracy and veracity of information from multiple or unknown sources be verified in AI systems?", "bfa45ef0-bea8-47fc-b980-0b69ed2e1008": "What methods can be used to develop and implement testing techniques to identify synthetic media produced by GAI?", "6df2f1c4-ff8a-4107-a463-f56276352018": "How can one distinguish between GAI-produced content and human-generated content?", "51560d91-654c-4d78-bfbe-98c8ec9efde0": "What are the recommended plans for regular adversarial testing of GAI systems?", "9fde8cf0-b0b2-45f7-ad80-cadc55db3835": "How can vulnerabilities and potential misuse of GAI systems be identified through adversarial testing?", "b5e1a791-bb5b-42e1-b095-4eddeb8cbe89": "What are the key tasks for AI actors in AI development, domain expertise, and TEVV?", "3d828c80-9b73-4da4-a5ca-9009d879a58e": "What processes should be in place to ensure operator and practitioner proficiency with AI system performance and trustworthiness?", "8fd42408-bbd3-4239-b478-4be231789ff6": "What are the benefits of providing limited waivers of confidentiality for automated systems?", "41478b94-524f-4cb6-a05f-d28d7bf4f9c5": "How can designers protect intellectual property while allowing meaningful oversight of automated systems?", "607f4dd7-ca7e-4ee9-b4a8-f5f9760aeb1f": "What measures can be taken to ensure trade secrets are not unwarrantedly disclosed during legal discovery?", "dc9d51d9-e01e-4c24-8bc5-f2d310dd159f": "Why is meaningful access to source code and documentation important in sensitive domains?", "d012d297-b626-462a-868b-099e4ebe5d08": "How can automated systems be designed to provide built-in explanations for high-risk scenarios?", "19032e5d-0bf0-4167-86e2-54f143702e03": "What is the principle of Notice and Explanation in the context of automated systems?", "46b67447-c9d2-4689-b32c-eb292015ef9b": "How can fully-transparent models benefit the examination of automated systems?", "bbd073b0-20ee-4d44-ba79-37e22da7f19b": "What are the challenges of balancing confidentiality and transparency in automated systems?", "9fd04f5e-f2a7-4d3d-b5fb-3c5182e8df33": "How can court orders be used to protect sensitive information during the examination of automated systems?", "4a50706f-2be3-4191-a799-d03cfb6bd582": "What role does legal discovery play in the oversight of automated systems in sensitive domains?", "ef610b4b-0ce3-4569-b444-685ef6d6f71d": "What is algorithmic discrimination and how can it violate legal protections?", "3332f97e-e957-4370-b156-5b2e74510318": "How can designers and developers protect against algorithmic discrimination?", "38324b8e-0942-49f8-88c3-e261660ac848": "What are proactive equity assessments in the context of system design?", "9676f8ec-7e8e-461d-a77e-337c20095d6e": "Why is it important to use representative data in automated systems?", "07c06943-1c60-4690-a161-f2c3d04c88b7": "How can proxies for demographic features lead to algorithmic discrimination?", "f8088e52-e35c-4f3b-8479-e530e41e83c5": "What measures can be taken to ensure accessibility for people with disabilities in automated systems?", "d218c959-31ff-4b08-8feb-b6cd5b7f9808": "What is disparity testing and why is it important before deploying automated systems?", "fcda6d4e-26ec-4b88-b9e2-33410629db75": "How can 
organizations provide clear oversight of automated systems to prevent discrimination?", "358806ca-03c5-4219-8f5d-7d6bd50c4a51": "What is an algorithmic impact assessment and why is it necessary?", "95dfc992-0553-4499-94a7-d42021d7a805": "How can independent evaluation and plain language reporting help in mitigating algorithmic discrimination?", "3c860073-dead-456a-9dd9-746a0c1f2cc0": "What is ballot curing and how does it work?", "6e31af36-593c-4b4b-a439-f6361f629e60": "Which states have ballot curing procedures in place?", "bcac432d-f310-4e4c-8294-803789746673": "Are ballot curing procedures constitutionally required in all states?", "59c27c92-ad44-4eec-b9a6-c79134babf4b": "How do election officials contact voters for ballot curing?", "753f8d63-ebe4-412c-8c70-4b6defdfe6bf": "What kind of information might voters need to provide during the ballot curing process?", "838c69d0-ff96-462d-a5ec-c67efa2bcca9": "Can a voter cure their ballot if they missed the initial deadline?", "c7ec0382-b12d-4075-915b-a29a72506cfc": "How does the ballot curing process ensure the validity of a ballot?", "22887c27-fdf6-433a-9832-9a9115f9683a": "What are the common reasons a ballot might need curing?", "fdad09f6-1e41-4f9c-9715-fffb065295c1": "How long do voters typically have to cure their ballots?", "e1115d17-d43b-4b36-8792-e50c67b9bb8c": "Are there any states that do not allow ballot curing?", "ebd1eb6d-41f7-4335-b070-d78501a2156e": "What is algorithmic discrimination and how can it be avoided?", "64a7ca44-9bdd-4f2f-bdb1-baed69c8d781": "How do proxies contribute to algorithmic discrimination?", "20367a8b-c64a-4864-bc1b-3d280f351a0e": "Why is it important to test for correlation between demographic information and attributes in data?", "d9858772-aa90-4a5a-be0c-fc5b5b333bd8": "What steps should be taken if a proxy is identified in a system?", "57ce6697-9b12-431e-8391-82fdd25840ee": "How can organizations ensure that a proxy feature is not given undue weight in decision-making?", "39f694c3-4c98-4d86-b3e2-28fd601599af": "What are some examples of attributes that might serve as proxies for demographic features?", "14fb21ac-2111-40ac-bda5-782d92f9b5a6": "What legal implications can arise from using proxies in algorithmic decision-making?", "fb146223-b6a8-49be-bb4d-631246b9b613": "How can alternative attributes be identified if a proxy is found in a system?", "b11ec9e4-e31c-4b32-b7d2-e9801186fd91": "What are the best practices for monitoring systems to prevent algorithmic discrimination?", "8fd37fe4-2c42-4678-bce5-922ed9d2dae5": "Why is proactive testing crucial in the design, development, and use of systems to counter discrimination?", "4457a37e-6d0e-411d-aea4-b1c32f5a127d": "What are the limitations of AI in different contexts of use?", "a7e21fa9-a576-4c9c-b7b8-cf1dc782ba81": "How can structured human feedback improve AI systems?", "bdaa7693-aa11-46bb-a39b-d7e351b3fff2": "What are some anticipated human-AI configurations in the future?", "bc7e0482-14cb-4e29-83b9-d34e973bcfb4": "How can harmful bias and homogenization in AI be mitigated?", "06906bd2-d444-4b50-86fc-e7d4b6caa7be": "What measures can be taken to prevent AI from generating dangerous, violent, or hateful content?", "e7276fb9-5e30-4e77-a07f-98fae0b0a30c": "How can organizations identify and document illegal uses of AI systems?", "416fc8f9-4237-4877-a514-88013a360abd": "What are the risks associated with AI in handling CBRN (Chemical, Biological, Radiological, and Nuclear) information?", "35cbeeef-7459-4fe7-bb33-74fc872c987a": "How can interdisciplinary 
teams contribute to the development of AI systems?", "e5be6c98-b642-4a1b-968b-8d3d71eb0fd3": "What competencies and skills are necessary for AI actors to establish context reflecting demographic diversity?", "f6636c23-4d93-4bb1-abf3-5ef8d69586c7": "Why is interdisciplinary collaboration important in AI development?", "6e27bf91-4131-4e12-b6fa-cc6d5997f88f": "What are the long-term performance characteristics of Generative Artificial Intelligence (GAI) that remain unknown?", "ff520072-afb0-4b73-9f62-29ac419bf82d": "How can we ensure information integrity in the deployment of GAI systems?", "4e3cb88c-db89-46db-bfaf-6efc6c06e573": "What are the potential dangers of GAI generating violent or hateful content?", "1ba43e0c-45c7-42b2-9dcf-0edbb0e71b32": "How can a plan be devised to halt the development or deployment of a GAI system that poses unacceptable negative risks?", "82d470a2-1e99-4a76-92f8-281573661995": "What measures can be taken to prevent GAI systems from generating content that violates the law, such as CSAM or NCII?", "2e5d1e86-46b3-4528-b9e2-cbe1e372e99b": "What are the risks associated with GAI generating obscene, degrading, or abusive content?", "c53e6933-dd22-42ec-865b-0cfc8a4a6f10": "How can harmful bias and homogenization be mitigated in GAI systems?", "60fdcce3-24cf-46ff-bd37-71e20af72d5b": "What are the best practices for establishing transparent acceptable use policies for GAI?", "35c63a1b-39bb-4e6e-87d1-597fbe552fb0": "How can governance and oversight be effectively implemented for GAI systems?", "7e800537-d1f9-48cc-b514-dc45ba1e805f": "What are the key components of a risk management process for GAI that ensures transparency and aligns with organizational risk priorities?", "8caffdb8-0e56-4747-8e14-5076b635e19f": "What are the key roles and responsibilities for managing AI risks within an organization?", "7a804990-826f-4344-923d-164698caefdd": "How should an organization document and communicate AI incidents to stakeholders?", "1fefa53a-88bb-48ff-8f41-3aeae62bce74": "What procedures should be established for engaging teams in AI system incident response?", "b4eb2632-537c-4adb-8f56-5f7392106c81": "How can an organization ensure that AI Actors have the appropriate skills and training for incident response?", "3e0d09b6-f68d-4dc8-bef2-d2295751db89": "What are some official resources for reporting AI incidents?", "b9023848-3972-4a1d-9214-477117a0d3df": "How can harmful bias and homogenization be addressed in AI incident response procedures?", "8b69e61b-4fce-477c-8e17-3871ba07c8ca": "What is the importance of having clear lines of communication for AI risk management?", "3003c533-bec8-424a-a54a-5bbe26b84144": "How can organizations verify the skills and training of AI Actors involved in incident response?", "6e59a970-f171-4ff5-a512-b3d6396bb916": "What are the benefits of having diverse teams for AI system incident response?", "30812fe9-ef65-459a-9731-9e837e6be7cb": "How should organizations integrate value chain and component integration in AI risk management?", "8abc3501-531b-42b8-9cec-6ed049ec1079": "What are some appropriate responses to identified privacy risks in data processing?", "473df226-ad88-4f10-8241-ea6b0ab61506": "Why is it not appropriate to transfer privacy risks to users via notice or consent requests?", "a74c77b8-07cd-430c-8aac-7b1585bc505d": "What are privacy-preserving security best practices for automated systems?", "c4688abb-c2c2-46c4-86b1-0261e15da71b": "How can privacy-enhancing cryptography help in maintaining data privacy?",
"003673d2-5ecb-440b-80be-5cd34bbaa356": "What role do fine-grained permissions and access control mechanisms play in privacy-preserving security?", "cb249eee-5357-4ed7-99fb-8da55348c57c": "Why is it important to ensure data and metadata do not leak beyond the consented use case?", "af4ab8cd-cae8-4e7c-922b-8e193f1f5383": "What are some examples of privacy-enhancing technologies?", "8fbca3b4-2fde-4348-a2e2-1dede9fbabb2": "How can entities balance privacy risks and benefits in data processing?", "39179d19-89d2-4969-a2e9-b1c27831cd8f": "What are conventional system security protocols that can be used alongside privacy-enhancing technologies?", "4e933b6b-f6f3-45a9-aa9d-c54fc2d80ade": "How can entities ensure users understand the privacy risks associated with data processing?", "7a230dee-19b1-4409-ac9b-4fd593416a92": "What are the main challenges for consumers with the rise of AI-enabled products and services?", "e0de42d1-9769-4cec-a48a-c883f8a5c316": "How can communities benefit from the growing ecosystem of IoT devices and smart city products?", "1bbc381a-2090-45be-8cad-d9d9d862608d": "What role does the Federal Trade Commission play in protecting consumer privacy in the context of advanced platforms and services?", "93fdcd7f-ecce-4c63-80b4-7e3b65a0ec49": "How can consumers ensure their rights are protected when using AI-enabled consumer products?", "e3a6aac7-0d0f-47f2-a09a-5a902286de83": "What are some potential risks associated with the use of smart city products and services?", "9343529d-ade3-4b58-8c53-8a84ae0b6620": "How can policymakers balance innovation with consumer protection in the digital age?", "1402d638-7b8d-4525-9c64-93611b63063a": "What strategies can be employed to enhance consumer trust in AI-enabled products and services?", "2b45e986-94f6-441b-a5b6-ad7d5710929e": "How do IoT devices impact consumer privacy and what measures can be taken to mitigate these impacts?", "60e72442-44d0-4e8e-8f79-1d3e0616d45d": "What are the opportunities for improving community well-being through the use of advanced digital platforms?", "5f17138e-15e8-4f1b-9538-d62720ee1357": "How can consumers stay informed about their rights and protections in the evolving digital landscape?", "5c5c28fe-f220-4cfb-b5dd-59fa4fc0ed70": "What is the NSF Program on Fairness in Artificial Intelligence in collaboration with Amazon?", "1ecd21e2-5962-46c3-8ac6-39f368d94cb0": "How does automatic signature verification software threaten to disenfranchise US voters?", "9a291c46-0178-49d0-a45a-eb38560f5715": "What is the cure period for absentee and mail-in ballots according to Ballotpedia?", "df46e7f9-256e-4668-a774-41a48f74c576": "How can you tell if two mail ballot signatures are by the same person, as discussed by the New York Times?", "3bac4694-ad52-44b3-a9b8-bdd45f2f3526": "What are the main points discussed in the article \"The Low Down on Ballot Curing\" by Rachel Orey and Owen Bacskai?", "caf5e917-228f-452a-aa7e-5e4722643f18": "What are the potential issues with automatic signature verification software in the context of voting?", "d769a613-d040-42c5-b109-da27d5f9787e": "How does the National Science Foundation's program on AI fairness aim to address biases?", "2f8b1ef4-df6f-4ba6-9615-e1a85909f4af": "What are the steps involved in the ballot curing process?", "ace15994-5883-41c6-a269-9979b4205fbd": "Why is the cure period for absentee and mail-in ballots important for election integrity?", "e1eb277b-b995-4e63-a1eb-9180ffd6f139": "What examples of signature matching challenges are highlighted by Larry Buchanan and 
Alicia Parlapiano in the New York Times?", "076a1fc1-5b6b-46f0-b2b6-48cfac0d15e7": "What is the importance of sharing pre-deployment testing results with relevant GAI actors?", "64993f9a-4b76-4a8f-874c-3d273367f43f": "Who are considered relevant GAI actors in the context of system release approval?", "153b18fe-97ea-4516-a51c-8aee0b2f8f07": "How does pre-deployment testing contribute to information security?", "99dc15b6-9a6f-46c8-ac5b-2de91142fa28": "What are the key components of human-AI configuration in information security?", "ee485024-fbdf-4772-9401-42eb2a5b5603": "What steps are involved in the pre-deployment testing process for AI systems?", "94cb95a8-ec57-470d-b1e8-3959d8963369": "How can sharing pre-deployment testing results improve system release decisions?", "af4cbd1c-b744-4bab-97a4-ec1f323b9260": "What are the potential risks of not sharing pre-deployment testing results with relevant actors?", "0325a366-a9b0-45a5-87cf-7107248c17f6": "How does confabulation relate to information security in AI systems?", "a2a6f02b-4f15-4743-a2b5-3f2025414a01": "What role does system release approval authority play in the deployment of AI systems?", "835ee7d9-eea0-48db-a932-b70328a2b80f": "How can human-AI configuration impact the effectiveness of information security measures?", "2184e32c-4f19-4057-964f-48f6090c4d64": "What are the health benefits of traffic calming measures according to the US Department of Transportation?", "426c71f4-c8da-4c45-b83c-0cb9d64407ac": "How do traffic calming measures help in slowing vehicle speeds?", "4f9726e7-c892-47d5-941c-423f9a84d608": "What are some examples of traffic calming measures mentioned by the US Department of Transportation?", "89c9ee09-99a0-44e8-9140-622366b15748": "How can organizations monitor and fix their AI models using responsible AI ventures?", "02099e7a-c0f9-4fec-8ac9-5f7fe1eab08c": "What are some startups that focus on AI ethics according to Karen Hao?", "38cff01a-3e4d-4f0d-84b0-6e9383d80e02": "What is the role of responsible AI ventures in promoting ethical AI practices?", "2c7799cd-0027-4338-a539-47cd43e086c6": "Can you provide a summary of the article \"Worried about your firm\u2019s AI ethics? 
These startups are here to help\" by Karen Hao?", "dc59187d-f64f-4de6-af9d-5e6e93133b18": "What are some top progressive companies building ethical AI in 2021 according to Disha Sinha?", "6f52455b-2a16-4f6c-94cb-d9ba727691af": "How does the growing ecosystem of responsible AI ventures impact the development of AI ethics?", "16af67cc-0dfc-4203-815d-8b6bf5ef6277": "What are the key points discussed in the MIT Technology Review article on AI ethics startups?", "555d774d-d331-4349-951f-af19f683089a": "What are the main privacy risks associated with AI as discussed by Lee, H. et al. (2024)?", "91ef15f4-5185-4c16-9ef0-4bbb37697969": "How does data poisoning exploit generative AI according to Lenaerts-Bergmans, B. (2024)?", "1c1560c2-164b-4de3-9972-65d5c636bd55": "Why are GPT detectors biased against non-native English writers as per Liang, W. et al. (2023)?", "b0939336-ece6-40eb-82f5-1ec5f0e3b997": "What are the energy costs associated with AI deployment discussed by Luccioni, A. et al. (2023)?", "e8beb28b-df88-4857-a896-9492bc49fe69": "What operational risks does AI pose in large-scale biological attacks according to Mouton, C. et al. (2024)?", "d799c0a7-a358-47d3-8fc2-0b8da05d95fa": "How does generative AI exhibit worse biases compared to humans as stated by Nicoletti, L. et al. (2023)?", "4f6a16c0-eb08-4d81-9e2d-968a9c7bbe5a": "What are the key findings of the National Institute of Standards and Technology (2024) on adversarial machine learning?", "8029f69a-cddb-4697-8f5f-a117433fee74": "How can deepfakes impact privacy according to the taxonomy provided by Lee, H. et al. (2024)?", "66f00bc6-ab1d-43e9-9153-e2c75516e556": "What measures can be taken to mitigate data poisoning in generative AI as suggested by Lenaerts-Bergmans, B. (2024)?", "5d336249-8464-410f-8952-8b2a4b802ba1": "What are the implications of biased GPT detectors for non-native English writers as discussed by Liang, W. et al. (2023)?", "cca47a77-0520-490a-ba33-7fc205506cff": "What are tenant screening algorithms and how do they impact housing applications?", "193d2e40-1043-47cd-a7ed-4c589f9ed491": "How do automated valuation systems work in mortgage underwriting?", "1377b78c-14b8-431e-bbcf-a61ca726482b": "What role do workplace algorithms play in determining employee pay and promotions?", "b20bfb27-3464-4d36-af79-19e0efb7136b": "How are virtual or augmented reality programs used in workplace training?", "b63ce3cf-4572-4655-8f8c-f0590a07b1dd": "What are the implications of electronic workplace surveillance on employee privacy?", "518edd21-747b-416c-a5e4-eded44945fd5": "How do AI-assisted diagnostic tools support clinical decision making in healthcare?", "f29882f4-75bf-4ba7-8c1a-4908f184296a": "What are the benefits and risks of using medical AI systems and devices?", "0477b1d1-9503-4a65-9872-d4e96a8ba1b1": "How do predictive models in healthcare influence patient treatment plans?", "cb0e5e06-fc4b-4849-a9f5-cc5a5541d9c4": "What are the ethical concerns surrounding the use of algorithms in employment decisions?", "675faee1-4b6f-46f9-9004-bfea75569eee": "How do automated valuations from online aggregator websites affect home insurance rates?", "e3b5b92f-9f7b-4c54-9240-dfaff35e382f": "What information is required to build or validate a risk assessment?", "50ddae98-3e18-4f13-87df-bdf5c87c3e49": "How can the public access information used in risk assessments?", "857942ee-2e35-4394-93cc-4a1070f88dbf": "Are trade secrets protected in criminal cases?", "039a84a0-ad22-41a4-9569-a900c59cd915": "Can trade secrets be used to prevent discovery in criminal
matters?", "730583a5-c594-465e-b9db-d94372af5b92": "What are the legal implications of using trade secrets in criminal cases?", "a399efb4-2fe3-4b7d-bb74-468cb88c6692": "How does public inspection of risk assessment information impact transparency?", "25b7a363-4a6f-4e92-bf19-820353d20eda": "What constitutes a trade secret in the context of criminal law?", "e89b7e4e-9d0c-49e9-ba4b-d96fb7f38d1d": "How does the law balance trade secrets and public interest in criminal cases?", "43939503-05a7-4f81-a001-b61fae0e6cf3": "What are the consequences of failing to disclose information in a risk assessment?", "741c8a8a-a15d-4b18-b19a-a167119ce939": "How can parties in a criminal case challenge the use of trade secrets to quash discovery?", "ce483416-c348-4911-9628-813050e4eeab": "What are the best practices for limiting the scope of data collection to avoid mission creep?", "2ca2c34f-c0e3-4a49-bb51-b639dcabcfe2": "How can organizations determine if data collection is strictly necessary for their identified goals?", "361b48b8-e93c-4e16-91a5-cc5e07f1fe3d": "What steps should be taken to minimize data collection as much as possible?", "ac634aac-bdc9-4b09-83c7-537d7fab01bb": "How should organizations assess new privacy risks when using data in a different context?", "7f844336-cb98-4ae3-b02c-d29460977665": "What are appropriate mitigation measures for new privacy risks, and when is express consent required?", "09a89a5d-baf9-44e7-97ce-9150f52dcddb": "How can clear timelines for data retention be established and enforced?", "145c8074-9a0f-4eeb-a364-f358745607c7": "What are the legal or policy-based limitations that affect data retention timelines?", "9f2b34d5-6f6f-477a-95d2-34caa90de98d": "How should data retention timelines be documented and justified?", "1e82f71e-654d-42ec-9c14-6dadbe1f6a21": "What methods can entities use to proactively identify potential harms related to sensitive data?", "99c21c13-0f1b-4e41-ba73-66b99a72b9f5": "What strategies can be employed to manage and mitigate risks associated with the collection, use, sharing, or storage of sensitive data?", "3be3f630-af00-421e-9293-5b36b1457f21": "What are the AI RMF functions and their corresponding tags?", "15856cf5-933b-4a51-9b87-1a41b7989c41": "How should organizations determine the applicability of suggested actions to relevant AI actors?", "f8eeb340-f824-4954-8bba-97e13a1eaaa4": "What does the Action ID GV-11-001 represent?", "41e2ede8-bed6-4392-91c8-4d6e11fd5723": "Are suggested actions for GAI developers always relevant to GAI deployers?", "8d49c1c5-f878-4154-9587-d0639e096bfe": "What information is included in each table of suggested actions?", "256b9f00-7e6f-424f-a18c-4c07a21a9c0a": "How are GAI risks linked to suggested actions?", "2fefdcdf-4aed-4bf3-a395-cca2e7e6f51f": "What does the tag \"MP\" stand for in the AI RMF functions?", "5073c0c1-051a-4a7f-8466-425db5f7c409": "How can organizations manage GAI risks according to the suggested actions?", "6a8808c6-ab61-4357-826e-0a9c216b874d": "What are AI Actor Tasks, and how are they related to subcategories?", "ac5de47b-969a-4190-8637-c26262014360": "Why might not every AI Actor Task be listed for each subcategory?", "2d85a293-4a92-4d77-a148-01e6bdb2e171": "What are the best practices for identifying and documenting how a system relies on upstream data sources?", "fa278b40-585c-4815-898c-0b82776e868b": "How can one ensure the integrity of information when a system serves as an upstream dependency for other systems?", "d2d872e7-55fe-471f-8a3c-444fd24feaeb": "What methods can be used to 
observe and analyze the interaction of a GAI system with external networks?", "ce2f6805-1d0b-408b-93cb-df89733e20d7": "How can potential negative externalities be identified when analyzing a GAI system's interaction with external networks?", "f0e5e018-68e2-4930-9f82-18a05118dd73": "What are the key considerations for ensuring scientific integrity in experimental design and data collection for GAI systems?", "aaf22726-5e88-48f2-9816-290e976a2c52": "How can the accuracy and reliability of GAI output be assessed effectively?", "621c3d3b-6588-4b26-817f-34cf97d21faf": "What strategies can be employed to validate the authenticity of GAI output?", "d4ebcdf7-e442-4f4e-ae79-bc5b7049698f": "How important is content provenance in maintaining the integrity of information in GAI systems?", "1ed4e09e-46b0-42fb-8f66-ad56d901feb1": "What are the risks associated with the integration of value chains and components in GAI systems?", "4ee25323-db4d-4006-941f-65f9ef0a30a2": "How can one document the trustworthiness and construct validation of a GAI system?", "c87b4890-2e5b-4bac-9ab2-38345179a167": "What are the current consumer data privacy protection regimes in the United States?", "a5cec61b-fe2c-4f0a-b43c-0bcfda0a4e61": "Why does the United States lack a comprehensive statutory or regulatory framework for personal data rights?", "cd837571-a21f-4d34-be84-a254ecaabd8e": "How do existing laws guide the collection and use of personal data in specific contexts like health and employment?", "bf81addc-77e6-4962-a28d-6a88b104c798": "What are the potential harms of not having a standardized data privacy protection regime in the US?", "0a2781a2-70af-4a14-833a-be0db30d26de": "How do state-level consumer data privacy laws differ from one another?", "70ebdeb5-d993-421c-98de-6f8c8a151dcc": "What additional protections could assure the American public about the use of automated systems?", "c3a7e646-03e3-4fcb-a49d-f6aeb95cd652": "How does the patchwork of laws affect the application of data privacy in an automated society?", "b2069dc4-e694-456f-ac4e-bc92042d8f57": "What are the challenges in applying existing data privacy laws to new technological contexts?", "eaf0aa98-7660-4a3d-9f6e-c63c68dafdbc": "How can consumers ensure their personal data is not being monitored or collected without consent?", "d8553bac-b1a1-4c02-9567-b5f42bb59bee": "What legal authority is required for automated systems to collect personal data in the US?", "6a84a0e6-451d-4969-8be6-245780aeb397": "What is prompt injection in the context of GAI systems?", "7b29eb41-5e52-429f-af23-ae265e916f3f": "How do direct prompt injections differ from indirect prompt injections?", "f9c5155b-5175-472a-bbc4-0b5013dfe6b9": "What are some potential consequences of direct prompt injection attacks on GAI systems?", "8318048b-c34f-44ee-a2ba-511717b16077": "How can indirect prompt injections exploit LLM-integrated applications?", "0eb2f6c5-07ed-4df1-b0ee-9772ba08a943": "What are some examples of vulnerabilities that can be exploited through indirect prompt injections?", "1d52cdc1-e84a-4d67-80f9-7540a2133c4c": "How can querying a closed production model reveal previously undisclosed information?", "7b697db4-b43e-445e-bdaf-c9ceb08eb05e": "What measures can be taken to protect GAI systems from prompt injection attacks?", "41daecd9-dceb-4f72-a42a-e18ea7fda937": "How do conventional cybersecurity practices need to evolve to address prompt injection threats?", "fd25cb30-3e1b-4f40-bd7a-02df040ec85c": "What are the risks associated with malicious prompts in GAI systems?", 
"c5b9acfc-ac53-4dbd-85d3-f28fa4f32c26": "How can security researchers demonstrate the impact of indirect prompt injections on proprietary data?", "6d74ac43-d15a-4127-8db7-cef0fdb9d105": "What is the importance of using representative data in system design?", "c71c8dd4-63a7-4818-b770-97043dc5007e": "How can organizations protect against proxies for demographic features in their systems?", "2d5bfd5e-dbdc-4c0c-818e-615f91006738": "What are some best practices for ensuring accessibility for people with disabilities in design and development?", "0029c940-ec04-411f-a02c-3d2ed811162a": "Why is pre-deployment disparity testing crucial in system development?", "a07d446c-3b58-4fde-86ea-42a39458f472": "How can ongoing disparity testing and mitigation improve system fairness?", "c3e83063-2c07-421c-8e37-a16fff3f39e8": "What role does clear organizational oversight play in maintaining system integrity?", "606bee4d-8e16-4cd6-a403-2f19c040f21c": "What is an algorithmic impact assessment and why is it important?", "553267de-5d7a-4b26-91a6-2066782e1eca": "How can independent evaluation contribute to the transparency of algorithmic systems?", "2663ee64-79f0-4c1f-8410-651d596a09cb": "What should be included in plain language reporting of algorithmic impact assessments?", "dddf4214-014b-4cae-a517-b49cd94d6dbb": "Why is it important to make disparity testing results and mitigation information public?", "791a3b0f-42da-4f1e-8230-e984b9e83a5d": "What are some examples of automated systems that can impact civil rights and civil liberties?", "2e9b844a-3516-48c7-9857-37ddb2b13880": "How do automated content moderation tools affect free speech?", "41172f4c-70e5-4cec-9baa-a1647ae750e3": "What role do predictive policing algorithms play in the criminal justice system?", "e28c299b-5a05-4d08-b18b-a133fd152cee": "How can automated license plate readers impact privacy?", "89e46e02-8e3a-48fb-aee7-332e3cb87322": "What are the potential risks of using real-time facial recognition systems?", "d4f21fa8-9d60-4e63-b735-56afa6012b7e": "Why should the Blueprint for an AI Bill of Rights cover automated systems that do not yet exist?", "6605c4f9-86eb-48ba-970b-2ec5cbda43d5": "How do surveillance algorithms affect civil liberties?", "ed41893b-1bb9-4e32-bf94-8499f5845e84": "What is the significance of including speech-related systems in the AI Bill of Rights?", "06f4e666-7e80-47d4-920c-020d8e66d063": "How can risk assessment algorithms in the criminal justice system impact individual rights?", "d8fdf8f4-8a77-4d9d-9d04-353cf7713d24": "What are the implications of using automated systems for surveillance in terms of privacy?", "94bdbb5b-6d65-480b-8601-b46cc88fc568": "What are the potential harms of AI systems in terms of political opinions, sex life, or criminal convictions?", "455d3295-e1b6-4838-9757-ee00fbf6fc3f": "How can disparities between groups lead to harm when using AI models?", "dcad9f20-5d3c-4fa0-a3da-0807a6e8e29c": "What challenges arise in establishing a baseline scenario to measure harm caused by AI?", "a9fffb40-a9a9-44a4-8bd7-139528a1ab89": "How can biased behavior in AI systems result in harm to different subgroups?", "d0babf07-9aad-4ae7-a6cd-85ecdcba8291": "What constitutes an unambiguous harm caused by an AI system?", "89597661-548a-4a06-a978-89132aba1eb0": "How can we determine if an AI system has worsened someone's situation compared to the absence of the system?", "f9670d38-896e-4fbc-8f5b-b9d0037c76ff": "What are some examples of harm caused by disparities in AI behavior?", "a0b6a03d-c670-468c-bce1-9294b6d93b53": "How 
do divergent views on disparities in AI behavior affect the assessment of harm?", "86dbbc9d-37e8-4fa1-87dc-808e4eb2b54e": "What mechanisms can be used to identify harm caused by AI systems?", "001c0aca-6ef2-4987-9528-4e438dccb697": "How can we address the issue of biased behavior in AI to prevent harm to subgroups?", "ff4430a8-31f2-4f0d-a6e0-5c8649634b27": "What are some effective strategies for disparity mitigation in automated systems?", "e535b78c-e479-4e22-9856-3ae2f3a944b8": "How can we identify and evaluate multiple models to select the one with the least adverse impact?", "07577988-b22e-464d-a114-5a3836a9b93e": "What steps should be taken if adequate mitigation of disparity is not possible in an automated system?", "dbb54f3d-f426-42fa-85ac-fdf51750e91c": "How do unobservable targets lead to the inappropriate use of proxies in automated systems?", "7fdfe1a9-36d4-4691-885b-a3946764874a": "What legal requirements exist for mitigating or eliminating disparities in automated systems?", "7cae1803-9318-4bf5-a8b5-7013514edc4f": "How can modifying data input choices help in reducing disparities in automated systems?", "692e4a6c-a9d5-46ca-a0d6-9a31d7f06f67": "What are the potential consequences of algorithmic discrimination in automated systems?", "9601cee0-678d-42b5-a2a6-8db03a41e877": "How can we ensure that an automated system aligns with equity goals?", "087c8f2b-0171-4cb4-81de-f085a3a439ca": "What are the best practices for conducting a disparity assessment in automated systems?", "9ed4da28-5f9b-4191-8f88-b3d3f17c019c": "How can meaningful harm caused by disparities in automated systems be identified and addressed?", "1a5f3970-e2ad-4c5a-b2f2-464b42ba6670": "What are the common sources of bias in GAI training and TEVV data?", "1f4eaffe-4d1e-4e65-927f-b30861a0c252": "How do differences in outcome distributions across groups affect GAI systems?", "7108defb-707d-4f17-be57-997ab61d8de5": "What is the impact of the digital divide on the representativeness of GAI training data?", "dbfa0169-4237-4e27-a0a2-6f6add38775e": "How can latent systemic bias in images and text be identified in GAI systems?", "786162d0-4467-47da-a17d-83e793b39e99": "What role does demographic group coverage play in the effectiveness of GAI training data?", "2dccbfb2-4d4e-4e46-bb34-553b404afd69": "How can input data features act as proxies for demographic group membership in GAI systems?", "2026e84e-f0b0-41f3-a034-e2ee060d5e42": "What methods are used to filter hate speech in GAI system training data?", "af8f1310-f89e-41d6-a386-57d253594be4": "How prevalent is GAI-generated data in GAI system training datasets?", "cc1890d2-8f2e-48d0-abcb-8f49bfb2f5b2": "What are Winogender Schemas and how are they used to study gender bias in GAI systems?", "2ee7b790-cd75-4629-a52f-8036fad31546": "How can the completeness and balance of data sources be measured in GAI training data?", "53c25f8b-f67e-480c-ab2a-951cfbd311df": "What are the technical risks associated with advanced AI models?", "8be2ee75-d646-4383-b30c-966d720b0fa8": "How can AI confabulation pose a risk to safety?", "f18be8d3-47f5-4006-83fa-afb8ce01872b": "What are the dangers of AI making violent recommendations?", "4a8b4330-7921-4bb8-becf-4ea1bb6b54e3": "How does data privacy become a risk in the context of advanced AI?", "ce71108e-36bc-49d1-a29d-19023f609a26": "What is meant by value chain and component integration in AI risk management?", "4a77b200-1ee4-4bbb-b131-970c384060be": "How can harmful bias in AI systems be mitigated?", "3aa2d071-ce40-40d5-be9e-8219f64dbef9": 
"What are the potential risks of AI misuse by humans?", "73bb4bbd-de45-44ae-9b2f-d51c7dc9c39c": "How can AI be misused to spread obscene or abusive content?", "e5a3d5df-7333-4465-9aef-c982135b06e1": "What are the systemic risks of AI to the ecosystem and society?", "92a08a8e-413f-431b-92b3-ab96effc5cfb": "How does AI impact intellectual property rights?", "4e3b03a7-bfc2-43b1-b05b-f1a0c38eff6c": "What is algorithmic discrimination?", "70f62e50-5170-4ad9-928d-15660d1f86b5": "How can we protect against algorithmic discrimination?", "674c4e49-a7d5-47f2-bc90-df8320d389ce": "What are some examples of algorithmic discrimination?", "c61b9e7b-0fb9-4e82-a7c4-64d87b67c0b2": "How do algorithms lead to discrimination?", "2e142a47-da67-4d69-b1f4-c340f690597a": "What laws exist to prevent algorithmic discrimination?", "4e2fb6ff-c26c-4485-adb5-69581eb0a253": "How can companies ensure their algorithms are not discriminatory?", "114af13c-f1ec-49c5-a9a2-f604e098e204": "What are the ethical implications of algorithmic discrimination?", "454c3073-ee6a-49e6-b85d-712601950be6": "How can algorithmic discrimination be detected?", "890c20df-f21b-467f-9126-136a56509c15": "What role does transparency play in preventing algorithmic discrimination?", "ff14594e-b5c4-4958-99b5-c3c354b9f502": "How can individuals protect themselves from algorithmic discrimination?", "4b585781-3f5c-4022-b77a-4d5874711f71": "What is the significance of the watch list that Chicago police fought to keep secret, as reported by Mick Dumke and Frank Main?", "c3bfc377-21d8-4ea9-aa9d-28bf11fa7174": "How does the ACLU case in Idaho highlight the pitfalls of artificial intelligence decision-making, according to Jay Stanley?", "04ce1a4c-76ce-4640-9775-83d32fb8e781": "What are the key provisions of the Illinois Biometric Information Privacy Act that became effective on October 3, 2008?", "ee94abbb-0f06-48b9-944e-c9be35cad15f": "What is the ABOUT ML Reference Document by the Partnership on AI, and what does it cover?", "d9f37ef0-5071-4f8e-9872-db98336653a5": "How does the model cards framework contribute to transparency in machine learning models, as discussed by Margaret Mitchell and her colleagues?", "66ebcef0-85e0-4e6d-afc7-bdd573d73837": "What were the main findings of the Chicago Sun Times article on the police watch list by Mick Dumke and Frank Main?", "508213e7-c376-4ead-bb89-162fc3fef0e7": "How does the ACLU blog post by Jay Stanley address privacy and technology concerns related to AI decision-making?", "67c9bee6-f621-4966-ba5b-b12a16c01f45": "What are the implications of the Illinois Biometric Information Privacy Act for businesses and individuals in Illinois?", "061e1226-744d-4d65-8114-0fbadd9b3d06": "What is the purpose of the ABOUT ML Reference Document, and how can it be accessed?", "af11a645-0690-4cf6-929f-476f6853ee58": "Who are the contributors to the model cards framework, and what are their roles in the development of this framework?", "d7605540-f9a3-4e51-9036-60f1c4df408b": "What is the Biometric Information Privacy Act in Illinois?", "379f0256-6648-4343-82bd-85d0ba551fe2": "How does the Biometric Information Privacy Act protect individual biometric data?", "097159ce-edec-42b3-8e7d-f7eaa02730ca": "What are the provisions of the Biometric Information Privacy Act regarding the use of biometric information?", "80dcda41-28de-4d4b-a5f6-d968bf53fca2": "How are people in Illinois notified about the use of their biometric information?", "ada88ac3-f4cc-4c8c-9572-1eef6b100f8c": "What are some real-life examples of laws protecting biometric 
information?", "a9cc0b3d-ed88-41d5-9b61-453f88b305d5": "How can technical and sociotechnical approaches protect rights and access to biometric data?", "465a72fd-3256-436b-8e18-d0a099b3fc3a": "What are major technology companies doing to communicate with the public about biometric data use?", "8ffdf82f-7776-4d09-8d87-dd3b9b2d6dce": "What are the requirements for private entities to collect biometric information in Illinois?", "81883056-15e0-4dac-a219-beb17eae9f58": "How do policies help in protecting biometric information?", "30810cb5-f31d-468a-b938-a53597ae1d54": "What practical approaches can be taken to ensure the protection of biometric data?", "e7dc392f-0994-4434-83ca-5a2b95bfb984": "How have chat-bots and AI-driven call response systems improved customer service?", "d17c2c7a-b481-4a0a-8c1f-ecce10c054d1": "What are the benefits of using partially automated customer service platforms?", "1fd47749-e6b5-4a4e-9982-0d7a4096be47": "How do integrated human-AI systems enhance the speed of customer care?", "886cbf14-7c71-4800-97a2-a3f8f91c9aef": "Why is it important to maintain human agents in customer service despite the use of AI?", "c68e9ed0-dbc2-47cb-8531-79bafa03558c": "What role do human agents play in resolving complicated customer service requests?", "009e2017-2f03-4ae5-80e7-1e266e24c0c1": "How do businesses compile common customer problems for human agents to review?", "7e569e34-0129-4adc-9ffa-13874c019581": "What are ballot curing laws and how do they function in the context of voter signature matching algorithms?", "97e257bf-87aa-4bbd-a07a-8ebb0d758152": "How many states have ballot curing laws that require a fallback system for voters?", "78b69abb-f1c6-4398-9e4e-619d89054b29": "What happens if a voter signature matching algorithm incorrectly flags a ballot as invalid?", "9ede1912-c15f-41cf-83b6-4ac0a0be6c0f": "Why is it necessary to have a review by an election official in the ballot curing process?", "1c3e63ce-81d0-49de-920c-d4c81fd0d223": "What can I do if a former employer provides false information about my job title?", "729f05ea-87c5-43a9-8190-306ccc177546": "How can false data from a previous employer affect my future job prospects?", "99785c94-c276-4ed1-8039-13f1d0943fdd": "What are my rights if a company supplies incorrect information about my employment history?", "0c4d45a4-e1e2-46e5-b8cd-59ac132cfb62": "Can a job offer be revoked based on false information from a previous employer?", "fd23ff35-ce12-4ab5-8f00-6522d5466c82": "How can I dispute false information provided by a former employer to potential employers?", "68940d08-efcc-42d0-96b0-7e26d8c550cc": "What steps should I take if I find out a former employer gave false data to a bank or landlord?", "b8ac00ac-57f4-4259-aead-6d6f608ccea9": "How can false employment data impact my ability to rent an apartment?", "fe589612-ce8a-4fec-bfb6-4a9173dff0a2": "What legal actions can I take if a former employer provides false information about me?", "ae7f2e11-db6c-470f-9093-9964aba79b39": "How can I prove that a former employer supplied false data about my job title?", "9ccf00e2-cd6a-4824-8e75-cb8c6bc51c37": "What are the consequences for a company that provides false employment information?", "d7947ff9-c5a1-430f-a50d-48369fe5d238": "What is the National Science Foundation's program on Fairness in Artificial Intelligence?", "da12cd0e-8846-4a5c-84a5-84fc2b21b168": "How does the National Science Foundation support research in explainable AI?", "aad8a5bb-bb98-4c8c-942a-60d55b2f3a3a": "What are the goals of the National Science 
Foundation\u2019s program on Fairness in Artificial Intelligence?", "62e9df72-50c1-414a-85e4-ec1b02cc7b3a": "Why is explainable AI important in the context of fairness in artificial intelligence?", "e688d828-a12e-4b21-a574-d41223a895e4": "What kind of research foundations are being developed for explainable AI?", "d9a5744c-7e1e-4f1c-bfe4-9ccafe96cd82": "How does the National Science Foundation define fairness in artificial intelligence?", "bc9af756-894b-4f0e-936f-fc6f94db6b7b": "What are some examples of projects funded by the National Science Foundation\u2019s program on Fairness in Artificial Intelligence?", "c24ff655-ee05-4769-96a1-1598937bb5d5": "How can explainable AI contribute to the development of fair AI systems?", "6c682518-b844-4763-a6a6-0c784122c7e9": "What challenges are associated with creating explainable AI?", "02b62987-5164-4ddb-b1ea-ea1c5ae2bfa2": "How does the National Science Foundation\u2019s program on Fairness in Artificial Intelligence impact the development of AI technologies?", "9d1e0807-b733-47c7-aab7-6db6d469a140": "What are the legal limitations on reusing sensitive data in domains like finance, employment, and housing?", "11e017f1-450d-484e-80d0-fd445c1441ff": "How can extra oversight ensure the safety and efficacy of sensitive data reuse?", "09d9a596-3e41-43ef-83c1-590975022445": "What are the benefits and risks of reusing criminal data for civil legal matters?", "1845dce6-8092-4224-b27c-4416e17dad7c": "What measures can be implemented to mitigate the risks associated with sensitive data reuse?", "aa8a4f38-7a8e-4236-a7d0-6066bfef485e": "How should sensitive data be labeled to identify contexts for limited reuse?", "31b0b161-5d09-4e06-8df0-3cd3719087e0": "In what ways can aggregated datasets replace individual-level sensitive data?", "eba60a78-3558-4289-b6c4-c260c5ddd08d": "What are the requirements for demonstrating the safety and effectiveness of automated systems?", "3e075c52-14a9-4ce8-a841-7081ebf2773f": "How can automated systems be designed to allow for independent evaluation?", "76205606-28f9-4aa8-b954-f88b34b55c55": "What are the potential benefits of private sector use of sensitive domain data?", "da56e9b1-2461-4b61-9841-d2518ab0182c": "What are the criteria for legally authorizing the reuse of sensitive data in other contexts?", "9c269a12-6410-4ffb-b5cc-7e74a8f47bd2": "What are the National Artificial Intelligence Research Institutes funded by the National Science Foundation?", "b2d39dd5-4224-4f84-9380-d8ed71a03606": "How can I apply for funding opportunities in Cyber-Physical Systems through the NSF?", "607bb0c5-d732-4661-89d2-8c8968dc8954": "What is the Secure and Trustworthy Cyberspace (SaTC) program by the NSF?", "137febbc-d1a5-48ce-ae7b-8fbe207161b9": "What are the objectives of the Formal Methods in the Field (FMiTF) program by the NSF?", "0dad671a-1be1-4246-86ac-e3a70cf4fa38": "How does the NSF's Designing Accountable Software Systems (DASS) program work?", "5e1f7127-82d6-481e-b40c-00469b2a02f1": "Where can I find more information about the National Artificial Intelligence Research Institutes?", "ff09d5ba-b0ce-431f-a9e6-6443aef1d947": "What types of projects are eligible for funding under the Cyber-Physical Systems program by the NSF?", "ab935b74-ec3e-4c32-9554-a212ec3eac3e": "What are the key focus areas of the Secure and Trustworthy Cyberspace initiative by the NSF?", "77e27420-8614-491c-870b-1f18f4bc11ea": "How does the Formal Methods in the Field program contribute to advancements in technology?", "5c9a5427-5b74-4e51-97bf-ff3fe62e0773": 
"What are the requirements for submitting a proposal to the Designing Accountable Software Systems program by the NSF?", "ff62ad50-d65f-41bd-aee2-94ee02783edd": "What are the potential risks of automating HR functions like performance evaluations?", "fae2091d-78df-4aeb-84f0-c4796d91062a": "How can errors in automated medical systems impact patient care?", "1174d0d9-9f4e-48c8-b445-47e1b63e081e": "What steps can be taken to ensure automated systems in healthcare do not deny necessary treatments?", "a7594475-953f-4c1b-ba9b-7e06027df340": "How do automated HR systems handle employee appeals or disputes?", "38758d4b-c5d6-4e3f-a96c-ca37a83ffa88": "What are the ethical considerations of using automated systems to make employment decisions?", "8ff16b60-4549-4d28-a5f8-d09dc421e48d": "How can companies ensure fairness when using automated systems for performance evaluations?", "1e5dacbd-129e-4493-b540-f6526c84911f": "What are some examples of automated systems causing harm in the workplace?", "23c3efca-d4e8-49c3-b686-8b432530b4f6": "How can employees protect themselves from errors in automated HR systems?", "70b624ad-4e3d-49ea-b622-028680721234": "What role should human oversight play in automated decision-making systems?", "53b65fc2-c35d-42d3-bd12-9411f43f20fe": "How can organizations balance efficiency and fairness when implementing automated systems?", "79527f3f-64a1-47c2-8080-526e87d6de07": "What are some novel methods for measuring GAI-related risks in content provenance?", "6ecf170a-bccc-40bf-a94b-cdee16acc4b6": "How can we evaluate the effectiveness of technologies in detecting offensive cyber activities?", "2a759b35-2e4f-4be9-b7cd-da0671a48a2f": "What are the challenges in maintaining information integrity while assessing GAI-related risks?", "05a5cf4b-6f21-4f0d-a726-782a1b5f0055": "How do current technologies measure the risks associated with CBRN information or capabilities?", "7464ee39-be9e-4591-9805-942ced6f3320": "What strategies can be employed to ensure AI models produce factually accurate outputs?", "623fdaff-f939-45e1-b135-53865f42ef09": "How can harmful bias and homogenization be mitigated in AI configurations?", "cec41fa7-ba2b-4f27-b233-a38c64d2be39": "What are the implications of AI-generated obscene, degrading, and/or abusive content?", "4e306b7e-b18d-408a-a7ff-ff6ef4bb5ef0": "How can we balance the need for reliable AI outputs with the prevention of harmful content?", "861cc15d-b987-4ae4-8da8-87774f1fd4a2": "What role do representative AI actors play in managing GAI-related risks?", "9e1b0b50-9c70-4f88-b9e9-1bce21f959c9": "How can we improve the reliability of AI models in the context of offensive cyber threats?", "312e6911-48ad-414d-b366-80e0cca7c405": "What are the risks associated with increased attack surfaces for targeted cyberattacks on AI systems?", "822e0d55-b6d6-4fd6-b0aa-970bda988794": "How can the availability, confidentiality, or integrity of training data be compromised in cyberattacks?", "b39af8a9-fdbb-4957-b79a-1ff8ff6bebfd": "What are the potential intellectual property issues related to the replication of copyrighted or trademarked content using AI?", "789e4c72-791b-4dea-8f9f-92ecbfb598d6": "How does AI ease the exposure of trade secrets and what are the implications?", "f4b466db-87c7-4ab8-8d5b-cf86ef2d84e5": "What are the dangers of AI in producing obscene, degrading, and/or abusive content?", "e5767722-faaf-40d4-a45c-7a1913c24ca6": "How can synthetic child sexual abuse material (CSAM) be generated using AI, and what are the risks?", 
"e81ed8e0-0b64-4bec-b518-b4f8140f20b6": "What are nonconsensual intimate images (NCII) and how can AI contribute to their creation and distribution?", "583946a4-f8da-4e37-9457-b91e6a6ee12b": "What challenges arise from the non-transparent integration of third-party components in AI systems?", "63097304-7442-4cef-88eb-325c2ccf4638": "How can improperly obtained data affect the integrity of AI systems?", "e17ed95d-b504-4234-b183-12b62f58b109": "What are the risks associated with improper supplier vetting in the AI value chain?", "3f995ade-9508-4868-9f0c-834fe1fedd5d": "What are the key factors considered in health insurance risk assessments?", "faaf64ee-40f7-4b25-9728-d640745e0a90": "How do wearable technologies contribute to wellness apps?", "03221c60-4936-437c-ad35-65b5cb1935c1": "What algorithms are used in determining access to financial systems?", "d909e267-78c1-4718-abc3-4c5eacc1c332": "How do credit scoring systems impact loan allocation?", "4953d062-f6dc-4435-8edf-ed0f4e8baf36": "What role do automated interest rate determinations play in financial systems?", "a1f064d0-fe0e-4009-ad4e-da914ad74ab1": "How do insurance algorithms assess drug addiction risks?", "90c10536-7c1f-4080-96dc-6558f5fb05e1": "What are the ethical concerns surrounding insurance care allocation algorithms?", "9d896814-0ecb-4f79-9aee-8558f5e95807": "How do financial algorithms apply penalties like wage garnishment?", "9fba01ca-f4ac-40a5-845e-7e7798ee3c4a": "What is the impact of underwriting algorithms on health insurance costs?", "9c45750d-00eb-4f95-b273-e109238629c4": "How do wellness apps integrate with wearable technologies to improve health outcomes?", "9dfeb337-4771-4307-8f63-5a084befadf1": "What are the key steps in ensuring automated systems are safe and effective?", "5db1d569-a143-4b95-a780-abd6a626429c": "How can diverse community consultation improve the safety of automated systems?", "36cb7b8b-ec8a-4240-932d-f4d013ec010c": "What is the importance of pre-deployment testing for automated systems?", "4e1b4a56-946d-4b57-97a8-a7e7043aafbd": "How do you identify and mitigate risks in automated systems?", "ba44fb84-6b28-4604-b4cc-4e0cb6156a75": "Why is ongoing monitoring crucial for the safety of automated systems?", "709b6157-47f9-4f13-b778-73ea7b5cd999": "What should be done if an automated system is found to be unsafe after deployment?", "b831d5ce-83d9-44cc-8451-30165592bc6a": "How can automated systems be designed to prevent unintended harm?", "48591766-4e73-4127-9689-ac3ec34938b0": "What role do domain-specific standards play in the development of automated systems?", "70259660-a0cb-4dad-9af2-f1552f5c5405": "How can stakeholders contribute to the development of safe automated systems?", "5da3d175-a205-4f12-9bc7-b0694cc97663": "What measures can be taken to ensure automated systems do not endanger community safety?", "ba9a37a2-ff2b-496c-b75a-bea68b9b4d20": "What is the main argument presented by Darshali A Vyas et al in their article on race correction in clinical algorithms?", "77397e84-ee4c-4691-9eea-95549a0f45b1": "How does the Executive Order on Advancing Racial Equity define 'equity' and 'underserved communities'?", "c4c9deb3-0d0c-4b68-a899-07733f551dce": "What are some of the proposals offered by various organizations for designing algorithmic impact assessments?", "2af51b13-b8e3-4522-b662-cbd6f11fa87c": "Can you provide a summary of the article \"Hidden in Plain Sight \u2013 Reconsidering the Use of Race Correction in Clinical Algorithms\"?", "203b4219-e580-474c-8c70-588f4c24500f": "What is the 
significance of the Executive Order on Advancing Racial Equity and Support for Underserved Communities?", "c20f89ba-0198-4bb6-83d4-436159a5fecb": "Who are the authors of the report \"Assembling Accountability: Algorithmic Impact Assessment for the Public Interest\"?", "beea4d1a-fc3a-4f4c-b9fa-a3c73abf6041": "How does the New England Journal of Medicine article address the issue of race correction in clinical algorithms?", "b49c589c-51d8-4cdc-93e1-6d74c67948f7": "What are the key points discussed in Section 2 of the Executive Order on Advancing Racial Equity?", "71a79413-b607-4093-91a6-cedb011b3b57": "What is the role of Data & Society in the context of algorithmic impact assessments?", "89504f33-70c7-44e7-acee-ad69679cf410": "How can one access the full text of the article \"Hidden in Plain Sight \u2013 Reconsidering the Use of Race Correction in Clinical Algorithms\"?", "abccae6f-f846-42a0-ae0c-e7553ed3bcf9": "How do automated systems threaten the rights of the American public?", "1fe6aca7-365b-48fd-840b-21cafc375daa": "In what ways have technology and data been used to limit opportunities and access to resources?", "35d9684c-2d7c-462a-888f-687e5c1743d1": "What are some documented problems with systems designed to help with patient care?", "f3af11a5-dee0-41ce-9ec8-2befaa2c4b82": "How do algorithms in hiring and credit decisions reproduce existing inequities?", "f6a36e8c-7547-40e4-ab95-7e3640f7d9b0": "What are the potential harms of unchecked social media data collection?", "fd9e9dca-1dea-4621-9bc7-a973840546ac": "How can automated systems undermine privacy and track activity without consent?", "86f7a45a-b678-4ca0-9231-2659114e545f": "What are some examples of bias and discrimination embedded in automated systems?", "2807de8e-8f7e-4f04-9938-9baaf2698edf": "How can the harmful outcomes of automated systems be prevented?", "82042ccd-7ba3-45de-be88-dd7926ed2fa7": "What are the challenges posed to democracy by the use of technology and data?", "3f49ac16-f537-46ae-ac21-b82e769969c2": "How can we ensure that automated systems are safe and effective?", "ad65e418-3028-4caf-8dda-28cfc82b6e65": "What is generative AI and how does it create synthetic content?", "8a14b547-517f-4f3e-b937-5205af638a25": "How are foundation models related to generative AI?", "e996fac3-d63c-4fec-bdd3-645e2052ce20": "What are dual-use foundation models as defined by EO 14110?", "2fff0be9-8a59-42da-9d8f-9bf01c03c3a5": "What is the significance of self-supervision in training AI models?", "ff4c692f-1cfe-467e-9925-e44da6bac8a6": "How many parameters do dual-use foundation models typically contain?", "77dc336f-61a8-4597-8e60-7d6a28b2750a": "What is the role of the National Institute of Standards and Technology (NIST) in developing resources for generative AI?", "992eb21e-18d3-431b-82ee-6c8845b57e84": "What is the AI RMF mentioned in the context of EO 14110?", "754a638c-4963-45fc-b877-baf962d095cd": "How does EO 14110 impact the development of generative AI models?", "7837880f-1aaa-4a74-a820-8b848142069f": "What types of digital content can generative AI produce?", "83e7fc2a-5676-4a2d-9b53-fe9dc216d52d": "What is the purpose of NIST AI 100\u20131 in relation to generative AI?", "da4c933d-5790-469e-8c9c-08a80e387962": "What are the best practices for obtaining informed consent in feedback activities?", "b85c7dc7-598b-4f88-a94c-845f6bfd5cec": "How can organizations ensure they are following human subjects research requirements?", "e973c5e2-042d-4219-9ab3-9357946c544f": "What are effective methods for compensating subjects in 
research studies?", "3750af0a-f7d0-4923-8075-a53beefd9441": "How can feedback activities improve system documentation?", "5d13e05d-ba03-4e6f-8b60-c6aba8ce6ab7": "What role does informed consent play in enhancing debugging practices?", "de4376c0-1172-4302-94d6-b2308ff91785": "What are the key components of human subjects research requirements?", "35b33780-ab39-406c-a73a-5af626b322df": "How can organizations balance subject compensation with ethical considerations?", "2da11dfc-4bf8-44c6-b11b-bb784841bfa5": "What strategies can be used to integrate feedback into decision-making processes?", "9c4e8887-02a6-46a2-855f-dd49cdc870b1": "How does subject compensation impact the quality of feedback received?", "c3b4dffe-1622-4cde-b35d-0384b5fe8b90": "What are the ethical considerations when implementing feedback activities in organizations?", "ee6a09cb-bc0e-4337-94c7-9f98ea4e5ee3": "What is the role of the Software & Information Industry Association in the tech industry?", "b23e7b4f-f930-4e25-9deb-614c17f86250": "How is Stephanie Dinkins contributing to the Future Histories Studio at Stony Brook University?", "40981bd2-f90d-4edb-a063-87981f5fad81": "What initiatives does TechNet support in the technology sector?", "dc149b68-bca5-4e5e-912d-d7be8e71e054": "How does the Alliance for Media Arts and Culture collaborate with MIT Open Documentary Lab and Co-Creation Studio?", "d5696d57-18c7-4cce-961f-32fcfcc85df9": "What are the main objectives of the International Brotherhood of Teamsters in relation to technology?", "61c0fd7b-0e62-4a0b-9328-ce82b486e261": "How does the Leadership Conference on Civil and Human Rights engage with technology policy?", "6c72d4a0-152b-4b01-afbc-bbec5f6e4b2f": "What is Thorn's mission in the context of technology and human rights?", "8478afea-bd7b-47d0-8e3b-4ade048996f5": "What is the US Chamber of Commerce\u2019s Technology Engagement Center's stance on emerging technologies?", "f855fd92-7288-4188-bcc8-9e9d6658cb4c": "How does the University of Pittsburgh Undergraduate Student Collaborative contribute to tech research?", "9363da58-a7e3-417b-8b3f-f36f8cc73b2e": "What is the focus of the XR Safety Initiative in the realm of extended reality technologies?", "c7a8a9e5-f64e-4726-a954-752b9bfa62d8": "What are the key components of establishing policies, procedures, and processes for oversight functions in the GAI lifecycle?", "d4f81aa1-536d-4926-ab69-7bf7a5842d58": "How can organizations ensure effective oversight from problem formulation to system decommission in AI projects?", "6ec865eb-0ef1-4d0f-a574-83c9bcb8e93a": "What roles do senior leadership, legal, and compliance teams play in the oversight of AI systems?", "10621bd5-3020-4fd2-8090-ee0a5a365370": "What are the main tasks involved in AI deployment, design, development, operation, and monitoring?", "8a985b69-031f-42d8-b2fe-c3c003206f80": "How should organizational teams document the risks and potential impacts of AI technology?", "d55c5fb5-723c-4f9c-b466-fbe12ec885fb": "What are the best practices for communicating the impacts of AI technology more broadly within an organization?", "87cdba18-ff3f-490d-97f4-3679a7ab585f": "Why is it important to establish terms of use and terms of service for GAI systems?", "d1a0a484-a7da-40ad-b494-d8fba48d4cc8": "How can organizations address risks related to intellectual property, dangerous content, and abusive content in AI systems?", "d95e85a2-6162-47ed-bc37-6e7bf3a90580": "What is the significance of including relevant AI actors in the GAI system risk identification process?", 
"c8a9e9a9-ec2d-4b97-a906-cc52fec59a02": "How can organizations verify the downstream impacts of GAI systems, especially when using third-party components?", "050deb8f-2d38-4b19-abce-8d42367a3897": "Why is data privacy considered a foundational principle in modern business models?", "e4ead17c-b324-49df-9768-46f3947244e3": "How do companies use data collection to build individual profiles?", "2e67721a-7a35-4081-a350-c289a7b797bb": "What are some examples of how data privacy can protect against surveillance?", "ffcf5bdb-0269-4d50-a0fc-e0f5cd5d6cf0": "How do government agencies utilize data collection for law enforcement purposes?", "805ffbfc-6fde-4a13-af7b-92d425ac5ff2": "What are the potential risks of companies tracking the behavior of the public?", "b7173668-8845-4ff4-a8bf-82d58d4670cf": "How does data privacy impact the use of automated systems?", "f786de70-6184-40ef-bcd4-dbc7e418ec99": "Why is data sharing and reuse a concern for data privacy?", "dd27c837-114d-4771-b470-8edadae0e05e": "What technologies are used by law enforcement to enhance surveillance capabilities?", "f986530c-70e1-4674-a4a3-d2c2fa9f3c48": "How can data privacy principles help protect individual freedoms?", "d6f7085a-3f9d-41d4-a61c-de22f689406b": "What are the implications of expanded surveillance capabilities on data privacy?", "69e6c713-aa1a-4a39-b3b8-5a9c3d192560": "What are the potential risks of using the same algorithm in employment and lending decisions?", "b9ded17f-ece6-4a9a-83db-3d1c1ce214b6": "How can correlated failures in AI systems affect the labor market?", "118c93d6-f413-43f8-9eb0-5322555c7701": "What are some examples of unexpected shocks that could impact AI decision-making systems?", "378ad341-25e9-4a99-82e3-2d3b20c79183": "How might the use of AI in employment decisions influence job security?", "3e2c5916-b757-4486-950c-91ae7c275a13": "What are the projected impacts of AI on the workforce according to recent studies?", "604a356d-af0d-4af6-ae0a-2362a5215a8e": "Why are fewer studies examining the impact of General Artificial Intelligence (GAI) on the labor market?", "70c87bb5-4afa-45a5-a192-51c40b99026a": "How are employees and employers reacting to the potential disruption caused by GAI?", "6246f0e9-a17a-44cc-99f0-30db573afe58": "What measures can be taken to mitigate the risks of correlated failures in AI systems?", "e8751b61-9fee-43e4-99ce-3c16b3f81807": "How do industry surveys reflect the concerns of employees regarding AI in the workplace?", "5e9a5f8c-765c-428a-a81c-62574fa7770e": "What are the implications of multiple actors relying on the same algorithm in lending decisions?", "8fd0cff6-85a3-4dba-abda-91cdb762349a": "What are the environmental impacts of high compute resource utilization in training AI models?", "5cd61fe8-2405-49cc-93bf-9ad9a656887b": "How does the use of non-representative training data lead to performance disparities between sub-groups or languages in AI models?", "05c34830-40b3-4fe5-9695-44a5b4ddb868": "In what ways can AI models amplify historical, societal, and systemic biases?", "a5419133-45d4-4c99-9e69-273c2f4058cb": "What are the potential adverse effects on ecosystems due to the high compute resource utilization in AI operations?", "ef754679-4152-42b3-a445-a7b434eebddd": "How can undesired homogeneity in AI model outputs lead to ill-founded decision-making?", "832fc999-15fc-4b42-9654-2177da3e8011": "What is algorithmic aversion, and how does it affect human-AI interactions?", "15f4dc2c-db75-4f81-a8cf-149786ddd579": "How can over-reliance on AI systems impact human 
decision-making?", "e4d93cdb-eeac-42f2-9ae7-a646b63d3e7c": "What are the risks of anthropomorphizing AI systems in human-AI configurations?", "ec919833-ed12-42e2-9839-457e42e0568f": "How does automation bias manifest in interactions between humans and AI systems?", "3dd2b1f9-4fae-4eb2-8ead-27f913346a48": "What measures can be taken to mitigate the amplification of harmful biases in AI models?", "8eb81f04-21af-4b5e-b8bc-5dd9ddf12a19": "What are the most effective metrics for measuring the effectiveness of data provenance in security?", "7c17281e-f1f1-4b05-a681-4f085c6d7b0d": "How can user feedback be analyzed to improve understanding of content authenticity?", "57455a6a-fdc6-40a8-810e-ce67e69a459c": "What are the common concerns users have regarding content provenance and authenticity?", "cf632d03-f1b6-4eac-82f3-c0eeb5c6a7b7": "How do watermarking and cryptographic signatures contribute to content authentication?", "cbedd326-5ed2-4e1e-8b52-9599a3a3ef5a": "What methods can be used to measure the reliability of content authentication techniques?", "e199ba84-6648-43a1-8075-da2bccf8135f": "How can the rate of false positives and false negatives in content provenance be evaluated?", "42cbc313-aee1-442e-9707-f8a4cda1b659": "What is the importance of model integrity verification in content provenance?", "f174193d-52c9-4008-9de8-a830c2ab8659": "How does the number of unauthorized access attempts reflect the effectiveness of security measures?", "447c315e-07a6-4f78-bbc8-b783bcbf758a": "What role do digital fingerprints play in ensuring information integrity?", "3b87a3bd-9810-4130-9a77-016a6194b4af": "How can access controls and conformity assessments support content provenance techniques?", "6d54ebc0-4ddd-48e8-ac25-0f3b41399d6e": "How can I find out if an automated system is being used to make decisions that impact me?", "6126ed1b-d1c5-4b25-8dc0-506e2f075845": "What kind of documentation should designers and developers provide for automated systems?", "4e4463eb-454a-4f19-83a0-710cb38be7ed": "Who is responsible for maintaining and updating the notice about the use of automated systems?", "0875bf4d-94e5-4bc1-b7bd-a0618b496482": "How can I understand the role of automation in the outcomes that affect me?", "c4015147-e2ab-4686-b432-838d5504d8f4": "What should I do if I notice significant changes in the functionality of an automated system that impacts me?", "8f24b9bc-1fb7-45c6-9731-cce8c51d4bd2": "How can I get a clear explanation of how an automated system determined an outcome that affects me?", "ce820b6d-3bab-4138-b21e-3c94a05b131b": "What does it mean for an explanation from an automated system to be technically valid and meaningful?", "415c6ec6-f835-405b-bbcc-1e0ed22ea3e8": "How often should the notice about the use of automated systems be updated?", "7f81c5f0-eec3-40da-88f8-3a10b20297e9": "What should be included in the plain language documentation for automated systems?", "9fa50cab-5394-4aec-916b-5e47d663a1fb": "How can I know if an automated system is not the sole input determining an outcome that impacts me?", "ca32a8ce-0352-48e1-b059-40b626805076": "What are the key expectations for automated systems to ensure they are free from algorithmic discrimination?", "c303bf4a-19e7-45a2-a17a-3a2cbb7c518b": "How can automated systems be tested to ensure they do not exhibit algorithmic discrimination?", "3248fe7a-931f-43b9-a807-be31c9cba85c": "What does it mean to design automated systems to ensure equity, broadly construed?", "2541eb4b-0325-4e71-a932-aa9c2240ff5f": "How do existing anti-discrimination 
laws apply to algorithmic discrimination in automated systems?", "f12e1350-6a69-48ac-9443-7bdf2fe8a551": "What proactive technical steps can be taken to reinforce legal protections against algorithmic discrimination?", "d6014c0b-7667-4f4c-8c17-3d64e472ae45": "What policy steps can be taken to ensure equity for underserved communities in the context of automated systems?", "caf9948f-d28b-4304-b2a9-7b8099401937": "How can protections against algorithmic discrimination be integrated throughout the design process of automated systems?", "aa086a0f-d233-467a-a4d1-307fd662e78e": "What are some examples of sectors or contexts where additional technical standards for automated systems might be needed?", "1ed1b3ca-848b-43f1-9413-11eb5e39d5cf": "How can the development of automated systems extend beyond legal protections to ensure equity?", "30377abc-d189-4360-8bda-75aa620ba449": "What are the potential challenges in ensuring automated systems are free from algorithmic discrimination?", "5c610b06-5ba5-458d-96b7-23660211a937": "What are the main reasons people might prefer not to use an automated system?", "f2bd0bb9-78d0-463e-9e19-206aca0bdab8": "How can automated systems lead to unintended outcomes?", "9a107472-63da-4150-b45b-9ee5a0f906d1": "In what ways can automated systems reinforce bias?", "38c99ae1-7267-4154-86cf-10c475dfab87": "Why might automated systems be considered inaccessible to some people?", "1fdcc7e8-a5a5-4068-84b8-8860d4fae85a": "What are the potential inconveniences of using automated systems?", "fabe7ff6-7665-4ee8-9e67-18e690b5c99a": "How can the replacement of paper or manual processes by automated systems affect people?", "4f7e6756-8436-4690-bb5d-8dd5e1dc19d3": "What challenges do people face when trying to reach a human decision-maker after using an automated system?", "5c5aab7b-a9cb-4e35-9853-670e020c2aff": "How does the lack of human reconsideration impact access to rights and opportunities?", "110bdd1f-0551-477c-8ad2-fe3771e46d11": "What are some examples of delayed access caused by reliance on automated systems?", "d584dcfb-bb0c-406b-813d-7ed4f0a60630": "How can the principle of human alternatives and reconsideration protect against the flaws of automated systems?", "6f2db439-4624-4bad-9cfd-f22bb387bf87": "What are the processes for identifying emergent GAI system risks?", "71f13148-d4ab-498f-ad28-d437b31d648c": "How can external AI actors be consulted to identify GAI system risks?", "915bdb26-45ca-41a4-a5c4-026de9664a4d": "What is the role of human-AI configuration in managing GAI risks?", "3f1a9bb4-4f35-4ca2-8c26-6d0fe7f70c0c": "How does confabulation impact GAI systems?", "562ffbd0-42ee-4f9d-a966-3fb61c224bdf": "What tasks are AI actors responsible for in the context of GAI risks?", "6ca39f09-5de1-4418-b8c2-8326d872cf08": "How are feedback processes for end users integrated into AI system evaluation metrics?", "47ffecb1-e88b-42b1-aff7-8feb6a9ed56a": "What are the potential impacts of AI-generated content on different social, economic, and cultural groups?", "7d104609-63ee-48d6-a2e0-efeef372ac3e": "How can harmful bias and homogenization be mitigated in AI systems?", "5d2f156d-e171-4aef-b290-48f2732097ef": "What methods can be used to study end users' perceptions of GAI content?", "4fed3fad-982b-4a59-8f81-d2bad9cb3670": "How can the integrity of information in GAI content be assessed?", "a063b509-7e7c-4889-9a12-8706969e2004": "What are the best practices for AI testing and incident identification in an organization?", "1c222789-364a-4695-8083-7136f31694e0": "How can 
organizations measure the effectiveness of content provenance methodologies like cryptography and watermarking?", "350eaf07-f263-4761-aa56-cb44ce3ea3d1": "What criteria should be included in an AI system incident report?", "aa8cb59a-2ee8-4000-a39f-520d7f5ab6b3": "How can AI deployment and design be integrated into the value chain and component integration process?", "55a7d693-1922-4dd6-8840-e8017fd5d458": "What organizational practices are essential for effective AI operation and monitoring?", "088e1b4b-78b3-4dca-a354-7414b72febcc": "How can information integrity be maintained through content provenance methodologies?", "4979e972-3cfb-4a75-b2e2-92fc4640b928": "What are the key components of an effective AI incident reporting system?", "826ee64d-fab2-40f1-a6c0-911a7a68785a": "How can organizations ensure information security in AI systems?", "4216a185-7dcb-478c-b3cb-1cf32485b29f": "What tasks are involved in AI development and how do they impact the overall AI governance?", "1f50cb43-e63f-46c6-8a05-c03b8d9f604c": "How can organizations establish effective policies for AI system incident reporting?"}
\ No newline at end of file
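For reference, the validation file added below is a single JSON object with three parallel maps: `questions` (question id → question text), `relevant_contexts` (question id → list of gold context ids), and `corpus` (context id → source passage). A minimal sketch of how a file with this structure could drive a retrieval hit-rate check is shown here; `retrieve` and `my_retriever` are hypothetical stand-ins for the app's retriever and are not part of this repo.

```python
import json

def hit_rate(path: str, retrieve, k: int = 5) -> float:
    """Fraction of questions whose gold context appears in the top-k retrieved passages."""
    with open(path, encoding="utf-8") as f:
        data = json.load(f)  # the file holds one JSON object on a single line

    questions = data["questions"]         # question id -> question text
    relevant = data["relevant_contexts"]  # question id -> [context ids]
    corpus = data["corpus"]               # context id -> passage text

    hits = 0
    for qid, question in questions.items():
        gold = {corpus[cid] for cid in relevant[qid]}
        retrieved = retrieve(question, k=k)  # expected: list of passage strings
        if any(passage in gold for passage in retrieved):
            hits += 1
    return hits / len(questions)

# Usage (my_retriever is a hypothetical callable):
# print(hit_rate("Tasks/Task 4/val_dataset (2).jsonl", my_retriever))
```

diff --git a/Tasks/Task 4/val_dataset (2).jsonl b/Tasks/Task 4/val_dataset (2).jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..199c0acf8f8497d6f23737e4ee8ad919f69a0cca
--- /dev/null
+++ b/Tasks/Task 4/val_dataset (2).jsonl
@@ -0,0 +1 @@
+{"questions": {"61c5d3ec-11af-4a36-a028-e9e22afb5a8f": "What are the five principles outlined in the Blueprint for an AI Bill of Rights?", "662911a9-7407-4a24-95ff-2350dde354be": "How can communities and industry implement the practices suggested in the Blueprint for an AI Bill of Rights?", "09ded63e-b364-42e7-9677-e1dfa4932b9b": "What are the best practices for providing independent evaluators access to automated systems while ensuring privacy and security?", "6b539180-33d5-4cd2-abf5-63cbe6178e6a": "How can organizations ensure that evaluator access to automated systems remains truly independent and cannot be revoked without reasonable justification?", "f35c9f92-67ac-4772-9dab-6cf2ae32812f": "What are the legal requirements for providing notice when making a video recording of someone?", "d0edffef-580d-4890-a6dd-e08925fadd27": "How are companies and researchers improving automated systems to explain decisions that impact consumers?", "4a36f5cd-0f9d-42ba-bd8e-d0eaf0af2d52": "How do advertisement delivery systems reinforce racial and gender stereotypes?", "1c6de01d-b59d-4421-9339-0e501b4fd2b9": "What are the issues faced by transgender travelers with TSA body scanners at airport checkpoints?", "155db437-082c-44f4-8751-960146c3512c": "What are the five principles outlined in the Blueprint for an AI Bill of Rights?", "95cae333-a114-41e8-98f5-10619377f6bf": "How can organizations apply the Blueprint for an AI Bill of Rights to protect civil rights and privacy?", "077e8ee5-5768-4967-b8ed-891c6cc0085d": "What are the benefits of having a human fallback mechanism in automated systems?", "8edf6c51-407d-478c-832a-ef103ea3709e": "How do automated signature matching systems impact voters with mental or physical disabilities?", "7058b177-27f4-4d6b-a478-176ead46f325": "What are the best practices for documenting the sources and types of training data in AI models?", "1e48abdd-a664-4c7a-8f19-151ca61e5006": "How can user feedback be effectively integrated into system updates to address problematic content?", "e5aba341-abc2-4965-a224-fa10823f4d2f": "What is the two-part test used in the AI Bill of Rights framework to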
determine which systems are in scope?", "23c3711f-c55b-49e5-9936-22d6bfc010af": "How does the AI Bill of Rights framework ensure that automated systems do not negatively impact the American public's rights and access to critical resources?", "08a12dd0-5dd7-4f87-8913-d86a9cc2c8b7": "What are adversarial role-playing exercises and how do they help in identifying failure modes in GAI systems?", "7d2b3bbe-6d0b-470d-b85d-d0c636ac4354": "How can profiling threats and negative impacts improve the security of GAI systems?", "c385b92d-1c01-48ae-be4c-f6b42b5e6af6": "What are the potential negative impacts of school surveillance on students via laptops?", "b4286477-40f0-46b8-bba8-4fe204b0dafa": "How does \"Bossware\" affect the health of employees according to the Center for Democracy & Technology report?", "6c98dd15-2a73-4c66-8a6a-c578c67a2434": "How can employers ensure their use of AI in hiring complies with the Americans with Disabilities Act (ADA)?", "00ab3a02-dffb-482b-ad10-3cab6ad77520": "What are the potential risks of using healthcare algorithms that rely on past medical costs to predict future needs?", "510ed741-6a36-4d13-a7dc-6a42262136be": "What are some effective context-based measures to identify new impacts of GAI systems?", "8f74dbe1-c3ed-48ca-9635-d701d26e829a": "How can regular engagements with AI Actors help in evaluating unanticipated impacts of GAI systems?", "3809c393-b89e-494c-b529-c65e601c1544": "What are acceptable use policies for GAI interfaces and how do they determine the types of queries GAI applications should refuse to respond to?", "edb9b7b1-11c1-421c-a07f-7abe3d6e7c21": "How can organizations establish effective user feedback mechanisms for GAI systems, and what should these mechanisms include?", "5a48e740-85f0-48c7-b0c7-6247c384f052": "How often should adversarial testing be conducted to effectively map and measure GAI risks?", "d6567db0-b18c-4dcb-b80c-146f2047bc13": "What are the benefits of evaluating GAI system performance in real-world scenarios compared to controlled testing environments?", "59a37c01-7bac-4f9d-980f-48f5489e61e6": "What are the common statistics reported about who chooses the human alternative in automated systems?", "e24a71f0-8b86-461a-92bd-fa6cef7ca33b": "How often should reports on the accessibility, timeliness, and effectiveness of human consideration and fallback be made public?", "63dcc302-d64d-47f5-a304-a64d4d6642b4": "What are some examples of companies that have successfully implemented bias testing in their product quality assessment?", "9b9b4805-12cb-453d-a3f4-ddbb20679c39": "How are federal government agencies developing standards to prevent algorithmic discrimination?", "7b3d457a-d0bf-4b13-b59c-df184af98f08": "What are some common protections against unlawful surveillance and violations of privacy in both public and private sectors?", "9473baea-32cd-4147-a547-5d45b0daa757": "How can individuals ensure equitable access to education, housing, and employment opportunities?", "d4388801-831e-45e0-bf67-b67974027277": "What are the key principles outlined in the AI Bill of Rights?", "d4107956-2806-4098-a79e-e753cab1bf82": "How can the AI Bill of Rights be practically implemented in technical systems?", "829774bb-4770-46cf-9f1b-86f51e7b6679": "How can you ensure the data used in automated systems is of high quality and relevant to the task?", "79c355b3-3945-402d-9d15-e460689ba635": "What methods can be employed to measure and limit errors from data entry in automated systems?", "e558dbd7-ca81-4070-9777-49636694d674": "What are some reasons 
why certain risks cannot be measured quantitatively in AI systems?", "e1ce22f6-cad0-4bbe-87ae-5222158a4393": "How can organizations involve independent assessors and domain experts in the regular assessment of AI systems?", "ae84398b-1649-4cce-8fa2-6295c80f7ec9": "What are the risks associated with confabulated content in healthcare applications using GAI?", "648a7032-05c8-45c2-a7bb-2dca8fa9ffd0": "How can confabulated logic or citations from GAI systems mislead users?", "2b743770-5d66-4aa8-b9b4-c33adc78c1e3": "How can companies ethically use data to monitor employee performance without violating privacy?", "3b6f61ff-349d-4817-8c82-d064b9a71c86": "What are the legal implications of employers using surveillance data to intervene in employee discussions?", "f596fded-c16b-49cb-b400-734c65b185af": "What are the risks of using AI in high-stakes settings as highlighted by Pamela Wisniewski and Seny Kamara?", "1626655d-7f72-4d0a-9170-3abdc8ed86ec": "Why is it important to place trust in people rather than technologies when designing AI systems?", "cec6f35c-1b45-4d56-8c2f-aef7bc860a01": "How can organizations ensure that their demographic assessments are inclusive of all protected classifications?", "12aca964-2112-4b36-8a40-14ab1512ac75": "What are the best practices for separating demographic data used for disparity assessment from data used in automated systems?", "53a48063-f4fb-482f-bd70-36915ec63956": "What are some emerging technologies being used to improve social welfare systems?", "7fdbbfed-73aa-45a8-9f1c-58ec2c0f3912": "How can digital welfare systems impact life chances according to experts like Christiaan van Veen?", "0ed0fb9c-47c4-4c7c-a5ae-d7e3a35670a1": "What are some best practices for developers to ensure privacy by design in smartphone apps?", "88297ffa-b5ca-460c-81ed-a61975ab39ef": "How can developers make app permissions clear and use-specific for users?", "38409d77-4936-4266-a7f3-2d910d3bea91": "What are the privacy implications of using biometric identification technologies in New York schools?", "3d2d3a9e-a6a7-49f5-bdd8-5db95fc8b602": "What are the reporting requirements for employers who surveil employees during a labor dispute?", "ca685f83-ccd7-4a17-a31d-bfc648b58840": "What measures are included in the AI Bill of Rights to ensure automated systems are safe and effective?", "ce1fdffd-851d-463e-8f24-4596865b62dc": "How does the AI Bill of Rights propose to handle the risks and potential impacts of automated systems?", "1a82989c-3ead-4aea-9098-53d3dca7f9b7": "What are the potential downstream impacts of errors in third-party GAI components on system accuracy and robustness?", "a30ea710-3349-4357-8dcb-915f6c69f2da": "How can inaccuracies in test dataset labels affect the stability and robustness of GAI benchmarks?", "004b52ee-6a49-47d7-a4bd-77ec96fadc31": "What are the best practices for developing and updating GAI system incident response and recovery plans?", "a5ad1cc1-318a-4210-8838-22015d780344": "How can organizations ensure their response and recovery plans account for the entire GAI system value chain?", "f05e4729-18f1-4664-9f41-2ad997f9d726": "How can we assess the proportion of synthetic to non-synthetic training data in AI models?", "81c90ac3-caf0-4c9d-8e02-8c62d26a047e": "What are the best practices for documenting the environmental impacts of AI model development and deployment?", "0abf12fc-3e73-41e5-8594-5e2bb6ecdb24": "What are the primary considerations for organizations designing and developing GAI according to the GAI PWG consultation process?", 
"e3abf868-922a-42e7-8c5a-b1ff0a353d39": "How can governance principles and techniques be applied to manage risks in GAI systems?", "55c79cd5-dee3-4e43-b8a3-839028518379": "What are the key considerations for documenting the intended purposes and beneficial uses of an AI system?", "456333eb-689e-4896-b2d4-0cf136672c77": "How do internal vs external use and narrow vs broad application scope impact the identification of intended purposes for AI systems?", "8834b86c-b1b9-43d6-92e0-3c64ca09e854": "How can feedback from internal and external AI actors be used to assess the impact of AI-generated content?", "e84f1a90-e702-4594-84b8-5c5b67352195": "What are the benefits of using real-time auditing tools for tracking and validating the lineage and authenticity of AI-generated data?", "490b6ca7-059f-41fe-82ae-b8d2c3890cf1": "What are the main findings of Carlini et al (2024) regarding the vulnerabilities in production language models?", "59bed72b-bd80-47c3-bb57-08dd086ecf9d": "How does the study by Chandra et al (2023) propose to combat Chinese influence operations and disinformation?", "625e3e66-e1fc-4223-a201-e88b765f449e": "What is the role of the Electronic Privacy Information Center (EPIC) in AI policy and regulation?", "da4a10c9-db2a-45fa-bad5-b66ef842c023": "How does the Innocence Project utilize AI to support its mission?", "40ab1b55-bc53-4cae-8f7e-4657a5b2bdc2": "What is the role of the National Center for Missing & Exploited Children?", "46de7819-7250-4050-8bf9-4635a1a02f3e": "How does the New York Civil Liberties Union contribute to civil rights advocacy?", "6feae899-9900-454f-a64d-39e842af8c76": "How can AI tools be misused in the development of chemical or biological agents?", "36826afc-57e4-4d70-bc7e-4ca62e3e3e67": "What are the potential risks associated with the use of biological design tools (BDTs) in chemistry and biology?", "84440495-e768-4885-b78b-d8a0c17f3809": "How can expert AI red-teamers enhance the effectiveness of general public AI red-teamers?", "9c3a8107-d49c-4dc0-9f78-d71a506df892": "What are the benefits of using GAI-led red-teaming compared to human red-teamers alone?", "068d8bd2-9336-4e18-bd93-2199100e631f": "How can error ranges be calculated and included in explanations for decision-making systems?", "3138ca26-38b8-4e17-9b31-b38bc8a8eb4f": "What are the best practices for balancing usability and interface complexity when presenting decision-making information?", "095919bc-18fa-4316-b1e8-07572983b77b": "What are the potential benefits and drawbacks of using predictive policing in the criminal justice system?", "39406f17-a757-4201-91b5-284ba4ebbd39": "How can data-driven approaches be balanced with the need for community safety in criminal justice reform?", "2744b9cf-981d-42e5-aed3-bb8e5acb0b2e": "What are the reporting expectations for entities developing or using automated systems?", "798b53f4-f798-418a-abcd-6dd05f707c67": "How can the public access the Agency Inventories of AI Use Cases provided by the National Artificial Intelligence Initiative Office?", "d7fa2d65-26f8-4442-86f6-f1d6256e588a": "What are some effective methods for monitoring and assessing high-impact systems in qualitative user experience research?", "e50c31b3-bab1-4064-baa1-199c946d9789": "How can organizations ensure equity standards are maintained in algorithmic systems, and what steps should be taken if these standards are not met?", "644dcaa5-1731-43fe-b0f5-c6a4bc05564e": "What factors should be considered when updating or defining risk tiers for General Artificial Intelligence (GAI)?", 
"def43eb9-80b0-4ad2-9198-d84ecb89c720": "How can the psychological impacts of GAI, such as anthropomorphization and emotional entanglement, be mitigated?", "8495a23f-4bb7-47ac-8c54-58cf5675cdd7": "What are the best practices for establishing policies to manage risks related to rollover and fallback technologies in GAI systems?", "74ae51e9-63b3-48ce-9be7-4f88052d7bd6": "How can organizations ensure clear assignment of liability and responsibility in vendor contracts for GAI technologies?", "11cdd3ed-e09b-463d-9853-0be811073b75": "What are the best practices for ensuring the confidentiality of AI training data and model weights?", "9ca1ff0e-0cd9-4362-aca9-fd904077c845": "How can potential attack points in AI systems be identified and secured?", "8fe0054d-51ba-48c5-8cc5-259b2b96f535": "How can AI-powered cameras in delivery vans be improved to avoid incorrectly penalizing drivers?", "03b9f17b-0b61-401b-bc65-47d0655f31d8": "What are the common issues faced by companies using AI to monitor road safety habits of drivers?", "6d622041-fccf-4eb4-9a53-f7d7577856f8": "What are the differences in resource usage between AI training and inference?", "a1738003-3e17-48e7-86a2-1410bc0f1c07": "How can we verify the effectiveness of carbon capture programs for AI training?", "d15e0c10-378f-48a3-9a5c-be0c618106b4": "What protocols should be in place to ensure the safe deactivation of AI systems?", "7e7e2c28-ea80-4568-a71a-41966f9f117f": "What factors need to be considered when decommissioning AI systems to prevent data leakage and ensure security?", "57073541-fc8c-43cd-8b42-f9497eb501af": "What are the best practices for limiting access to sensitive data based on necessity and local control?", "92d9e36d-0fef-4b2e-b40d-ff2b800fcf10": "How should organizations report data security lapses or breaches involving sensitive data?", "6f7aa060-c19a-4614-83d2-134828a7e956": "What is the purpose of the email address ai-equity@ostpeopgov created by OSTP?", "6b95bc28-dbb4-408f-8c5b-f5b37073b6fd": "Where can I find the full responses to the OSTP's Request For Information (RFI) on biometric technologies?", "4776eaa1-b6f0-440c-a6be-923bbf49687d": "What are the practical steps to implement ethical principles in technology?", "acf74d86-1184-4092-8a1d-3ca58f5fe97a": "How can risk management be integrated into technological innovation to protect people from harm?", "2c1b02c6-1919-49ea-beff-165567d20b47": "What are the key capabilities needed for automated systems to help users make consent, access, and control decisions in a complex data ecosystem?", "2d15dfed-c66d-4fac-89dd-3aded02ec63e": "How can independent evaluations of data policies help ensure data privacy and user control in automated systems?", "e71beb7c-7564-4f2c-83f7-ec9bb3134847": "How can the rate of implementing recommendations from security checks and incidents be measured effectively?", "6c079fa0-60c3-4c8d-826a-2816c65d3ea0": "What are the best practices for performing AI red-teaming to assess resilience against various types of attacks?", "543e9bfb-b5f4-4247-89c8-41e0e7fb11a9": "What are the legal and regulatory requirements for reporting GAI incidents under HIPAA?", "8613b055-c817-4a59-84cf-1ae29a7c2269": "How does the NHTSA's 2022 autonomous vehicle crash reporting requirements impact AI deployment and monitoring?", "ce252388-c4d9-4968-aadf-218b47f609a5": "How do you document the justification for each data attribute in an automated system?", "f46088d7-1004-41cb-87c5-8a2b0bcdef59": "What are the best practices for ensuring that the use of high-dimensional data 
attributes does not violate applicable laws?", "b5f49997-5049-4865-9b5b-c18d880e2baf": "How can organizations adjust their governance regimes to effectively manage the risks associated with generative AI systems?", "eeb5acfd-3be2-4488-b45e-e0979bd5c855": "What are the key considerations for third-party governance across the AI value chain when dealing with generative AI?"}, "relevant_contexts": {"61c5d3ec-11af-4a36-a028-e9e22afb5a8f": ["80e81c8c-bb97-4604-bdef-dcc56813587a"], "662911a9-7407-4a24-95ff-2350dde354be": ["80e81c8c-bb97-4604-bdef-dcc56813587a"], "09ded63e-b364-42e7-9677-e1dfa4932b9b": ["d0a6097e-42c8-499f-8d6d-bcfae7f992d5"], "6b539180-33d5-4cd2-abf5-63cbe6178e6a": ["d0a6097e-42c8-499f-8d6d-bcfae7f992d5"], "f35c9f92-67ac-4772-9dab-6cf2ae32812f": ["51421b31-1a41-49da-a2c2-65df54ae93ce"], "d0edffef-580d-4890-a6dd-e08925fadd27": ["51421b31-1a41-49da-a2c2-65df54ae93ce"], "4a36f5cd-0f9d-42ba-bd8e-d0eaf0af2d52": ["758f783b-3fdc-4890-9de4-da3c035c1141"], "1c6de01d-b59d-4421-9339-0e501b4fd2b9": ["758f783b-3fdc-4890-9de4-da3c035c1141"], "155db437-082c-44f4-8751-960146c3512c": ["96838aa0-1bf7-4ae3-a8d7-5d093e9feb39"], "95cae333-a114-41e8-98f5-10619377f6bf": ["96838aa0-1bf7-4ae3-a8d7-5d093e9feb39"], "077e8ee5-5768-4967-b8ed-891c6cc0085d": ["66c96cba-2674-4734-a869-d002faab751c"], "8edf6c51-407d-478c-832a-ef103ea3709e": ["66c96cba-2674-4734-a869-d002faab751c"], "7058b177-27f4-4d6b-a478-176ead46f325": ["2689bb50-4ffd-4610-856c-c8fad4ab7285"], "1e48abdd-a664-4c7a-8f19-151ca61e5006": ["2689bb50-4ffd-4610-856c-c8fad4ab7285"], "e5aba341-abc2-4965-a224-fa10823f4d2f": ["7515dd00-b05d-49ea-baa0-7cedeb05eb39"], "23c3711f-c55b-49e5-9936-22d6bfc010af": ["7515dd00-b05d-49ea-baa0-7cedeb05eb39"], "08a12dd0-5dd7-4f87-8913-d86a9cc2c8b7": ["f339987a-b2cd-4258-85c5-a864712a9e98"], "7d2b3bbe-6d0b-470d-b85d-d0c636ac4354": ["f339987a-b2cd-4258-85c5-a864712a9e98"], "c385b92d-1c01-48ae-be4c-f6b42b5e6af6": ["673465c5-faf7-4ab1-86e0-d7cc5751143d"], "b4286477-40f0-46b8-bba8-4fe204b0dafa": ["673465c5-faf7-4ab1-86e0-d7cc5751143d"], "6c98dd15-2a73-4c66-8a6a-c578c67a2434": ["3df80c8e-fd5b-436c-9411-42e36faeeaef"], "00ab3a02-dffb-482b-ad10-3cab6ad77520": ["3df80c8e-fd5b-436c-9411-42e36faeeaef"], "510ed741-6a36-4d13-a7dc-6a42262136be": ["225534bb-e40d-42be-9258-309083656512"], "8f74dbe1-c3ed-48ca-9635-d701d26e829a": ["225534bb-e40d-42be-9258-309083656512"], "3809c393-b89e-494c-b529-c65e601c1544": ["52b00ce1-0f48-46fb-9bdb-6c3ab575940b"], "edb9b7b1-11c1-421c-a07f-7abe3d6e7c21": ["52b00ce1-0f48-46fb-9bdb-6c3ab575940b"], "5a48e740-85f0-48c7-b0c7-6247c384f052": ["3604ee55-dc85-43ef-8409-908fe897aef7"], "d6567db0-b18c-4dcb-b80c-146f2047bc13": ["3604ee55-dc85-43ef-8409-908fe897aef7"], "59a37c01-7bac-4f9d-980f-48f5489e61e6": ["760e42ec-824f-4c12-98b7-856008ae5680"], "e24a71f0-8b86-461a-92bd-fa6cef7ca33b": ["760e42ec-824f-4c12-98b7-856008ae5680"], "63dcc302-d64d-47f5-a304-a64d4d6642b4": ["706f37a3-1ae3-462f-9ae9-f447c8386d34"], "9b9b4805-12cb-453d-a3f4-ddbb20679c39": ["706f37a3-1ae3-462f-9ae9-f447c8386d34"], "7b3d457a-d0bf-4b13-b59c-df184af98f08": ["0d9098f6-5346-47fb-b91d-0a76054887ac"], "9473baea-32cd-4147-a547-5d45b0daa757": ["0d9098f6-5346-47fb-b91d-0a76054887ac"], "d4388801-831e-45e0-bf67-b67974027277": ["a5324dcc-7f7d-4d13-a7b4-c61a11b3471b"], "d4107956-2806-4098-a79e-e753cab1bf82": ["a5324dcc-7f7d-4d13-a7b4-c61a11b3471b"], "829774bb-4770-46cf-9f1b-86f51e7b6679": ["2c82cba7-cefa-41fd-a6d5-c90edb9b59f9"], "79c355b3-3945-402d-9d15-e460689ba635": ["2c82cba7-cefa-41fd-a6d5-c90edb9b59f9"], "e558dbd7-ca81-4070-9777-49636694d674": 
["ab16f609-33d2-4f10-9b50-ff0066dc6a13"], "e1ce22f6-cad0-4bbe-87ae-5222158a4393": ["ab16f609-33d2-4f10-9b50-ff0066dc6a13"], "ae84398b-1649-4cce-8fa2-6295c80f7ec9": ["ff613344-c661-48a5-af0c-950d87f38882"], "648a7032-05c8-45c2-a7bb-2dca8fa9ffd0": ["ff613344-c661-48a5-af0c-950d87f38882"], "2b743770-5d66-4aa8-b9b4-c33adc78c1e3": ["ff7088b4-e4f7-4ef1-89b6-2293bc428ded"], "3b6f61ff-349d-4817-8c82-d064b9a71c86": ["ff7088b4-e4f7-4ef1-89b6-2293bc428ded"], "f596fded-c16b-49cb-b400-734c65b185af": ["c7bdee72-9ac2-418f-ac50-b41a38e31eb7"], "1626655d-7f72-4d0a-9170-3abdc8ed86ec": ["c7bdee72-9ac2-418f-ac50-b41a38e31eb7"], "cec6f35c-1b45-4d56-8c2f-aef7bc860a01": ["689778c9-90f6-4c4a-ab36-6fb05ad68144"], "12aca964-2112-4b36-8a40-14ab1512ac75": ["689778c9-90f6-4c4a-ab36-6fb05ad68144"], "53a48063-f4fb-482f-bd70-36915ec63956": ["2f4d5ac1-d6b0-48df-a313-39f40766a20c"], "7fdbbfed-73aa-45a8-9f1c-58ec2c0f3912": ["2f4d5ac1-d6b0-48df-a313-39f40766a20c"], "0ed0fb9c-47c4-4c7c-a5ae-d7e3a35670a1": ["473f218e-e471-4506-a9ba-a4840bcf9eb1"], "88297ffa-b5ca-460c-81ed-a61975ab39ef": ["473f218e-e471-4506-a9ba-a4840bcf9eb1"], "38409d77-4936-4266-a7f3-2d910d3bea91": ["3d2d1cf5-ddbb-40dc-a570-3f55f091e095"], "3d2d3a9e-a6a7-49f5-bdd8-5db95fc8b602": ["3d2d1cf5-ddbb-40dc-a570-3f55f091e095"], "ca685f83-ccd7-4a17-a31d-bfc648b58840": ["fcbeb8b3-4cff-4248-b03e-fc6879248660"], "ce1fdffd-851d-463e-8f24-4596865b62dc": ["fcbeb8b3-4cff-4248-b03e-fc6879248660"], "1a82989c-3ead-4aea-9098-53d3dca7f9b7": ["5ff1ba24-2f90-4f45-a3a3-6e1c50395575"], "a30ea710-3349-4357-8dcb-915f6c69f2da": ["5ff1ba24-2f90-4f45-a3a3-6e1c50395575"], "004b52ee-6a49-47d7-a4bd-77ec96fadc31": ["62a002de-0d3c-44dd-a41c-3fd464e4087a"], "a5ad1cc1-318a-4210-8838-22015d780344": ["62a002de-0d3c-44dd-a41c-3fd464e4087a"], "f05e4729-18f1-4664-9f41-2ad997f9d726": ["7a809df5-be14-43b9-9219-bb0b8d1f7d2c"], "81c90ac3-caf0-4c9d-8e02-8c62d26a047e": ["7a809df5-be14-43b9-9219-bb0b8d1f7d2c"], "0abf12fc-3e73-41e5-8594-5e2bb6ecdb24": ["1b4ea0b8-2883-4f20-8b10-198e6ad55155"], "e3abf868-922a-42e7-8c5a-b1ff0a353d39": ["1b4ea0b8-2883-4f20-8b10-198e6ad55155"], "55c79cd5-dee3-4e43-b8a3-839028518379": ["5d49e42f-479a-415f-8de0-91ebbd0e77df"], "456333eb-689e-4896-b2d4-0cf136672c77": ["5d49e42f-479a-415f-8de0-91ebbd0e77df"], "8834b86c-b1b9-43d6-92e0-3c64ca09e854": ["d8dc77d4-d7bc-40c8-bb38-e6f96f77391c"], "e84f1a90-e702-4594-84b8-5c5b67352195": ["d8dc77d4-d7bc-40c8-bb38-e6f96f77391c"], "490b6ca7-059f-41fe-82ae-b8d2c3890cf1": ["c3a79cf4-99fe-41a5-94a9-9972c547b027"], "59bed72b-bd80-47c3-bb57-08dd086ecf9d": ["c3a79cf4-99fe-41a5-94a9-9972c547b027"], "625e3e66-e1fc-4223-a201-e88b765f449e": ["ecf9714c-7e5b-4f00-9fad-45441a3db2a8"], "da4a10c9-db2a-45fa-bad5-b66ef842c023": ["ecf9714c-7e5b-4f00-9fad-45441a3db2a8"], "40ab1b55-bc53-4cae-8f7e-4657a5b2bdc2": ["e8c07b22-d96c-4cfc-be67-00e326b77e19"], "46de7819-7250-4050-8bf9-4635a1a02f3e": ["e8c07b22-d96c-4cfc-be67-00e326b77e19"], "6feae899-9900-454f-a64d-39e842af8c76": ["1787e4ab-ddaa-436b-a84c-5b09e0444b2b"], "36826afc-57e4-4d70-bc7e-4ca62e3e3e67": ["1787e4ab-ddaa-436b-a84c-5b09e0444b2b"], "84440495-e768-4885-b78b-d8a0c17f3809": ["963066ad-85cd-44d7-a513-c5fc3b5f1733"], "9c3a8107-d49c-4dc0-9f78-d71a506df892": ["963066ad-85cd-44d7-a513-c5fc3b5f1733"], "068d8bd2-9336-4e18-bd93-2199100e631f": ["5ad44c84-503d-4b61-95dc-22017c580f31"], "3138ca26-38b8-4e17-9b31-b38bc8a8eb4f": ["5ad44c84-503d-4b61-95dc-22017c580f31"], "095919bc-18fa-4316-b1e8-07572983b77b": ["ac5d591f-9174-44b6-be57-08f8b0e48100"], "39406f17-a757-4201-91b5-284ba4ebbd39": 
["ac5d591f-9174-44b6-be57-08f8b0e48100"], "2744b9cf-981d-42e5-aed3-bb8e5acb0b2e": ["d41067f5-b199-46fa-95e6-571e133d23ff"], "798b53f4-f798-418a-abcd-6dd05f707c67": ["d41067f5-b199-46fa-95e6-571e133d23ff"], "d7fa2d65-26f8-4442-86f6-f1d6256e588a": ["c100cd93-2611-4d50-a99b-8728ccb99ba1"], "e50c31b3-bab1-4064-baa1-199c946d9789": ["c100cd93-2611-4d50-a99b-8728ccb99ba1"], "644dcaa5-1731-43fe-b0f5-c6a4bc05564e": ["0b2a13ab-790a-4e74-97a6-dbd3f2f3834d"], "def43eb9-80b0-4ad2-9198-d84ecb89c720": ["0b2a13ab-790a-4e74-97a6-dbd3f2f3834d"], "8495a23f-4bb7-47ac-8c54-58cf5675cdd7": ["c65eb4b9-10bb-4fcf-b682-fca84d3f37a1"], "74ae51e9-63b3-48ce-9be7-4f88052d7bd6": ["c65eb4b9-10bb-4fcf-b682-fca84d3f37a1"], "11cdd3ed-e09b-463d-9853-0be811073b75": ["2ac15af5-0f67-4ab6-803a-169153471fbe"], "9ca1ff0e-0cd9-4362-aca9-fd904077c845": ["2ac15af5-0f67-4ab6-803a-169153471fbe"], "8fe0054d-51ba-48c5-8cc5-259b2b96f535": ["c3a647af-08ee-42b7-87a6-57644e59b9eb"], "03b9f17b-0b61-401b-bc65-47d0655f31d8": ["c3a647af-08ee-42b7-87a6-57644e59b9eb"], "6d622041-fccf-4eb4-9a53-f7d7577856f8": ["9aa5eff7-f727-421e-835d-3def1111689a"], "a1738003-3e17-48e7-86a2-1410bc0f1c07": ["9aa5eff7-f727-421e-835d-3def1111689a"], "d15e0c10-378f-48a3-9a5c-be0c618106b4": ["ecb13fde-537f-49b6-82bd-ad0e6de18a8c"], "7e7e2c28-ea80-4568-a71a-41966f9f117f": ["ecb13fde-537f-49b6-82bd-ad0e6de18a8c"], "57073541-fc8c-43cd-8b42-f9497eb501af": ["8f297398-44b9-4be9-bbfb-ff90fef13d5f"], "92d9e36d-0fef-4b2e-b40d-ff2b800fcf10": ["8f297398-44b9-4be9-bbfb-ff90fef13d5f"], "6f7aa060-c19a-4614-83d2-134828a7e956": ["04e3f601-a4a2-4cc0-9978-8595281b3c94"], "6b95bc28-dbb4-408f-8c5b-f5b37073b6fd": ["04e3f601-a4a2-4cc0-9978-8595281b3c94"], "4776eaa1-b6f0-440c-a6be-923bbf49687d": ["6690225c-fbc4-4316-bef9-9cf1d5e5957c"], "acf74d86-1184-4092-8a1d-3ca58f5fe97a": ["6690225c-fbc4-4316-bef9-9cf1d5e5957c"], "2c1b02c6-1919-49ea-beff-165567d20b47": ["73043a09-91db-4768-9c0b-702c2dfcd9f0"], "2d15dfed-c66d-4fac-89dd-3aded02ec63e": ["73043a09-91db-4768-9c0b-702c2dfcd9f0"], "e71beb7c-7564-4f2c-83f7-ec9bb3134847": ["2cfdb40f-4c06-45c7-ab73-2bcc65986c58"], "6c079fa0-60c3-4c8d-826a-2816c65d3ea0": ["2cfdb40f-4c06-45c7-ab73-2bcc65986c58"], "543e9bfb-b5f4-4247-89c8-41e0e7fb11a9": ["65cc819a-a0c3-4ffa-b6f0-e47f846de5a5"], "8613b055-c817-4a59-84cf-1ae29a7c2269": ["65cc819a-a0c3-4ffa-b6f0-e47f846de5a5"], "ce252388-c4d9-4968-aadf-218b47f609a5": ["f258f74e-4463-4558-a8be-88fcc9da5b5a"], "f46088d7-1004-41cb-87c5-8a2b0bcdef59": ["f258f74e-4463-4558-a8be-88fcc9da5b5a"], "b5f49997-5049-4865-9b5b-c18d880e2baf": ["16d54bad-34c2-4427-a979-eb6a860bc22e"], "eeb5acfd-3be2-4488-b45e-e0979bd5c855": ["16d54bad-34c2-4427-a979-eb6a860bc22e"]}, "corpus": {"80e81c8c-bb97-4604-bdef-dcc56813587a": "- \nUSING THIS TECHNICAL COMPANION\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the design, \nuse, and deployment of automated systems to protect the rights of the American public in the age of artificial \nintelligence. This technical companion considers each principle in the Blueprint for an AI Bill of Rights and \nprovides examples and concrete steps for communities, industry, governments, and others to take in order to \nbuild these protections into policy, practice, or the technological design process. 
\nTaken together, the technical protections and practices laid out in the Blueprint for an AI Bill of Rights can help \nguard the American public against many of the potential and actual harms identified by researchers, technolo\u00ad\ngists, advocates, journalists, policymakers, and communities in the United States and around the world. This", "d0a6097e-42c8-499f-8d6d-bcfae7f992d5": "via application programming interfaces). Independent evaluators, such as researchers, journalists, ethics \nreview boards, inspectors general, and third-party auditors, should be given access to the system and samples \nof associated data, in a manner consistent with privacy, security, law, or regulation (including, e.g., intellectual \nproperty law), in order to perform such evaluations. Mechanisms should be included to ensure that system \naccess for evaluation is: provided in a timely manner to the deployment-ready version of the system; trusted to \nprovide genuine, unfiltered access to the full system; and truly independent such that evaluator access cannot \nbe revoked without reasonable and verified justification. \nReporting.12 Entities responsible for the development or use of automated systems should provide \nregularly-updated reports that include: an overview of the system, including how it is embedded in the", "51421b31-1a41-49da-a2c2-65df54ae93ce": "requirement. \nProviding notice has long been a standard practice, and in many cases is a legal requirement, when, for example, \nmaking a video recording of someone (outside of a law enforcement or national security context). In some cases, such \nas credit, lenders are required to provide notice and explanation to consumers. Techniques used to automate the \nprocess of explaining such systems are under active research and improvement and such explanations can take many \nforms. Innovative companies and researchers are rising to the challenge and creating and deploying explanatory \nsystems that can help the public better understand decisions that impact them. \nWhile notice and explanation requirements are already in place in some sectors or situations, the American public \ndeserve to know consistently and across sectors if an automated system is being used in a way that impacts their rights, \nopportunities, or access. This knowledge should provide confidence in how the public is being treated, and trust in the", "758f783b-3fdc-4890-9de4-da3c035c1141": "than role models, toys, or activities.40 Some search engines have been working to reduce the prevalence of\nthese results, but the problem remains.41\n\u2022\nAdvertisement delivery systems that predict who is most likely to click on a job advertisement end up deliv-\nering ads in ways that reinforce racial and gender stereotypes, such as overwhelmingly directing supermar-\nket cashier ads to women and jobs with taxi companies to primarily Black people.42\u00ad\n\u2022\nBody scanners, used by TSA at airport checkpoints, require the operator to select a \u201cmale\u201d or \u201cfemale\u201d\nscanning setting based on the passenger\u2019s sex, but the setting is chosen based on the operator\u2019s perception of\nthe passenger\u2019s gender identity. These scanners are more likely to flag transgender travelers as requiring\nextra screening done by a person. 
Transgender travelers have described degrading experiences associated\nwith these extra screenings.43 TSA has recently announced plans to implement a gender-neutral algorithm44", "96838aa0-1bf7-4ae3-a8d7-5d093e9feb39": "ABOUT THIS FRAMEWORK\u00ad\u00ad\u00ad\u00ad\u00ad\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the \ndesign, use, and deployment of automated systems to protect the rights of the American public in the age of \nartificial intel-ligence. Developed through extensive consultation with the American public, these principles are \na blueprint for building and deploying automated systems that are aligned with democratic values and protect \ncivil rights, civil liberties, and privacy. The Blueprint for an AI Bill of Rights includes this Foreword, the five \nprinciples, notes on Applying the The Blueprint for an AI Bill of Rights, and a Technical Companion that gives \nconcrete steps that can be taken by many kinds of organizations\u2014from governments at all levels to companies of \nall sizes\u2014to uphold these values. Experts from across the private sector, governments, and international", "66c96cba-2674-4734-a869-d002faab751c": "in place, providing an important alternative to ensure access. Companies that have introduced automated call centers \noften retain the option of dialing zero to reach an operator. When automated identity controls are in place to board an \nairplane or enter the country, there is a person supervising the systems who can be turned to for help or to appeal a \nmisidentification. \nThe American people deserve the reassurance that such procedures are in place to protect their rights, opportunities, \nand access. People make mistakes, and a human alternative or fallback mechanism will not always have the right \nanswer, but they serve as an important check on the power and validity of automated systems. \n\u2022 An automated signature matching system is used as part of the voting process in many parts of the country to\ndetermine whether the signature on a mail-in ballot matches the signature on file. These signature matching\nsystems are less likely to work correctly for some voters, including voters with mental or physical", "2689bb50-4ffd-4610-856c-c8fad4ab7285": "data augmentations, parameter adjustments, or other modi\ufb01cations. Access to \nun-tuned (baseline) models supports debugging the relative in\ufb02uence of the pre-\ntrained weights compared to the \ufb01ne-tuned model weights or other system \nupdates. \nInformation Integrity; Data Privacy \nMG-3.2-003 \nDocument sources and types of training data and their origins, potential biases \npresent in the data related to the GAI application and its content provenance, \narchitecture, training process of the pre-trained model including information on \nhyperparameters, training duration, and any \ufb01ne-tuning or retrieval-augmented \ngeneration processes applied. \nInformation Integrity; Harmful Bias \nand Homogenization; Intellectual \nProperty \nMG-3.2-004 Evaluate user reported problematic content and integrate feedback into system \nupdates. 
\nHuman-AI Con\ufb01guration, \nDangerous, Violent, or Hateful \nContent \nMG-3.2-005 \nImplement content \ufb01lters to prevent the generation of inappropriate, harmful,", "7515dd00-b05d-49ea-baa0-7cedeb05eb39": "SECTION TITLE\nApplying The Blueprint for an AI Bill of Rights \nWhile many of the concerns addressed in this framework derive from the use of AI, the technical \ncapabilities and specific definitions of such systems change with the speed of innovation, and the potential \nharms of their use occur even with less technologically sophisticated tools. Thus, this framework uses a two-\npart test to determine what systems are in scope. This framework applies to (1) automated systems that (2) \nhave the potential to meaningfully impact the American public\u2019s rights, opportunities, or access to \ncritical resources or services. These rights, opportunities, and access to critical resources of services should \nbe enjoyed equally and be fully protected, regardless of the changing role that automated systems may play in \nour lives. \nThis framework describes protections that should be applied with respect to all automated systems that \nhave the potential to meaningfully impact individuals' or communities' exercise of:", "f339987a-b2cd-4258-85c5-a864712a9e98": "Content; Harmful Bias and \nHomogenization \nMP-5.1-005 Conduct adversarial role-playing exercises, GAI red-teaming, or chaos testing to \nidentify anomalous or unforeseen failure modes. \nInformation Security \nMP-5.1-006 \nPro\ufb01le threats and negative impacts arising from GAI systems interacting with, \nmanipulating, or generating content, and outlining known and potential \nvulnerabilities and the likelihood of their occurrence. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Design, AI Development, AI Impact Assessment, A\ufb00ected Individuals and Communities, End-\nUsers, Operation and Monitoring", "673465c5-faf7-4ab1-86e0-d7cc5751143d": "61. See, e.g., Nir Kshetri. School surveillance of students via laptops may do more harm than good. The\nConversation. Jan. 21, 2022.\nhttps://theconversation.com/school-surveillance-of-students-via-laptops-may-do-more-harm-than\u00ad\ngood-170983; Matt Scherer. Warning: Bossware May be Hazardous to Your Health. Center for Democracy\n& Technology Report.\nhttps://cdt.org/wp-content/uploads/2021/07/2021-07-29-Warning-Bossware-May-Be-Hazardous-To\u00ad\nYour-Health-Final.pdf; Human Impact Partners and WWRC. The Public Health Crisis Hidden in Amazon\nWarehouses. HIP and WWRC report. Jan. 2021.\nhttps://humanimpact.org/wp-content/uploads/2021/01/The-Public-Health-Crisis-Hidden-In-Amazon\u00ad\nWarehouses-HIP-WWRC-01-21.pdf; Drew Harwell. Contract lawyers face a growing invasion of\nsurveillance programs that monitor their work. The Washington Post. Nov. 11, 2021. https://\nwww.washingtonpost.com/technology/2021/11/11/lawyer-facial-recognition-monitoring/;", "3df80c8e-fd5b-436c-9411-42e36faeeaef": "The Equal Employment Opportunity Commission and the Department of Justice have clearly \nlaid out how employers\u2019 use of AI and other automated systems can result in \ndiscrimination against job applicants and employees with disabilities.53 The documents explain \nhow employers\u2019 use of software that relies on algorithmic decision-making may violate existing requirements \nunder Title I of the Americans with Disabilities Act (\u201cADA\u201d). 
This technical assistance also provides practical \ntips to employers on how to comply with the ADA, and to job applicants and employees who think that their \nrights may have been violated. \nDisparity assessments identified harms to Black patients' healthcare access. A widely \nused healthcare algorithm relied on the cost of each patient\u2019s past medical care to predict future medical needs, \nrecommending early interventions for the patients deemed most at risk. This process discriminated", "225534bb-e40d-42be-9258-309083656512": "28 \nMAP 5.2: Practices and personnel for supporting regular engagement with relevant AI Actors and integrating feedback about \npositive, negative, and unanticipated impacts are in place and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.2-001 \nDetermine context-based measures to identify if new impacts are present due to \nthe GAI system, including regular engagements with downstream AI Actors to \nidentify and quantify new contexts of unanticipated impacts of GAI systems. \nHuman-AI Con\ufb01guration; Value \nChain and Component Integration \nMP-5.2-002 \nPlan regular engagements with AI Actors responsible for inputs to GAI systems, \nincluding third-party data and algorithms, to review and evaluate unanticipated \nimpacts. \nHuman-AI Con\ufb01guration; Value \nChain and Component Integration \nAI Actor Tasks: AI Deployment, AI Design, AI Impact Assessment, A\ufb00ected Individuals and Communities, Domain Experts, End-\nUsers, Human Factors, Operation and Monitoring", "52b00ce1-0f48-46fb-9bdb-6c3ab575940b": "and Homogenization \nGV-3.2-003 \nDe\ufb01ne acceptable use policies for GAI interfaces, modalities, and human-AI \ncon\ufb01gurations (i.e., for chatbots and decision-making tasks), including criteria for \nthe kinds of queries GAI applications should refuse to respond to. \nHuman-AI Con\ufb01guration \nGV-3.2-004 \nEstablish policies for user feedback mechanisms for GAI systems which include \nthorough instructions and any mechanisms for recourse. \nHuman-AI Con\ufb01guration \nGV-3.2-005 \nEngage in threat modeling to anticipate potential risks from GAI systems. \nCBRN Information or Capabilities; \nInformation Security \nAI Actors: AI Design \n \nGOVERN 4.1: Organizational policies and practices are in place to foster a critical thinking and safety-\ufb01rst mindset in the design, \ndevelopment, deployment, and uses of AI systems to minimize potential negative impacts. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.1-001 \nEstablish policies and procedures that address continual improvement processes", "3604ee55-dc85-43ef-8409-908fe897aef7": "MEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. 
\nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Con\ufb01guration; \nConfabulation; Information \nSecurity \nMS-4.2-003", "760e42ec-824f-4c12-98b7-856008ae5680": "Demonstrate access to human alternatives, consideration, and fallback \nReporting. Reporting should include an assessment of timeliness and the extent of additional burden for \nhuman alternatives, aggregate statistics about who chooses the human alternative, along with the results of \nthe assessment about brevity, clarity, and accessibility of notice and opt-out instructions. Reporting on the \naccessibility, timeliness, and effectiveness of human consideration and fallback should be made public at regu\u00ad\nlar intervals for as long as the system is in use. This should include aggregated information about the number \nand type of requests for consideration, fallback employed, and any repeated requests; the timeliness of the \nhandling of these requests, including mean wait times for different types of requests as well as maximum wait \ntimes; and information about the procedures used to address requests for consideration along with the results", "706f37a3-1ae3-462f-9ae9-f447c8386d34": "protections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design", "0d9098f6-5346-47fb-b91d-0a76054887ac": "voting, and protections from discrimination, excessive punishment, unlawful surveillance, and violations of \nprivacy and other freedoms in both public and private sector contexts; equal opportunities, including equitable \naccess to education, housing, credit, employment, and other programs; or, access to critical resources or \nservices, such as healthcare, financial services, safety, social services, non-deceptive information about goods \nand services, and government benefits. \n10", "a5324dcc-7f7d-4d13-a7b4-c61a11b3471b": "FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12", "2c82cba7-cefa-41fd-a6d5-c90edb9b59f9": "reuse \nRelevant and high-quality data. Data used as part of any automated system\u2019s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. 
Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. Additionally,", "ab16f609-33d2-4f10-9b50-ff0066dc6a13": "measured quantitatively, including explanations as to why some risks cannot be \nmeasured (e.g., due to technological limitations, resource constraints, or \ntrustworthy considerations). Include unmeasured risks in marginal risks. \nInformation Integrity \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMEASURE 1.3: Internal experts who did not serve as front-line developers for the system and/or independent assessors are \ninvolved in regular assessments and updates. Domain experts, users, AI Actors external to the team that developed or deployed the \nAI system, and a\ufb00ected communities are consulted in support of assessments as necessary per organizational risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.3-001 \nDe\ufb01ne relevant groups of interest (e.g., demographic groups, subject matter \nexperts, experience with GAI technology) within the context of use as part of \nplans for gathering structured public feedback. \nHuman-AI Con\ufb01guration; Harmful \nBias and Homogenization; CBRN", "ff613344-c661-48a5-af0c-950d87f38882": "it comes to open-ended prompts for long-form responses and in domains which require highly \ncontextual and/or domain expertise. \nRisks from confabulations may arise when users believe false content \u2013 often due to the con\ufb01dent nature \nof the response \u2013 leading users to act upon or promote the false information. This poses a challenge for \nmany real-world applications, such as in healthcare, where a confabulated summary of patient \ninformation reports could cause doctors to make incorrect diagnoses and/or recommend the wrong \ntreatments. Risks of confabulated content may be especially important to monitor when integrating GAI \ninto applications involving consequential decision making. \nGAI outputs may also include confabulated logic or citations that purport to justify or explain the \nsystem\u2019s answer, which may further mislead humans into inappropriately trusting the system\u2019s output. \nFor instance, LLMs sometimes provide logical steps for how they arrived at an answer even when the", "ff7088b4-e4f7-4ef1-89b6-2293bc428ded": "resulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32", "c7bdee72-9ac2-418f-ac50-b41a38e31eb7": "\u2022\nPamela Wisniewski, Associate Professor of Computer Science, University of Central Florida; Director,\nSocio-technical Interaction Research (STIR) Lab\n\u2022\nSeny Kamara, Associate Professor of Computer Science, Brown University\nEach panelist individually emphasized the risks of using AI in high-stakes settings, including the potential for \nbiased data and discriminatory outcomes, opaque decision-making processes, and lack of public trust and \nunderstanding of the algorithmic systems. 
The interventions and key needs various panelists put forward as \nnecessary to the future design of critical AI systems included ongoing transparency, value sensitive and \nparticipatory design, explanations designed for relevant stakeholders, and public consultation. \nVarious \npanelists emphasized the importance of placing trust in people, not technologies, and in engaging with \nimpacted communities to understand the potential harms of technologies and build protection by design into \nfuture systems.", "689778c9-90f6-4c4a-ab36-6fb05ad68144": "The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\u00ad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\u00ad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\u00ad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.", "2f4d5ac1-d6b0-48df-a313-39f40766a20c": "future systems. \nPanel 5: Social Welfare and Development. This event explored current and emerging uses of technology to \nimplement or improve social welfare systems, social development programs, and other systems that can impact \nlife chances. \nWelcome:\n\u2022\nSuresh Venkatasubramanian, Assistant Director for Science and Justice, White House Office of Science\nand Technology Policy\n\u2022\nAnne-Marie Slaughter, CEO, New America\nModerator: Michele Evermore, Deputy Director for Policy, Office of Unemployment Insurance \nModernization, Office of the Secretary, Department of Labor \nPanelists:\n\u2022\nBlake Hall, CEO and Founder, ID.Me\n\u2022\nKarrie Karahalios, Professor of Computer Science, University of Illinois, Urbana-Champaign\n\u2022\nChristiaan van Veen, Director of Digital Welfare State and Human Rights Project, NYU School of Law's\nCenter for Human Rights and Global Justice\n58", "473f218e-e471-4506-a9ba-a4840bcf9eb1": "and data agency can be meaningful and not overwhelming. These choices\u2014such as contextual, timely \nalerts about location tracking\u2014are brief, direct, and use-specific. Many of the expectations listed here for \nprivacy by design and use-specific consent mirror those distributed to developers as best practices when \ndeveloping for smart phone devices,82 such as being transparent about how user data will be used, asking for app \npermissions during their use so that the use-context will be clear to users, and ensuring that the app will still \nwork if users deny (or later revoke) some permissions. \n39", "3d2d1cf5-ddbb-40dc-a570-3f55f091e095": "the privacy, civil rights, and civil liberties implications of the use of such technologies be issued before \nbiometric identification technologies can be used in New York schools. 
\nFederal law requires employers, and any consultants they may retain, to report the costs \nof surveilling employees in the context of a labor dispute, providing a transparency \nmechanism to help protect worker organizing. Employers engaging in workplace surveillance \"where \nan object there-of, directly or indirectly, is [\u2026] to obtain information concerning the activities of employees or a \nlabor organization in connection with a labor dispute\" must report expenditures relating to this surveillance to \nthe Department of Labor Office of Labor-Management Standards, and consultants who employers retain for \nthese purposes must also file reports regarding their activities.81\nPrivacy choices on smartphones show that when technologies are well designed, privacy", "fcbeb8b3-4cff-4248-b03e-fc6879248660": "AI BILL OF RIGHTS\nFFECTIVE SYSTEMS\nineffective systems. Automated systems should be \ncommunities, stakeholders, and domain experts to identify \nSystems should undergo pre-deployment testing, risk \nthat demonstrate they are safe and effective based on \nincluding those beyond the intended use, and adherence to \nprotective measures should include the possibility of not \nAutomated systems should not be designed with an intent \nreasonably foreseeable possibility of endangering your safety or the safety of your community. They should \nstemming from unintended, yet foreseeable, uses or \n \n \n \n \n \n \n \nSECTION TITLE\nBLUEPRINT FOR AN\nSAFE AND E \nYou should be protected from unsafe or \ndeveloped with consultation from diverse \nconcerns, risks, and potential impacts of the system. \nidentification and mitigation, and ongoing monitoring \ntheir intended use, mitigation of unsafe outcomes \ndomain-specific standards. Outcomes of these \ndeploying the system or removing a system from use. \nor", "5ff1ba24-2f90-4f45-a3a3-6e1c50395575": "which leads to extensive reuse of limited numbers of models; and the extent to which GAI may be \nintegrated into other devices and services. As GAI systems often involve many distinct third-party \ncomponents and data sources, it may be di\ufb03cult to attribute issues in a system\u2019s behavior to any one of \nthese sources. \nErrors in third-party GAI components can also have downstream impacts on accuracy and robustness. \nFor example, test datasets commonly used to benchmark or validate models can contain label errors. \nInaccuracies in these labels can impact the \u201cstability\u201d or robustness of these benchmarks, which many \nGAI practitioners consider during the model selection process. \nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n3. \nSuggested Actions to Manage GAI Risks \nThe following suggested actions target risks unique to or exacerbated by GAI.", "62a002de-0d3c-44dd-a41c-3fd464e4087a": "Confabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identi\ufb01ed. 
\nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, noti\ufb01cation format. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring", "7a809df5-be14-43b9-9219-bb0b8d1f7d2c": "37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, A\ufb00ected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities \u2013 as identi\ufb01ed in the MAP \nfunction \u2013 are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. \nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, \ufb01ne tuning, and deploying models: Verify tradeo\ufb00s", "1b4ea0b8-2883-4f20-8b10-198e6ad55155": "47 \nAppendix A. Primary GAI Considerations \nThe following primary considerations were derived as overarching themes from the GAI PWG \nconsultation process. These considerations (Governance, Pre-Deployment Testing, Content Provenance, \nand Incident Disclosure) are relevant for voluntary use by any organization designing, developing, and \nusing GAI and also inform the Actions to Manage GAI risks. Information included about the primary \nconsiderations is not exhaustive, but highlights the most relevant topics derived from the GAI PWG. \nAcknowledgments: These considerations could not have been surfaced without the helpful analysis and \ncontributions from the community and NIST sta\ufb00 GAI PWG leads: George Awad, Luca Belli, Harold Booth, \nMat Heyman, Yooyoung Lee, Mark Pryzbocki, Reva Schwartz, Martin Stanley, and Kyra Yee. \nA.1. Governance \nA.1.1. Overview \nLike any other technology system, governance principles and techniques can be used to manage risks", "5d49e42f-479a-415f-8de0-91ebbd0e77df": "Information Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV, Third-party entities \n \nMAP 1.1: Intended purposes, potentially bene\ufb01cial uses, context speci\ufb01c laws, norms and expectations, and prospective settings in \nwhich the AI system will be deployed are understood and documented. 
Considerations include: the speci\ufb01c set or types of users \nalong with their expectations; potential positive and negative impacts of system uses to individuals, communities, organizations, \nsociety, and the planet; assumptions and related limitations about AI system purposes, uses, and risks across the development or \nproduct AI lifecycle; and related TEVV and system metrics. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.1-001 \nWhen identifying intended purposes, consider factors such as internal vs. \nexternal use, narrow vs. broad application scope, \ufb01ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation).", "d8dc77d4-d7bc-40c8-bb38-e6f96f77391c": "41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Con\ufb01guration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Con\ufb01guration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidenti\ufb01able information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and", "c3a79cf4-99fe-41a5-94a9-9972c547b027": "https://arxiv.org/pdf/2202.07646 \nCarlini, N. et al. (2024) Stealing Part of a Production Language Model. arXiv. \nhttps://arxiv.org/abs/2403.06634 \nChandra, B. et al. (2023) Dismantling the Disinformation Business of Chinese In\ufb02uence Operations. \nRAND. https://www.rand.org/pubs/commentary/2023/10/dismantling-the-disinformation-business-of-\nchinese.html \nCiriello, R. et al. (2024) Ethical Tensions in Human-AI Companionship: A Dialectical Inquiry into Replika. \nResearchGate. https://www.researchgate.net/publication/374505266_Ethical_Tensions_in_Human-\nAI_Companionship_A_Dialectical_Inquiry_into_Replika \nDahl, M. et al. (2024) Large Legal Fictions: Pro\ufb01ling Legal Hallucinations in Large Language Models. arXiv. 
\nhttps://arxiv.org/abs/2401.01301", "ecf9714c-7e5b-4f00-9fad-45441a3db2a8": "Electronic Privacy Information \nCenter (EPIC) \nEncode Justice \nEqual AI \nGoogle \nHitachi's AI Policy Committee \nThe Innocence Project \nInstitute of Electrical and \nElectronics Engineers (IEEE) \nIntuit \nLawyers Committee for Civil Rights \nUnder Law \nLegal Aid Society \nThe Leadership Conference on \nCivil and Human Rights \nMeta \nMicrosoft \nThe MIT AI Policy Forum \nMovement Alliance Project \nThe National Association of \nCriminal Defense Lawyers \nO\u2019Neil Risk Consulting & \nAlgorithmic Auditing \nThe Partnership on AI \nPinterest \nThe Plaintext Group \npymetrics \nSAP \nThe Security Industry Association \nSoftware and Information Industry \nAssociation (SIIA) \nSpecial Competitive Studies Project \nThorn \nUnited for Respect \nUniversity of California at Berkeley \nCitris Policy Lab \nUniversity of California at Berkeley \nLabor Center \nUnfinished/Project Liberty \nUpturn \nUS Chamber of Commerce \nUS Chamber of Commerce \nTechnology Engagement Center \nA.I. Working Group\nVibrent Health\nWarehouse Worker Resource\nCenter\nWaymap\n62", "e8c07b22-d96c-4cfc-be67-00e326b77e19": "APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew America\u2019s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation", "1787e4ab-ddaa-436b-a84c-5b09e0444b2b": "likelihood of such an attack. The physical synthesis development, production, and use of chemical or \nbiological agents will continue to require both applicable expertise and supporting materials and \ninfrastructure. The impact of GAI on chemical or biological agent misuse will depend on what the key \nbarriers for malicious actors are (e.g., whether information access is one such barrier), and how well GAI \ncan help actors address those barriers. \nFurthermore, chemical and biological design tools (BDTs) \u2013 highly specialized AI systems trained on \nscienti\ufb01c data that aid in chemical and biological design \u2013 may augment design capabilities in chemistry \nand biology beyond what text-based LLMs are able to provide. As these models become more \ne\ufb03cacious, including for bene\ufb01cial uses, it will be important to assess their potential to be used for \nharm, such as the ideation and design of novel harmful chemical or biological agents.", "963066ad-85cd-44d7-a513-c5fc3b5f1733": "51 \ngeneral public participants. For example, expert AI red-teamers could modify or verify the \nprompts written by general public AI red-teamers. These approaches may also expand coverage \nof the AI risk attack surface. 
\n\u2022 \nHuman / AI: Performed by GAI in combination with specialist or non-specialist human teams. \nGAI-led red-teaming can be more cost e\ufb00ective than human red-teamers alone. Human or GAI-\nled AI red-teaming may be better suited for eliciting di\ufb00erent types of harms. \n \nA.1.6. Content Provenance \nOverview \nGAI technologies can be leveraged for many applications such as content generation and synthetic data. \nSome aspects of GAI outputs, such as the production of deepfake content, can challenge our ability to \ndistinguish human-generated content from AI-generated synthetic content. To help manage and mitigate \nthese risks, digital transparency mechanisms like provenance data tracking can trace the origin and", "5ad44c84-503d-4b61-95dc-22017c580f31": "to a particular decision, and should be meaningful for the particular customization based on purpose, target, \nand level of risk. While approximation and simplification may be necessary for the system to succeed based on \nthe explanatory purpose and target of the explanation, or to account for the risk of fraud or other concerns \nrelated to revealing decision-making information, such simplifications should be done in a scientifically \nsupportable way. Where appropriate based on the explanatory system, error ranges for the explanation should \nbe calculated and included in the explanation, with the choice of presentation of such information balanced \nwith usability and overall interface complexity concerns. \nDemonstrate protections for notice and explanation \nReporting. Summary reporting should document the determinations made based on the above consider\u00ad\nations, including: the responsible entities for accountability purposes; the goal and use cases for the system,", "ac5d591f-9174-44b6-be57-08f8b0e48100": "and Technology Policy\n\u2022\nBen Winters, Counsel, Electronic Privacy Information Center\nModerator: Chiraag Bains, Deputy Assistant to the President on Racial Justice & Equity \nPanelists: \n\u2022\nSean Malinowski, Director of Policing Innovation and Reform, University of Chicago Crime Lab\n\u2022\nKristian Lum, Researcher\n\u2022\nJumana Musa, Director, Fourth Amendment Center, National Association of Criminal Defense Lawyers\n\u2022\nStanley Andrisse, Executive Director, From Prison Cells to PHD; Assistant Professor, Howard University\nCollege of Medicine\n\u2022\nMyaisha Hayes, Campaign Strategies Director, MediaJustice\nPanelists discussed uses of technology within the criminal justice system, including the use of predictive \npolicing, pretrial risk assessments, automated license plate readers, and prison communication tools. The \ndiscussion emphasized that communities deserve safety, and strategies need to be identified that lead to safety; \nsuch strategies might include data-driven approaches, but the focus on safety should be primary, and", "d41067f5-b199-46fa-95e6-571e133d23ff": "ENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. 
These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public", "c100cd93-2611-4d50-a99b-8728ccb99ba1": "qualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections", "0b2a13ab-790a-4e74-97a6-dbd3f2f3834d": "Action ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or de\ufb01ning risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, o\ufb00ensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces signi\ufb01cant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. \nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002", "c65eb4b9-10bb-4fcf-b682-fca84d3f37a1": "Harmful Bias and Homogenization \nGV-6.2-006 \nEstablish policies and procedures to test and manage risks related to rollover and \nfallback technologies for GAI systems, acknowledging that rollover and fallback \nmay include manual processing. \nInformation Integrity \nGV-6.2-007 \nReview vendor contracts and avoid arbitrary or capricious termination of critical \nGAI technologies or vendor services and non-standard terms that may amplify or \ndefer liability in unexpected ways and/or contribute to unauthorized data \ncollection by vendors or third-parties (e.g., secondary data use). Consider: Clear \nassignment of liability and responsibility for incidents, GAI system changes over \ntime (e.g., \ufb01ne-tuning, drift, decay); Request: Noti\ufb01cation and disclosure for \nserious incidents arising from third-party data and systems; Service Level \nAgreements (SLAs) in vendor contracts that address incident response, response \ntimes, and availability of critical support. \nHuman-AI Con\ufb01guration; \nInformation Security; Value Chain", "2ac15af5-0f67-4ab6-803a-169153471fbe": "and the integrity and (when applicable) the con\ufb01dentiality of the GAI code, training data, and model \nweights. 
To identify and secure potential attack points in AI systems or speci\ufb01c components of the AI \n \n \n12 See also https://doi.org/10.6028/NIST.AI.100-4, to be published.", "c3a647af-08ee-42b7-87a6-57644e59b9eb": "\u2022\nA company installed AI-powered cameras in its delivery vans in order to evaluate the road safety habits of its driv\u00ad\ners, but the system incorrectly penalized drivers when other cars cut them off or when other events beyond\ntheir control took place on the road. As a result, drivers were incorrectly ineligible to receive a bonus.11\n17", "9aa5eff7-f727-421e-835d-3def1111689a": "between resources used at inference time versus additional resources required \nat training time. \nEnvironmental \nMS-2.12-004 Verify e\ufb00ectiveness of carbon capture or o\ufb00set programs for GAI training and \napplications, and address green-washing concerns. \nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV", "ecb13fde-537f-49b6-82bd-ad0e6de18a8c": "17 \nGOVERN 1.7: Processes and procedures are in place for decommissioning and phasing out AI systems safely and in a manner that \ndoes not increase risks or decrease the organization\u2019s trustworthiness. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.7-001 Protocols are put in place to ensure GAI systems are able to be deactivated when \nnecessary. \nInformation Security; Value Chain \nand Component Integration \nGV-1.7-002 \nConsider the following factors when decommissioning GAI systems: Data \nretention requirements; Data security, e.g., containment, protocols, Data leakage \nafter decommissioning; Dependencies between upstream, downstream, or other \ndata, internet of things (IOT) or AI systems; Use of open-source data or models; \nUsers\u2019 emotional entanglement with GAI functions. \nHuman-AI Con\ufb01guration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring", "8f297398-44b9-4be9-bbfb-ff90fef13d5f": "shared, or made public as part of data brokerage or other agreements. Sensitive data includes data that can be \nused to infer sensitive information; even systems that are not directly marketed as sensitive domain technologies \nare expected to keep sensitive data private. Access to such data should be limited based on necessity and based \non a principle of local control, such that those individuals closest to the data subject have more access while \nthose who are less proximate do not (e.g., a teacher has access to their students\u2019 daily progress data while a \nsuperintendent does not). \nReporting. 
In addition to the reporting on data privacy (as listed above for non-sensitive data), entities devel-\noping technologies related to a sensitive domain and those collecting, using, storing, or sharing sensitive data \nshould, whenever appropriate, regularly provide public reports describing: any data security lapses or breaches", "04e3f601-a4a2-4cc0-9978-8595281b3c94": "APPENDIX\nSummaries of Additional Engagements: \n\u2022 OSTP created an email address (ai-equity@ostp.eop.gov) to solicit comments from the public on the use of\nartificial intelligence and other data-driven technologies in their lives.\n\u2022 OSTP issued a Request For Information (RFI) on the use and governance of biometric technologies.113 The\npurpose of this RFI was to understand the extent and variety of biometric technologies in past, current, or\nplanned use; the domains in which these technologies are being used; the entities making use of them; current\nprinciples, practices, or policies governing their use; and the stakeholders that are, or may be, impacted by their\nuse or regulation. The 130 responses to this RFI are available in full online114 and were submitted by the below\nlisted organizations and individuals:\nAccenture \nAccess Now \nACT | The App Association \nAHIP \nAIethicist.org \nAirlines for America \nAlliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union", "6690225c-fbc4-4316-bef9-9cf1d5e5957c": "ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9", "73043a09-91db-4768-9c0b-702c2dfcd9f0": "Automated system support. Entities designing, developing, and deploying automated systems should \nestablish and maintain the capabilities that will allow individuals to use their own automated systems to help \nthem make consent, access, and control decisions in a complex data ecosystem. Capabilities include machine \nreadable data, standardized data formats, metadata or tags for expressing data processing permissions and \npreferences and data provenance and lineage, context of use and access-specific tags, and training models for \nassessing privacy risk. \nDemonstrate that data privacy and user control are protected \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of the claims made regarding data policies. These independent evaluations should be \nmade public whenever possible. Care will need to be taken to balance individual privacy with evaluation data \naccess needs.", "2cfdb40f-4c06-45c7-ab73-2bcc65986c58": "Information Integrity \nMS-2.7-006 \nMeasure the rate at which recommendations from security checks and incidents \nare implemented. Assess how quickly the AI system can adapt and improve \nbased on lessons learned from security incidents and feedback. \nInformation Integrity; Information \nSecurity \nMS-2.7-007 \nPerform AI red-teaming to assess resilience against: Abuse to facilitate attacks on \nother systems (e.g., malicious code generation, enhanced phishing content), GAI \nattacks (e.g., prompt injection), ML attacks (e.g., adversarial examples/prompts, \ndata poisoning, membership inference, model extraction, sponge examples). 
\nInformation Security; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nMS-2.7-008 Verify \ufb01ne-tuning does not compromise safety and security controls. \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content", "65cc819a-a0c3-4ffa-b6f0-e47f846de5a5": "46 \nMG-4.3-003 \nReport GAI incidents in compliance with legal and regulatory requirements (e.g., \nHIPAA breach reporting, e.g., OCR (2023) or NHTSA (2022) autonomous vehicle \ncrash reporting requirements. \nInformation Security; Data Privacy \nAI Actor Tasks: AI Deployment, A\ufb00ected Individuals and Communities, Domain Experts, End-Users, Human Factors, Operation and \nMonitoring", "f258f74e-4463-4558-a8be-88fcc9da5b5a": "justification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19", "16d54bad-34c2-4427-a979-eb6a860bc22e": "related to generative AI models, capabilities, and applications. Organizations may choose to apply their \nexisting risk tiering to GAI systems, or they may opt to revise or update AI system risk levels to address \nthese unique GAI risks. This section describes how organizational governance regimes may be re-\nevaluated and adjusted for GAI contexts. It also addresses third-party considerations for governing across \nthe AI value chain. \nA.1.2. Organizational Governance \nGAI opportunities, risks and long-term performance characteristics are typically less well-understood \nthan non-generative AI tools and may be perceived and acted upon by humans in ways that vary greatly. \nAccordingly, GAI may call for di\ufb00erent levels of oversight from AI Actors or di\ufb00erent human-AI \ncon\ufb01gurations in order to manage their risks e\ufb00ectively. 
Organizations\u2019 use of GAI systems may also \nwarrant additional human review, tracking and documentation, and greater management oversight."}} \ No newline at end of file diff --git a/Tasks/Task 4/val_questions.json b/Tasks/Task 4/val_questions.json new file mode 100644 index 0000000000000000000000000000000000000000..714621eb7c45f2d4433a731f238297fcca974413 --- /dev/null +++ b/Tasks/Task 4/val_questions.json @@ -0,0 +1 @@ +{"61c5d3ec-11af-4a36-a028-e9e22afb5a8f": "What are the five principles outlined in the Blueprint for an AI Bill of Rights?", "662911a9-7407-4a24-95ff-2350dde354be": "How can communities and industry implement the practices suggested in the Blueprint for an AI Bill of Rights?", "09ded63e-b364-42e7-9677-e1dfa4932b9b": "What are the best practices for providing independent evaluators access to automated systems while ensuring privacy and security?", "6b539180-33d5-4cd2-abf5-63cbe6178e6a": "How can organizations ensure that evaluator access to automated systems remains truly independent and cannot be revoked without reasonable justification?", "f35c9f92-67ac-4772-9dab-6cf2ae32812f": "What are the legal requirements for providing notice when making a video recording of someone?", "d0edffef-580d-4890-a6dd-e08925fadd27": "How are companies and researchers improving automated systems to explain decisions that impact consumers?", "4a36f5cd-0f9d-42ba-bd8e-d0eaf0af2d52": "How do advertisement delivery systems reinforce racial and gender stereotypes?", "1c6de01d-b59d-4421-9339-0e501b4fd2b9": "What are the issues faced by transgender travelers with TSA body scanners at airport checkpoints?", "155db437-082c-44f4-8751-960146c3512c": "What are the five principles outlined in the Blueprint for an AI Bill of Rights?", "95cae333-a114-41e8-98f5-10619377f6bf": "How can organizations apply the Blueprint for an AI Bill of Rights to protect civil rights and privacy?", "077e8ee5-5768-4967-b8ed-891c6cc0085d": "What are the benefits of having a human fallback mechanism in automated systems?", "8edf6c51-407d-478c-832a-ef103ea3709e": "How do automated signature matching systems impact voters with mental or physical disabilities?", "7058b177-27f4-4d6b-a478-176ead46f325": "What are the best practices for documenting the sources and types of training data in AI models?", "1e48abdd-a664-4c7a-8f19-151ca61e5006": "How can user feedback be effectively integrated into system updates to address problematic content?", "e5aba341-abc2-4965-a224-fa10823f4d2f": "What is the two-part test used in the AI Bill of Rights framework to determine which systems are in scope?", "23c3711f-c55b-49e5-9936-22d6bfc010af": "How does the AI Bill of Rights framework ensure that automated systems do not negatively impact the American public's rights and access to critical resources?", "08a12dd0-5dd7-4f87-8913-d86a9cc2c8b7": "What are adversarial role-playing exercises and how do they help in identifying failure modes in GAI systems?", "7d2b3bbe-6d0b-470d-b85d-d0c636ac4354": "How can profiling threats and negative impacts improve the security of GAI systems?", "c385b92d-1c01-48ae-be4c-f6b42b5e6af6": "What are the potential negative impacts of school surveillance on students via laptops?", "b4286477-40f0-46b8-bba8-4fe204b0dafa": "How does \"Bossware\" affect the health of employees according to the Center for Democracy & Technology report?", "6c98dd15-2a73-4c66-8a6a-c578c67a2434": "How can employers ensure their use of AI in hiring complies with the Americans with Disabilities Act (ADA)?", 
"00ab3a02-dffb-482b-ad10-3cab6ad77520": "What are the potential risks of using healthcare algorithms that rely on past medical costs to predict future needs?", "510ed741-6a36-4d13-a7dc-6a42262136be": "What are some effective context-based measures to identify new impacts of GAI systems?", "8f74dbe1-c3ed-48ca-9635-d701d26e829a": "How can regular engagements with AI Actors help in evaluating unanticipated impacts of GAI systems?", "3809c393-b89e-494c-b529-c65e601c1544": "What are acceptable use policies for GAI interfaces and how do they determine the types of queries GAI applications should refuse to respond to?", "edb9b7b1-11c1-421c-a07f-7abe3d6e7c21": "How can organizations establish effective user feedback mechanisms for GAI systems, and what should these mechanisms include?", "5a48e740-85f0-48c7-b0c7-6247c384f052": "How often should adversarial testing be conducted to effectively map and measure GAI risks?", "d6567db0-b18c-4dcb-b80c-146f2047bc13": "What are the benefits of evaluating GAI system performance in real-world scenarios compared to controlled testing environments?", "59a37c01-7bac-4f9d-980f-48f5489e61e6": "What are the common statistics reported about who chooses the human alternative in automated systems?", "e24a71f0-8b86-461a-92bd-fa6cef7ca33b": "How often should reports on the accessibility, timeliness, and effectiveness of human consideration and fallback be made public?", "63dcc302-d64d-47f5-a304-a64d4d6642b4": "What are some examples of companies that have successfully implemented bias testing in their product quality assessment?", "9b9b4805-12cb-453d-a3f4-ddbb20679c39": "How are federal government agencies developing standards to prevent algorithmic discrimination?", "7b3d457a-d0bf-4b13-b59c-df184af98f08": "What are some common protections against unlawful surveillance and violations of privacy in both public and private sectors?", "9473baea-32cd-4147-a547-5d45b0daa757": "How can individuals ensure equitable access to education, housing, and employment opportunities?", "d4388801-831e-45e0-bf67-b67974027277": "What are the key principles outlined in the AI Bill of Rights?", "d4107956-2806-4098-a79e-e753cab1bf82": "How can the AI Bill of Rights be practically implemented in technical systems?", "829774bb-4770-46cf-9f1b-86f51e7b6679": "How can you ensure the data used in automated systems is of high quality and relevant to the task?", "79c355b3-3945-402d-9d15-e460689ba635": "What methods can be employed to measure and limit errors from data entry in automated systems?", "e558dbd7-ca81-4070-9777-49636694d674": "What are some reasons why certain risks cannot be measured quantitatively in AI systems?", "e1ce22f6-cad0-4bbe-87ae-5222158a4393": "How can organizations involve independent assessors and domain experts in the regular assessment of AI systems?", "ae84398b-1649-4cce-8fa2-6295c80f7ec9": "What are the risks associated with confabulated content in healthcare applications using GAI?", "648a7032-05c8-45c2-a7bb-2dca8fa9ffd0": "How can confabulated logic or citations from GAI systems mislead users?", "2b743770-5d66-4aa8-b9b4-c33adc78c1e3": "How can companies ethically use data to monitor employee performance without violating privacy?", "3b6f61ff-349d-4817-8c82-d064b9a71c86": "What are the legal implications of employers using surveillance data to intervene in employee discussions?", "f596fded-c16b-49cb-b400-734c65b185af": "What are the risks of using AI in high-stakes settings as highlighted by Pamela Wisniewski and Seny Kamara?", "1626655d-7f72-4d0a-9170-3abdc8ed86ec": 
"Why is it important to place trust in people rather than technologies when designing AI systems?", "cec6f35c-1b45-4d56-8c2f-aef7bc860a01": "How can organizations ensure that their demographic assessments are inclusive of all protected classifications?", "12aca964-2112-4b36-8a40-14ab1512ac75": "What are the best practices for separating demographic data used for disparity assessment from data used in automated systems?", "53a48063-f4fb-482f-bd70-36915ec63956": "What are some emerging technologies being used to improve social welfare systems?", "7fdbbfed-73aa-45a8-9f1c-58ec2c0f3912": "How can digital welfare systems impact life chances according to experts like Christiaan van Veen?", "0ed0fb9c-47c4-4c7c-a5ae-d7e3a35670a1": "What are some best practices for developers to ensure privacy by design in smartphone apps?", "88297ffa-b5ca-460c-81ed-a61975ab39ef": "How can developers make app permissions clear and use-specific for users?", "38409d77-4936-4266-a7f3-2d910d3bea91": "What are the privacy implications of using biometric identification technologies in New York schools?", "3d2d3a9e-a6a7-49f5-bdd8-5db95fc8b602": "What are the reporting requirements for employers who surveil employees during a labor dispute?", "ca685f83-ccd7-4a17-a31d-bfc648b58840": "What measures are included in the AI Bill of Rights to ensure automated systems are safe and effective?", "ce1fdffd-851d-463e-8f24-4596865b62dc": "How does the AI Bill of Rights propose to handle the risks and potential impacts of automated systems?", "1a82989c-3ead-4aea-9098-53d3dca7f9b7": "What are the potential downstream impacts of errors in third-party GAI components on system accuracy and robustness?", "a30ea710-3349-4357-8dcb-915f6c69f2da": "How can inaccuracies in test dataset labels affect the stability and robustness of GAI benchmarks?", "004b52ee-6a49-47d7-a4bd-77ec96fadc31": "What are the best practices for developing and updating GAI system incident response and recovery plans?", "a5ad1cc1-318a-4210-8838-22015d780344": "How can organizations ensure their response and recovery plans account for the entire GAI system value chain?", "f05e4729-18f1-4664-9f41-2ad997f9d726": "How can we assess the proportion of synthetic to non-synthetic training data in AI models?", "81c90ac3-caf0-4c9d-8e02-8c62d26a047e": "What are the best practices for documenting the environmental impacts of AI model development and deployment?", "0abf12fc-3e73-41e5-8594-5e2bb6ecdb24": "What are the primary considerations for organizations designing and developing GAI according to the GAI PWG consultation process?", "e3abf868-922a-42e7-8c5a-b1ff0a353d39": "How can governance principles and techniques be applied to manage risks in GAI systems?", "55c79cd5-dee3-4e43-b8a3-839028518379": "What are the key considerations for documenting the intended purposes and beneficial uses of an AI system?", "456333eb-689e-4896-b2d4-0cf136672c77": "How do internal vs external use and narrow vs broad application scope impact the identification of intended purposes for AI systems?", "8834b86c-b1b9-43d6-92e0-3c64ca09e854": "How can feedback from internal and external AI actors be used to assess the impact of AI-generated content?", "e84f1a90-e702-4594-84b8-5c5b67352195": "What are the benefits of using real-time auditing tools for tracking and validating the lineage and authenticity of AI-generated data?", "490b6ca7-059f-41fe-82ae-b8d2c3890cf1": "What are the main findings of Carlini et al (2024) regarding the vulnerabilities in production language models?", 
"59bed72b-bd80-47c3-bb57-08dd086ecf9d": "How does the study by Chandra et al (2023) propose to combat Chinese influence operations and disinformation?", "625e3e66-e1fc-4223-a201-e88b765f449e": "What is the role of the Electronic Privacy Information Center (EPIC) in AI policy and regulation?", "da4a10c9-db2a-45fa-bad5-b66ef842c023": "How does the Innocence Project utilize AI to support its mission?", "40ab1b55-bc53-4cae-8f7e-4657a5b2bdc2": "What is the role of the National Center for Missing & Exploited Children?", "46de7819-7250-4050-8bf9-4635a1a02f3e": "How does the New York Civil Liberties Union contribute to civil rights advocacy?", "6feae899-9900-454f-a64d-39e842af8c76": "How can AI tools be misused in the development of chemical or biological agents?", "36826afc-57e4-4d70-bc7e-4ca62e3e3e67": "What are the potential risks associated with the use of biological design tools (BDTs) in chemistry and biology?", "84440495-e768-4885-b78b-d8a0c17f3809": "How can expert AI red-teamers enhance the effectiveness of general public AI red-teamers?", "9c3a8107-d49c-4dc0-9f78-d71a506df892": "What are the benefits of using GAI-led red-teaming compared to human red-teamers alone?", "068d8bd2-9336-4e18-bd93-2199100e631f": "How can error ranges be calculated and included in explanations for decision-making systems?", "3138ca26-38b8-4e17-9b31-b38bc8a8eb4f": "What are the best practices for balancing usability and interface complexity when presenting decision-making information?", "095919bc-18fa-4316-b1e8-07572983b77b": "What are the potential benefits and drawbacks of using predictive policing in the criminal justice system?", "39406f17-a757-4201-91b5-284ba4ebbd39": "How can data-driven approaches be balanced with the need for community safety in criminal justice reform?", "2744b9cf-981d-42e5-aed3-bb8e5acb0b2e": "What are the reporting expectations for entities developing or using automated systems?", "798b53f4-f798-418a-abcd-6dd05f707c67": "How can the public access the Agency Inventories of AI Use Cases provided by the National Artificial Intelligence Initiative Office?", "d7fa2d65-26f8-4442-86f6-f1d6256e588a": "What are some effective methods for monitoring and assessing high-impact systems in qualitative user experience research?", "e50c31b3-bab1-4064-baa1-199c946d9789": "How can organizations ensure equity standards are maintained in algorithmic systems, and what steps should be taken if these standards are not met?", "644dcaa5-1731-43fe-b0f5-c6a4bc05564e": "What factors should be considered when updating or defining risk tiers for General Artificial Intelligence (GAI)?", "def43eb9-80b0-4ad2-9198-d84ecb89c720": "How can the psychological impacts of GAI, such as anthropomorphization and emotional entanglement, be mitigated?", "8495a23f-4bb7-47ac-8c54-58cf5675cdd7": "What are the best practices for establishing policies to manage risks related to rollover and fallback technologies in GAI systems?", "74ae51e9-63b3-48ce-9be7-4f88052d7bd6": "How can organizations ensure clear assignment of liability and responsibility in vendor contracts for GAI technologies?", "11cdd3ed-e09b-463d-9853-0be811073b75": "What are the best practices for ensuring the confidentiality of AI training data and model weights?", "9ca1ff0e-0cd9-4362-aca9-fd904077c845": "How can potential attack points in AI systems be identified and secured?", "8fe0054d-51ba-48c5-8cc5-259b2b96f535": "How can AI-powered cameras in delivery vans be improved to avoid incorrectly penalizing drivers?", "03b9f17b-0b61-401b-bc65-47d0655f31d8": "What are the 
common issues faced by companies using AI to monitor road safety habits of drivers?", "6d622041-fccf-4eb4-9a53-f7d7577856f8": "What are the differences in resource usage between AI training and inference?", "a1738003-3e17-48e7-86a2-1410bc0f1c07": "How can we verify the effectiveness of carbon capture programs for AI training?", "d15e0c10-378f-48a3-9a5c-be0c618106b4": "What protocols should be in place to ensure the safe deactivation of AI systems?", "7e7e2c28-ea80-4568-a71a-41966f9f117f": "What factors need to be considered when decommissioning AI systems to prevent data leakage and ensure security?", "57073541-fc8c-43cd-8b42-f9497eb501af": "What are the best practices for limiting access to sensitive data based on necessity and local control?", "92d9e36d-0fef-4b2e-b40d-ff2b800fcf10": "How should organizations report data security lapses or breaches involving sensitive data?", "6f7aa060-c19a-4614-83d2-134828a7e956": "What is the purpose of the email address ai-equity@ostp.eop.gov created by OSTP?", "6b95bc28-dbb4-408f-8c5b-f5b37073b6fd": "Where can I find the full responses to the OSTP's Request For Information (RFI) on biometric technologies?", "4776eaa1-b6f0-440c-a6be-923bbf49687d": "What are the practical steps to implement ethical principles in technology?", "acf74d86-1184-4092-8a1d-3ca58f5fe97a": "How can risk management be integrated into technological innovation to protect people from harm?", "2c1b02c6-1919-49ea-beff-165567d20b47": "What are the key capabilities needed for automated systems to help users make consent, access, and control decisions in a complex data ecosystem?", "2d15dfed-c66d-4fac-89dd-3aded02ec63e": "How can independent evaluations of data policies help ensure data privacy and user control in automated systems?", "e71beb7c-7564-4f2c-83f7-ec9bb3134847": "How can the rate of implementing recommendations from security checks and incidents be measured effectively?", "6c079fa0-60c3-4c8d-826a-2816c65d3ea0": "What are the best practices for performing AI red-teaming to assess resilience against various types of attacks?", "543e9bfb-b5f4-4247-89c8-41e0e7fb11a9": "What are the legal and regulatory requirements for reporting GAI incidents under HIPAA?", "8613b055-c817-4a59-84cf-1ae29a7c2269": "How do the NHTSA's 2022 autonomous vehicle crash reporting requirements impact AI deployment and monitoring?", "ce252388-c4d9-4968-aadf-218b47f609a5": "How do you document the justification for each data attribute in an automated system?", "f46088d7-1004-41cb-87c5-8a2b0bcdef59": "What are the best practices for ensuring that the use of high-dimensional data attributes does not violate applicable laws?", "b5f49997-5049-4865-9b5b-c18d880e2baf": "How can organizations adjust their governance regimes to effectively manage the risks associated with generative AI systems?", "eeb5acfd-3be2-4488-b45e-e0979bd5c855": "What are the key considerations for third-party governance across the AI value chain when dealing with generative AI?"} \ No newline at end of file diff --git a/Tasks/Task 5/Colab-task5-assessing-performance.ipynb b/Tasks/Task 5/Colab-task5-assessing-performance.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..6a3b47f24ef22782c4c33aaaaf662f229bb1cfcc --- /dev/null +++ b/Tasks/Task 5/Colab-task5-assessing-performance.ipynb @@ -0,0 +1 @@ {"cells":[{"cell_type":"code","execution_count":2,"metadata":{"executionInfo":{"elapsed":22879,"status":"ok","timestamp":1727203947022,"user":{"displayName":"Jeevanandham 
Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"BF4AqVh7L7af","colab":{"base_uri":"https://localhost:8080/"},"outputId":"81ec0ace-36ef-4682-af0f-eb38844ff954"},"outputs":[{"output_type":"stream","name":"stdout","text":["\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m50.4/50.4 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m290.2/290.2 kB\u001b[0m \u001b[31m13.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m397.0/397.0 kB\u001b[0m \u001b[31m33.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m3.5/3.5 MB\u001b[0m \u001b[31m93.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m185.7/185.7 kB\u001b[0m \u001b[31m16.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m38.0/38.0 MB\u001b[0m \u001b[31m61.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m15.9/15.9 MB\u001b[0m \u001b[31m114.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m2.3/2.3 MB\u001b[0m \u001b[31m27.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m207.2/207.2 kB\u001b[0m \u001b[31m21.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m51.5/51.5 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m76.4/76.4 kB\u001b[0m \u001b[31m8.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m50.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K 
\u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m375.6/375.6 kB\u001b[0m \u001b[31m32.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m141.9/141.9 kB\u001b[0m \u001b[31m15.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m71.1/71.1 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m258.9/258.9 kB\u001b[0m \u001b[31m24.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m245.3/245.3 kB\u001b[0m \u001b[31m22.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m1.1/1.1 MB\u001b[0m \u001b[31m69.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m519.3/519.3 kB\u001b[0m \u001b[31m41.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m11.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m96.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m318.9/318.9 kB\u001b[0m \u001b[31m30.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m49.3/49.3 kB\u001b[0m \u001b[31m4.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m13.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m194.1/194.1 kB\u001b[0m \u001b[31m20.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K 
\u001b[90mā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”ā”\u001b[0m \u001b[32m57.5/57.5 kB\u001b[0m \u001b[31m5.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h"]}],"source":["!pip install -qU langsmith==0.1.125 \\\n"," langchain-core==0.2.41 \\\n"," langchain-community \\\n"," langchain-qdrant==0.1.4 \\\n"," langchain-experimental \\\n"," langchain-openai \\\n"," langchain_huggingface \\\n"," PyMuPDF==1.24.10 \\\n"," ragas==0.1.18 \\\n"," protobuf==3.20.3 \\\n"," pyarrow==14.0.1 \\\n"," fsspec==2024.6.1\n"]},{"cell_type":"code","execution_count":3,"metadata":{"executionInfo":{"elapsed":4102,"status":"ok","timestamp":1727203955273,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"sPbvd4OyL7ag"},"outputs":[],"source":["import os\n","import getpass\n","from uuid import uuid4\n","from google.colab import userdata\n","\n","\n","os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n","# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"LangChain API Key:\")\n","os.environ[\"LANGCHAIN_API_KEY\"] = userdata.get('LANGCHAIN_API_KEY')\n","\n","os.environ[\"LANGCHAIN_PROJECT\"] = \"AIM-SDG-MidTerm - AI Safety\"\n","# os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n","os.environ[\"OPENAI_API_KEY\"] = userdata.get('OPENAI_API_KEY')\n","\n","# os.environ[\"QDRANT_API_KEY\"] = getpass.getpass(\"Enter Your Qdrant API Key: \")\n","os.environ[\"QDRANT_API_KEY\"] = userdata.get('QDRANT_API_KEY')\n","\n","os.environ[\"QDRANT_URL\"] = userdata.get('QDRANT_URL')\n"]},{"cell_type":"markdown","metadata":{"id":"WZ-fIcQbL7ah"},"source":["# Synthetic data generation using Ragas framework"]},{"cell_type":"markdown","metadata":{"id":"ekj8y4xaL7ah"},"source":["We will generate set of synthetic data for evaluating different opetions\n","1. Evaluating Embedding model\n","2. 
Evaluating Chunking Strategies"]},{"cell_type":"code","execution_count":4,"metadata":{"executionInfo":{"elapsed":4764,"status":"ok","timestamp":1727203970815,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"axerEGy0L7ai"},"outputs":[],"source":["from langchain_experimental.text_splitter import SemanticChunker\n","\n","from enum import Enum\n","from typing import List\n","from langchain_community.document_loaders import PyMuPDFLoader\n","from langchain_core.documents import Document\n","import asyncio\n","\n","class PDFLoaderWrapper():\n"," class LoaderType(str, Enum):\n"," PYMUPDF = \"pymupdf\"\n","\n"," def __init__(self, file_path: str | List[str] , loader_type: LoaderType = LoaderType.PYMUPDF):\n"," self.file_path = file_path if isinstance(file_path, list) else [file_path]\n"," self.loader_type = loader_type\n","\n"," async def aload(self) -> List[Document]:\n"," all_docs = []\n"," for file_path in self.file_path:\n"," if self.loader_type == self.LoaderType.PYMUPDF:\n"," try:\n"," loader = PyMuPDFLoader(file_path)\n"," docs = await loader.aload()\n"," all_docs.extend(docs)\n"," except Exception as e:\n"," print(f\"Error loading file {file_path}: {e}\")\n"," continue\n"," return all_docs\n","\n","BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n","NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n","documents = [\n"," BOR_FILE_PATH,\n"," NIST_FILE_PATH\n","]\n","\n","pdf_loader = PDFLoaderWrapper(\n"," documents, PDFLoaderWrapper.LoaderType.PYMUPDF\n",")\n","documents = await pdf_loader.aload()\n","\n"]},{"cell_type":"code","execution_count":5,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":7829,"status":"ok","timestamp":1727203990782,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"PEemuAR2L7ai","outputId":"a698d5af-7b56-4d9c-cce4-91161e293466"},"outputs":[{"output_type":"stream","name":"stdout","text":["Packages import complete\n","Getting the Embedding model from Huggingface\n","Embedding model loaded\n","Splitting the documents into semantic chunks\n","Creating the document store for ragas and loading LLM models\n","Creating the testset generator\n"]}],"source":["from ragas.testset.generator import TestsetGenerator\n","from ragas.testset.evolutions import simple, reasoning, multi_context\n","from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n","from ragas.testset.docstore import Document, DocumentStore,InMemoryDocumentStore\n","from langchain.text_splitter import RecursiveCharacterTextSplitter\n","from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline\n","from ragas.testset.extractor import KeyphraseExtractor\n","\n","print (\"Packages import complete\")\n","print (\"Getting the Embedding model from Huggingface\")\n","# Using best performing embedding model from hugging face to generate quality dataset.\n","# # Need GPU\n","# model_name = \"Snowflake/snowflake-arctic-embed-l\"\n","# embedding_model = HuggingFaceEmbeddings(model_name=model_name)\n","\n","model = \"text-embedding-3-large\"\n","dimension = 3072\n","embedding_model = OpenAIEmbeddings(model=model,dimensions=dimension)\n","print (\"Embedding model loaded\")\n","\n","print (\"Splitting the documents into semantic chunks\")\n","text_splitter = RecursiveCharacterTextSplitter(\n"," chunk_size = 1024,\n"," chunk_overlap = 100,\n"," length_function = 
len,\n",")\n","chunked_docs = text_splitter.split_documents(documents)\n","\n","print (\"Creating the document store for ragas and loading LLM models\")\n","generator_llm = ChatOpenAI(model=\"gpt-4o\")\n","critic_llm = ChatOpenAI(model=\"gpt-4o\")\n","\n","# keyphrase_extractor = KeyphraseExtractor(llm=generator_llm)\n","# docstore = InMemoryDocumentStore(splitter=text_splitter,extractor=keyphrase_extractor, embeddings=embedding_model)\n","\n","print (\"Creating the testset generator\")\n","generator = TestsetGenerator.from_langchain( # Default uses TokenTextSplitter\n"," generator_llm=generator_llm,\n"," critic_llm=critic_llm,\n"," embeddings=embedding_model,\n"," #docstore=docstore # Document store uses SemenaticChunker\n",")\n","\n","distributions = {\n"," simple: 0.5,\n"," multi_context: 0.3,\n"," reasoning: 0.2\n","}"]},{"cell_type":"code","execution_count":6,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000,"output_embedded_package_id":"1MpNy1YyUsYoRqZwyhtp2MW69b_jqQzFH","referenced_widgets":["c44511b1233d47c9aa3f294ffe046cd6","48f3a7c91d4e4f5e8b4579bc6ddc8075","8087926c570547bf8c4fb5ccebd014ca","349178bff9c14ac2be638b8eaeaf14be","e28fe04f82e5459a868158c825914c33","e7d41e2b26d742ed9c8e516c574b7abe","99700d061e5e4c219d815e9a0b7d76b7","498c3c835eb44cfaaa0e58bd5ca9fc97","ac003e361a52420e9b368ab3a315d50d","aaa553c5eb044ab9aa26de9f0dd713ef","bc5d9989c9d4441aa62962c35fbe1bd4","e39750bad741402b95dba6e1d67f3302","f778da18131d471f9db08e0904ae4ecf","39dd92ce29084a048959c0d60220339e","d1364508a15246059d6a1982eb391081","e0dc95cc892b4e36941c877e95d11f9e","1f97c93c48434e69832f16c7a514f8c4","a4c574a957fb48d2856d77ad8adb65c9","ae61b02afc5e4d118dcacd3367b3eb41","2a38dc3e468046ed80a9037592610baa","b921d7b9dbc94524bb68a23bdda5d780","274905cf106546078f459319650f2af7"]},"executionInfo":{"elapsed":152165,"status":"ok","timestamp":1727204152630,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"},"user_tz":-60},"id":"wuCZeairL7ai","outputId":"23e49b8a-6cd9-4a9a-8313-35e43403477b"},"outputs":[{"output_type":"display_data","data":{"text/plain":"Output hidden; open in https://colab.research.google.com to view."},"metadata":{}}],"source":["test_size = 50\n","\n","testset = generator.generate_with_langchain_docs(\n"," documents,\n"," test_size,\n"," distributions,\n"," with_debugging_logs=True\n",") # Default RunConfig(max_retries=15, max_wait=90)"]},{"cell_type":"code","execution_count":7,"metadata":{"id":"9kLxv3hzL7ai","colab":{"base_uri":"https://localhost:8080/","height":1000},"executionInfo":{"status":"ok","timestamp":1727204193820,"user_tz":-60,"elapsed":468,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"}},"outputId":"aee23829-674b-48cf-91e4-8029ccf6d75d"},"outputs":[{"output_type":"execute_result","data":{"text/plain":[" question \\\n","0 What techniques are suggested to assess and ma... \n","1 What actions are suggested to ensure informati... \n","2 What are the requirements for providing access... \n","3 What issues arose from the system awarding ben... \n","4 How is the federal government working to comba... \n","5 What are the characteristics of trustworthy AI... \n","6 What are the conditions under which individual... \n","7 What is data poisoning and how can it affect G... \n","8 How do opaque decision-making processes in aut... \n","9 Who participated in the OSTP meetings focused ... \n","10 What actions are suggested for explaining and ... \n","11 What provisions are included in the Biometric ... 
\n","12 How should designers, developers, and deployer... \n","13 How do systems related to the assignment of pe... \n","14 What should be the focus of risk identificatio... \n","15 What procedures should be established and main... \n","16 How do GAI systems contribute to the creation ... \n","17 How can synthetic content detection help manag... \n","18 What organizational practices are in place to ... \n","19 What techniques are suggested to minimize risk... \n","20 Who were some of the participants from the pri... \n","21 What role does the National Institute of Stand... \n","22 What should entities responsible for the devel... \n","23 How has the customer service industry successf... \n","24 What steps should be taken to inform AI stakeh... \n","25 How do the U.S. AI Safety Institute and AI Ris... \n","26 How to balance synthetic vs. non-synthetic dat... \n","27 How to address data privacy and ensure AI inte... \n","28 How can public feedback and incident reporting... \n","29 How could automation bias worsen misinformatio... \n","30 How do consultations and monitoring ensure aut... \n","31 How do EO 13960 and NIST AI RMF ensure AI tran... \n","32 How does surveillance software for monitoring ... \n","33 How can provenance tracking and public feedbac... \n","34 How can designers ensure user understanding an... \n","35 How do equity and disparity assessments help p... \n","36 How do school audio surveillance systems for s... \n","37 How does the tech companion help implement AI ... \n","38 How to mitigate Human-AI risks in evaluations ... \n","39 How to design explanatory mechanisms in high-r... \n","40 How do biometrics affect access and decisions ... \n","41 How do equity assessments and disparity tests ... \n","42 How does synthetic data proportion help preven... \n","43 How do hiring algorithms and social media data... \n","44 How can unproven tech and data misuse cause harm? \n","45 Why compare system performance with human meth... \n","46 How do confident errors mislead users? \n","47 How do data biases impact loans, hiring, and e... \n","48 How to design systems to avoid algorithmic bia... \n","\n"," contexts \\\n","0 [ \\n30 \\nMEASURE 2.2: Evaluations involving hu... \n","1 [ \\n28 \\nMAP 5.2: Practices and personnel for ... \n","2 [ \\n \\n \\n \\n \\n \\nHUMAN ALTERNATIVES, \\nCONSI... \n","3 [ \\n \\n \\n \\n \\nNOTICE & \\nEXPLANATION \\nWHY ... \n","4 [ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n... \n","5 [ \\n14 \\nGOVERN 1.2: The characteristics of tr... \n","6 [ \\nSECTION TITLE\\nHUMAN ALTERNATIVES, CONSIDE... \n","7 [ \\n11 \\nvalue chain (e.g., data inputs, proce... \n","8 [ \\n \\n \\n \\n \\nNOTICE & \\nEXPLANATION \\nWHY ... \n","9 [APPENDIX\\nā€¢ OSTP conducted meetings with a va... \n","10 [ \\n35 \\nMEASURE 2.9: The AI model is explaine... \n","11 [ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n... \n","12 [ \\n \\n \\n \\n \\nSECTION TITLE\\nDATA PRIVACY\\nY... \n","13 [APPENDIX\\nSystems that impact the safety of c... \n","14 [ \\n \\n \\n \\n \\n \\n \\nSAFE AND EFFECTIVE \\nSYS... \n","15 [ \\n42 \\nMG-2.4-002 \\nEstablish and maintain p... \n","16 [ \\n10 \\nGAI systems can ease the unintentiona... \n","17 [ \\n51 \\ngeneral public participants. For exam... \n","18 [ \\n19 \\nGV-4.1-003 \\nEstablish policies, proc... \n","19 [ \\n30 \\nMEASURE 2.2: Evaluations involving hu... \n","20 [APPENDIX\\nā€¢ OSTP conducted meetings with a va... \n","21 [ \\n \\n \\nAbout AI at NIST: The National Insti... 
\n","22 [ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\nSAFE AND ... \n","23 [ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n... \n","24 [ \\n41 \\nMG-2.2-006 \\nUse feedback from intern... \n","25 [ \\n \\n \\nAbout AI at NIST: The National Insti... \n","26 [ \\n37 \\nMS-2.11-005 \\nAssess the proportion o... \n","27 [ \\n30 \\nMEASURE 2.2: Evaluations involving hu... \n","28 [ \\n53 \\nDocumenting, reporting, and sharing i... \n","29 [ \\n9 \\nand reduced content diversity). Overly... \n","30 [ \\n \\n \\n \\n \\n \\n \\nSAFE AND EFFECTIVE \\nSYS... \n","31 [ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\nSAF... \n","32 [ \\n \\n \\n \\nDATA PRIVACY \\nWHY THIS PRINCIPL... \n","33 [ \\n51 \\ngeneral public participants. For exam... \n","34 [ \\n \\n \\n \\n \\nSECTION TITLE\\nDATA PRIVACY\\nY... \n","35 [ \\n \\n \\n \\n \\n \\n \\nWHAT SHOULD BE EXPECTED ... \n","36 [ \\n \\n \\n \\n \\n \\n \\nDATA PRIVACY \\nEXTRA PRO... \n","37 [ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\... \n","38 [ \\n30 \\nMEASURE 2.2: Evaluations involving hu... \n","39 [ \\n \\n \\n \\n \\n \\nNOTICE & \\nEXPLANATION \\nWH... \n","40 [APPENDIX\\nSystems that impact the safety of c... \n","41 [ Ā­Ā­Ā­Ā­Ā­Ā­Ā­\\nALGORITHMIC DISCRIMINATION Protecti... \n","42 [ \\n37 \\nMS-2.11-005 \\nAssess the proportion o... \n","43 [ \\nSECTION TITLEĀ­\\nFOREWORD\\nAmong the great ... \n","44 [ \\n \\n \\n \\nSAFE AND EFFECTIVE \\nSYSTEMS \\nW... \n","45 [ \\n \\n \\n \\n \\n \\n \\nSAFE AND EFFECTIVE \\nSYS... \n","46 [ \\n4 \\n1. CBRN Information or Capabilities: E... \n","47 [ \\n \\n \\n \\n \\n \\n \\n \\nAlgorithmic \\nDiscri... \n","48 [ AI BILL OF RIGHTS\\nFFECTIVE SYSTEMS\\nineffec... \n","\n"," ground_truth evolution_type \\\n","0 Techniques such as re-sampling, re-weighting, ... simple \n","1 The suggested actions to ensure information in... simple \n","2 Those impacted by an automated system should b... simple \n","3 Individuals were denied benefits due to data e... simple \n","4 The federal government is working to combat di... simple \n","5 The characteristics of trustworthy AI that sho... simple \n","6 Individuals should be able to opt out from aut... simple \n","7 Data poisoning is a cybersecurity risk where a... simple \n","8 Opaque decision-making processes in automated ... simple \n","9 Participants in the OSTP meetings included Ado... simple \n","10 Suggested actions for explaining and validatin... simple \n","11 The Biometric Information Privacy Act enacted ... simple \n","12 Designers, developers, and deployers of automa... simple \n","13 Systems related to the assignment of penalties... simple \n","14 Identified risks should focus on the potential... simple \n","15 Establish and maintain procedures for escalati... simple \n","16 GAI systems contribute to the creation of deep... simple \n","17 Synthetic content detection can help manage an... simple \n","18 Organizational practices are in place to enabl... simple \n","19 Techniques such as anonymization, differential... simple \n","20 Participants in the OSTP meetings from the pri... simple \n","21 The National Institute of Standards and Techno... simple \n","22 Entities responsible for the development or us... simple \n","23 The customer service industry has successfully... simple \n","24 Establish and maintain communication plans to ... simple \n","25 The U.S. AI Safety Institute and the AI Risk M... multi_context \n","26 To balance synthetic vs. non-synthetic data an... 
multi_context \n","27 To address data privacy and ensure AI integrit... multi_context \n","28 Public feedback and incident reporting can imp... multi_context \n","29 Automation bias can exacerbate other risks of ... multi_context \n","30 Consultations ensure automated systems' safety... multi_context \n","31 Executive Order 13960 requires that AI used by... multi_context \n","32 Companies use surveillance software to track e... multi_context \n","33 Provenance tracking and public feedback can im... multi_context \n","34 Designers can ensure user understanding and pr... multi_context \n","35 Equity and disparity assessments help prevent ... multi_context \n","36 School audio surveillance systems monitor stud... multi_context \n","37 The technical companion provides examples and ... multi_context \n","38 To mitigate Human-AI risks in evaluations and ... multi_context \n","39 In high-risk automated systems, explanatory me... multi_context \n","40 The answer to given question is not present in... reasoning \n","41 Equity assessments and disparity tests reduce ... reasoning \n","42 Assessing the proportion of synthetic to non-s... reasoning \n","43 Algorithms used in hiring and credit decisions... reasoning \n","44 Unproven technology and data misuse can cause ... reasoning \n","45 System performance should be compared with the... reasoning \n","46 Confident errors, also known as confabulations... reasoning \n","47 Data biases impact loans by causing applicants... reasoning \n","48 To design systems to avoid algorithmic bias an... reasoning \n","\n"," metadata episode_done \n","0 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","1 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","2 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","3 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","4 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","5 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","6 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","7 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","8 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","9 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","10 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","11 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","12 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","13 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","14 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","15 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","16 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","17 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","18 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","19 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","20 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","21 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","22 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","23 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","24 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","25 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","26 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","27 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","28 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","29 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","30 [{'source': 'https://www.whitehouse.gov/wp-con... 
True \n","31 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","32 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","33 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","34 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","35 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","36 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","37 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","38 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","39 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","40 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","41 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","42 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","43 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","44 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","45 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","46 [{'source': 'https://nvlpubs.nist.gov/nistpubs... True \n","47 [{'source': 'https://www.whitehouse.gov/wp-con... True \n","48 [{'source': 'https://www.whitehouse.gov/wp-con... True "],"text/html":["\n","
\n","
\n","\n","\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
questioncontextsground_truthevolution_typemetadataepisode_done
0What techniques are suggested to assess and ma...[ \\n30 \\nMEASURE 2.2: Evaluations involving hu...Techniques such as re-sampling, re-weighting, ...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
1What actions are suggested to ensure informati...[ \\n28 \\nMAP 5.2: Practices and personnel for ...The suggested actions to ensure information in...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
2What are the requirements for providing access...[ \\n \\n \\n \\n \\n \\nHUMAN ALTERNATIVES, \\nCONSI...Those impacted by an automated system should b...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
3What issues arose from the system awarding ben...[ \\n \\n \\n \\n \\nNOTICE & \\nEXPLANATION \\nWHY ...Individuals were denied benefits due to data e...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
4How is the federal government working to comba...[ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n...The federal government is working to combat di...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
5What are the characteristics of trustworthy AI...[ \\n14 \\nGOVERN 1.2: The characteristics of tr...The characteristics of trustworthy AI that sho...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
6What are the conditions under which individual...[ \\nSECTION TITLE\\nHUMAN ALTERNATIVES, CONSIDE...Individuals should be able to opt out from aut...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
7What is data poisoning and how can it affect G...[ \\n11 \\nvalue chain (e.g., data inputs, proce...Data poisoning is a cybersecurity risk where a...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
8How do opaque decision-making processes in aut...[ \\n \\n \\n \\n \\nNOTICE & \\nEXPLANATION \\nWHY ...Opaque decision-making processes in automated ...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
9Who participated in the OSTP meetings focused ...[APPENDIX\\nā€¢ OSTP conducted meetings with a va...Participants in the OSTP meetings included Ado...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
10What actions are suggested for explaining and ...[ \\n35 \\nMEASURE 2.9: The AI model is explaine...Suggested actions for explaining and validatin...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
11What provisions are included in the Biometric ...[ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n...The Biometric Information Privacy Act enacted ...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
12How should designers, developers, and deployer...[ \\n \\n \\n \\n \\nSECTION TITLE\\nDATA PRIVACY\\nY...Designers, developers, and deployers of automa...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
13How do systems related to the assignment of pe...[APPENDIX\\nSystems that impact the safety of c...Systems related to the assignment of penalties...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
14What should be the focus of risk identificatio...[ \\n \\n \\n \\n \\n \\n \\nSAFE AND EFFECTIVE \\nSYS...Identified risks should focus on the potential...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
15What procedures should be established and main...[ \\n42 \\nMG-2.4-002 \\nEstablish and maintain p...Establish and maintain procedures for escalati...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
16How do GAI systems contribute to the creation ...[ \\n10 \\nGAI systems can ease the unintentiona...GAI systems contribute to the creation of deep...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
17How can synthetic content detection help manag...[ \\n51 \\ngeneral public participants. For exam...Synthetic content detection can help manage an...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
18What organizational practices are in place to ...[ \\n19 \\nGV-4.1-003 \\nEstablish policies, proc...Organizational practices are in place to enabl...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
19What techniques are suggested to minimize risk...[ \\n30 \\nMEASURE 2.2: Evaluations involving hu...Techniques such as anonymization, differential...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
20Who were some of the participants from the pri...[APPENDIX\\nā€¢ OSTP conducted meetings with a va...Participants in the OSTP meetings from the pri...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
21What role does the National Institute of Stand...[ \\n \\n \\nAbout AI at NIST: The National Insti...The National Institute of Standards and Techno...simple[{'source': 'https://nvlpubs.nist.gov/nistpubs...True
22What should entities responsible for the devel...[ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\nSAFE AND ...Entities responsible for the development or us...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
23How has the customer service industry successf...[ \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n...The customer service industry has successfully...simple[{'source': 'https://www.whitehouse.gov/wp-con...True
| | question | contexts | ground_truth | evolution_type | metadata | episode_done |
|---|---|---|---|---|---|---|
| 24 | What steps should be taken to inform AI stakeh... | [41 MG-2.2-006 Use feedback from intern... | Establish and maintain communication plans to ... | simple | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 25 | How do the U.S. AI Safety Institute and AI Ris... | [About AI at NIST: The National Insti... | The U.S. AI Safety Institute and the AI Risk M... | multi_context | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 26 | How to balance synthetic vs. non-synthetic dat... | [37 MS-2.11-005 Assess the proportion o... | To balance synthetic vs. non-synthetic data an... | multi_context | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 27 | How to address data privacy and ensure AI inte... | [30 MEASURE 2.2: Evaluations involving hu... | To address data privacy and ensure AI integrit... | multi_context | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 28 | How can public feedback and incident reporting... | [53 Documenting, reporting, and sharing i... | Public feedback and incident reporting can imp... | multi_context | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 29 | How could automation bias worsen misinformatio... | [9 and reduced content diversity). Overly... | Automation bias can exacerbate other risks of ... | multi_context | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 30 | How do consultations and monitoring ensure aut... | [SAFE AND EFFECTIVE SYS... | Consultations ensure automated systems' safety... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 31 | How do EO 13960 and NIST AI RMF ensure AI tran... | [SAF... | Executive Order 13960 requires that AI used by... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 32 | How does surveillance software for monitoring ... | [DATA PRIVACY WHY THIS PRINCIPL... | Companies use surveillance software to track e... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 33 | How can provenance tracking and public feedbac... | [51 general public participants. For exam... | Provenance tracking and public feedback can im... | multi_context | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 34 | How can designers ensure user understanding an... | [SECTION TITLE DATA PRIVACY Y... | Designers can ensure user understanding and pr... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 35 | How do equity and disparity assessments help p... | [WHAT SHOULD BE EXPECTED ... | Equity and disparity assessments help prevent ... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 36 | How do school audio surveillance systems for s... | [DATA PRIVACY EXTRA PRO... | School audio surveillance systems monitor stud... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 37 | How does the tech companion help implement AI ... | [... | The technical companion provides examples and ... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 38 | How to mitigate Human-AI risks in evaluations ... | [30 MEASURE 2.2: Evaluations involving hu... | To mitigate Human-AI risks in evaluations and ... | multi_context | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 39 | How to design explanatory mechanisms in high-r... | [NOTICE & EXPLANATION WH... | In high-risk automated systems, explanatory me... | multi_context | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 40 | How do biometrics affect access and decisions ... | [APPENDIX Systems that impact the safety of c... | The answer to given question is not present in... | reasoning | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 41 | How do equity assessments and disparity tests ... | [ALGORITHMIC DISCRIMINATION Protecti... | Equity assessments and disparity tests reduce ... | reasoning | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 42 | How does synthetic data proportion help preven... | [37 MS-2.11-005 Assess the proportion o... | Assessing the proportion of synthetic to non-s... | reasoning | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 43 | How do hiring algorithms and social media data... | [SECTION TITLE FOREWORD Among the great ... | Algorithms used in hiring and credit decisions... | reasoning | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 44 | How can unproven tech and data misuse cause harm? | [SAFE AND EFFECTIVE SYSTEMS W... | Unproven technology and data misuse can cause ... | reasoning | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 45 | Why compare system performance with human meth... | [SAFE AND EFFECTIVE SYS... | System performance should be compared with the... | reasoning | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 46 | How do confident errors mislead users? | [4 1. CBRN Information or Capabilities: E... | Confident errors, also known as confabulations... | reasoning | [{'source': 'https://nvlpubs.nist.gov/nistpubs... | True |
| 47 | How do data biases impact loans, hiring, and e... | [Algorithmic Discri... | Data biases impact loans by causing applicants... | reasoning | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
| 48 | How to design systems to avoid algorithmic bia... | [AI BILL OF RIGHTS FFECTIVE SYSTEMS ineffec... | To design systems to avoid algorithmic bias an... | reasoning | [{'source': 'https://www.whitehouse.gov/wp-con... | True |
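Once the test set is materialized as a dataframe (next cell), the `evolution_type` split can be tallied with a one-liner. This is a small inspection snippet, not a cell from the original notebook:

```python
# Count how many of the 49 synthetic questions came from each RAGAS
# evolution type (simple, multi_context, reasoning).
testset_df["evolution_type"].value_counts()
```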
```python
testset_df = testset.to_pandas()
testset_df
```

```python
testset_df.to_csv('task5-ai-safety-sdg3.csv', index=False)
test_questions = testset_df["question"].values.tolist()
test_groundtruths = testset_df["ground_truth"].values.tolist()
```

## Create RAG chain to generate answers for the above questions in the dataset

> Note that we are using Qdrant cloud, where the PDF documents are processed and saved for us to consume. For the RAG pipeline we use the same embedding model originally used to populate the Qdrant vectorstore.

```python
len(chunked_docs)
```

```
466
```

```python
from langchain_qdrant import QdrantVectorStore
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams

model = "text-embedding-3-large"
dimension = 3072
embeddings = OpenAIEmbeddings(model=model, dimensions=dimension)

qdrant_client = QdrantClient(location=":memory:")
qdrant_client.create_collection(
    collection_name='sdg-answers',
    vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),
)

vector_store = QdrantVectorStore(
    client=qdrant_client,
    collection_name='sdg-answers',
    embedding=embeddings,
)
# Populate the collection with the pre-chunked documents before retrieving.
vector_store.add_documents(chunked_docs)

retriever = vector_store.as_retriever(search_kwargs={'k': 10})
```

```python
from langchain.prompts import ChatPromptTemplate

RAG_PROMPT = """\
Given a provided context and question, you must answer the question based only on context.

If you cannot answer the question based on the context - you must say "I don't know".

Context: {context}
Question: {question}
"""

rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
```

```python
from langchain_openai import ChatOpenAI

# Using the same model used in the app.
chat_model_name = "gpt-4o"
llm = ChatOpenAI(model=chat_model_name)
```

```python
from operator import itemgetter
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from langchain.schema import StrOutputParser

rag_chain_generate_answers = (
    {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
    | rag_prompt | llm | StrOutputParser()
)
```

```python
rag_chain_generate_answers.invoke({"question": "What procedures should be established and maintained for escalating GAI system incidents to the organizational risk management authority?"})
```

```
"I don't know."
```

# Create RAG Chain with config

We are going to replicate the exact implementation used in the hosted RAG app, but with a different configuration, so we can evaluate and compare.

```python
# Utility function to create a rag chain with config
from enum import Enum
from typing import List

from langchain_experimental.text_splitter import SemanticChunker
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_core.documents import Document
from langchain_qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from operator import itemgetter
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from langchain.schema import StrOutputParser
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain.chains.retrieval import create_retrieval_chain
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory

BOR_FILE_PATH = "https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf"
NIST_FILE_PATH = "https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf"
documents_to_preload = [
    BOR_FILE_PATH,
    NIST_FILE_PATH,
]
store = {}

class PDFLoaderWrapper():
    class LoaderType(str, Enum):
        PYMUPDF = "pymupdf"

    def __init__(self, file_path: str | List[str], loader_type: LoaderType = LoaderType.PYMUPDF):
        self.file_path = file_path if isinstance(file_path, list) else [file_path]
        self.loader_type = loader_type

    async def aload(self) -> List[Document]:
        all_docs = []
        for file_path in self.file_path:
            if self.loader_type == self.LoaderType.PYMUPDF:
                try:
                    loader = PyMuPDFLoader(file_path)
                    docs = await loader.aload()
                    all_docs.extend(docs)
                except Exception as e:
                    print(f"Error loading file {file_path}: {e}")
                    continue
        return all_docs

def create_history_aware_retriever_self(chat_model, retriever):
    contextualize_q_system_prompt = (
        "Given a chat history and the latest user question which might reference context in the chat history, "
        "formulate a standalone question which can be understood without the chat history. Do NOT answer the question, "
        "just reformulate it if needed and otherwise return it as is."
    )
    contextualize_q_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", contextualize_q_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    return create_history_aware_retriever(chat_model, retriever, contextualize_q_prompt)

def create_qa_chain(chat_model):
    qa_system_prompt = (
        "You are a helpful assistant named 'Shield' and your task is to answer any questions related to AI Safety for the given context."
        "Use the following pieces of retrieved context to answer the question."
        # "If any questions asked outside AI Safety context, just say that you are a specialist in AI Safety and can't answer that."
        # f"When introducing you, just say that you are an AI assistant powered by embedding model {embedding_model_name} and chat model {chat_model_name} and your knowledge is limited to 'Blueprint for an AI Bill of Rights' and 'NIST AI Standards' documents."
        "If you don't know the answer, just say that you don't know.\n\n"
        "{context}"
    )
    qa_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", qa_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    return create_stuff_documents_chain(chat_model, qa_prompt)

def create_conversational_rag_chain(chat_model, retriever):
    history_aware_retriever = create_history_aware_retriever_self(chat_model, retriever)
    question_answer_chain = create_qa_chain(chat_model)
    return create_retrieval_chain(history_aware_retriever, question_answer_chain)

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

pdf_loader = PDFLoaderWrapper(
    documents_to_preload, PDFLoaderWrapper.LoaderType.PYMUPDF
)
documents = await pdf_loader.aload()

async def get_contextual_compressed_retriever(retriever):
    # Wrap the base retriever with an LLM extractor that compresses each
    # retrieved document down to the passages relevant to the query.
    compressor_llm = ChatOpenAI(temperature=0, model_name="gpt-4o", max_tokens=4000)
    compressor = LLMChainExtractor.from_llm(compressor_llm)

    # Combine the retriever with the compressor
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor,
        base_retriever=retriever,
    )
    return compression_retriever

async def create_rag_chain(huggingface_embedding, text_splitter, collection_name):
    chunked_docs = text_splitter.split_documents(documents)
    dimension = 1024
    qdrant_client = QdrantClient(location=":memory:")

    # Below fails if the collection already exists, so make sure to delete the collection first
    qdrant_client.create_collection(
        collection_name=collection_name,
        vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),
    )

    vector_store = QdrantVectorStore(
        client=qdrant_client,
        collection_name=collection_name,
        embedding=huggingface_embedding,
    )
    vector_store.add_documents(chunked_docs)

    retriever = vector_store.as_retriever(search_kwargs={'k': 10})

    # Using the same model used in the app.
    chat_model_name = "gpt-4o"
    llm = ChatOpenAI(model=chat_model_name, temperature=0)
    RAG_PROMPT = """\
    Given a provided context and question, you must answer the question based only on context.

    If you cannot answer the question based on the context - you must say "I don't know".

    Context: {context}
    Question: {question}
    """

    rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
    ai_safety_rag_chain = (
        {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
        | RunnablePassthrough.assign(context=itemgetter("context"))
        | {"response": rag_prompt | llm | StrOutputParser(), "context": itemgetter("context")}
    )
    return ai_safety_rag_chain, retriever
```
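The utility cell above defines `create_conversational_rag_chain` and `get_session_history` but never shows them wired together in this excerpt. A minimal sketch of how they would typically be combined with LangChain's `RunnableWithMessageHistory` follows; the `session_id` and question are illustrative, not from the original notebook:

```python
# Hypothetical wiring of the conversational pieces defined above: wrap the
# retrieval chain so each session_id gets its own ChatMessageHistory.
chat_model = ChatOpenAI(model="gpt-4o", temperature=0)
conversational_chain = create_conversational_rag_chain(chat_model, retriever)

conversational_rag = RunnableWithMessageHistory(
    conversational_chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="answer",
)

# Follow-up questions in the same session are reformulated against the stored history.
conversational_rag.invoke(
    {"input": "What is the AI Bill of Rights?"},
    config={"configurable": {"session_id": "demo-session"}},
)
```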
# RAGAS Evaluation for Embedding Model

*(Output omitted: Hugging Face download progress for the embedding model files, e.g. `modules.json`. The source of this cell is truncated in this export; see the sketch below.)*
| | question | contexts | answer | ground_truth | faithfulness | answer_relevancy | context_recall | context_precision | answer_correctness |
|---|---|---|---|---|---|---|---|---|---|
| 0 | What techniques are suggested to assess and ma... | [• Accessibility and reasonable accommodatio... | I don't know. | Techniques such as re-sampling, re-weighting, ... | 0.000000 | 0.000000 | 0.000000 | 0.250000 | 0.183427 |
| 1 | What actions are suggested to ensure informati... | [Priorities Related to Information Integrity R... | I don't know. | The suggested actions to ensure information in... | 0.000000 | 0.000000 | 0.000000 | 0.267857 | 0.183037 |
| 2 | What are the requirements for providing access... | [detailed steps toward actualizing these princ... | I don't know. | Those impacted by an automated system should b... | 0.000000 | 0.000000 | 0.666667 | 0.371528 | 0.181881 |
| 3 | What issues arose from the system awarding ben... | [results, and actions taken; and the procedure... | I don't know. | Individuals were denied benefits due to data e... | 0.000000 | 0.000000 | 1.000000 | 0.333333 | 0.186449 |
| 4 | How is the federal government working to comba... | [for any resulting algorithmic discrimination.... | The federal government is working to combat di... | The federal government is working to combat di... | 1.000000 | 0.992450 | 1.000000 | 0.291667 | 0.999895 |
| 5 | What are the characteristics of trustworthy AI... | [organization's business processes or other ac... | I don't know. | The characteristics of trustworthy AI that sho... | 0.000000 | 0.000000 | 1.000000 | 1.000000 | 0.179199 |
| 6 | What are the conditions under which individual... | [detailed steps toward actualizing these princ... | I don't know. | Individuals should be able to opt out from aut... | 0.000000 | 0.000000 | 0.666667 | 0.366667 | 0.185176 |
| 7 | What is data poisoning and how can it affect G... | [Information Integrity; Information Security... | I don't know. | Data poisoning is a cybersecurity risk where a... | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.179450 |
| 8 | How do opaque decision-making processes in aut... | [for any resulting algorithmic discrimination.... | I don't know. | Opaque decision-making processes in automated ... | 0.000000 | 0.000000 | 0.666667 | 0.225000 | 0.187585 |
| 9 | Who participated in the OSTP meetings focused ... | [BLUEPRINT FOR AN AI BILL OF RIGHTS MAKI... | I don't know. | Participants in the OSTP meetings included Ado... | 0.000000 | 0.000000 | 0.000000 | 0.565608 | 0.182637 |
| 10 | What actions are suggested for explaining and ... | [lifecycle and informed by representative AI A... | I don't know. | Suggested actions for explaining and validatin... | 0.000000 | 0.000000 | 0.000000 | 0.250000 | 0.177867 |
| 11 | What provisions are included in the Biometric ... | [for any resulting algorithmic discrimination.... | The Biometric Information Privacy Act enacted ... | The Biometric Information Privacy Act enacted ... | 1.000000 | 0.952466 | 1.000000 | 0.333333 | 0.890441 |
| 12 | How should designers, developers, and deployer... | [detailed steps toward actualizing these princ... | Designers, developers, and deployers of automa... | Designers, developers, and deployers of automa... | 1.000000 | 0.962097 | 0.750000 | 0.426587 | 0.890689 |
| 13 | How do systems related to the assignment of pe... | [for any resulting algorithmic discrimination.... | Based on the provided context, systems related... | Systems related to the assignment of penalties... | 1.000000 | 0.957696 | 1.000000 | 0.500000 | 0.613236 |
| 14 | What should be the focus of risk identificatio... | [detailed steps toward actualizing these princ... | The focus of risk identification and mitigatio... | Identified risks should focus on the potential... | 1.000000 | 0.991916 | 1.000000 | 0.957143 | 0.224008 |
| 15 | What procedures should be established and main... | [Table of Contents 1. Introduction ..... | I don't know. | Establish and maintain procedures for escalati... | 0.000000 | 0.000000 | 0.000000 | 0.111111 | 0.180618 |
| 16 | How do GAI systems contribute to the creation ... | [and a human who is interacting with it. Human... | GAI systems contribute to the creation of deep... | GAI systems contribute to the creation of deep... | 1.000000 | 0.971253 | 0.500000 | 1.000000 | 0.719440 |
| 17 | How can synthetic content detection help manag... | [Information Integrity; Information Security... | I don't know. | Synthetic content detection can help manage an... | 0.000000 | 0.000000 | 1.000000 | 0.111111 | 0.177285 |
| 18 | What organizational practices are in place to ... | [• Accessibility and reasonable accommodatio... | Based on the provided context, organizational ... | Organizational practices are in place to enabl... | 1.000000 | 1.000000 | 0.500000 | 0.875000 | 0.796277 |
| 19 | What techniques are suggested to minimize risk... | [external use, narrow vs. broad application sc... | The context suggests that organizations should... | Techniques such as anonymization, differential... | 0.750000 | 0.960448 | 0.000000 | 0.200000 | 0.428300 |
| 20 | Who were some of the participants from the pri... | [Technology Policy Institute produced a synops... | Based on the provided context, some of the par... | Participants in the OSTP meetings from the pri... | NaN | 1.000000 | 1.000000 | 0.812500 | 0.234045 |
| 21 | What role does the National Institute of Stand... | [About AI at NIST: The National Institute of S... | The National Institute of Standards and Techno... | The National Institute of Standards and Techno... | 1.000000 | 0.978036 | 1.000000 | 0.892857 | 0.999374 |
| 22 | What should entities responsible for the devel... | [detailed steps toward actualizing these princ... | Entities responsible for the development or us... | Entities responsible for the development or us... | 0.909091 | 0.978494 | 1.000000 | 0.744048 | 0.731790 |
| 23 | How has the customer service industry successf... | [BLUEPRINT FOR AN AI BILL OF RIGHTS MAKI... | I don't know. | The customer service industry has successfully... | 0.000000 | 0.000000 | 0.000000 | 0.200000 | 0.180329 |
| 24 | What steps should be taken to inform AI stakeh... | [• Accessibility and reasonable accommodatio... | I don't know. | Establish and maintain communication plans to ... | 0.000000 | 0.000000 | 0.000000 | 0.291667 | 0.179026 |
| 25 | How do the U.S. AI Safety Institute and AI Ris... | [BLUEPRINT FOR AN AI BILL OF RIGHTS MAKI... | I don't know. | The U.S. AI Safety Institute and the AI Risk M... | 0.000000 | 0.000000 | 0.000000 | 0.416667 | 0.177866 |
| 26 | How to balance synthetic vs. non-synthetic dat... | [and data that are considered sensitive are un... | I don't know. | To balance synthetic vs. non-synthetic data an... | 0.000000 | 0.000000 | 0.000000 | 0.200000 | 0.181122 |
| 27 | How to address data privacy and ensure AI inte... | [FROM PRINCIPLES TO PRACTICE A TECHINCAL... | I don't know. | To address data privacy and ensure AI integrit... | 0.000000 | 0.000000 | 0.000000 | 0.584524 | 0.180616 |
| 28 | How can public feedback and incident reporting... | [Table of Contents 1. Introduction ..... | Public feedback and incident reporting can imp... | Public feedback and incident reporting can imp... | 0.818182 | 1.000000 | 0.250000 | 0.373016 | 0.385729 |
| 29 | How could automation bias worsen misinformatio... | [and a human who is interacting with it. Human... | Automation bias can worsen misinformation risk... | Automation bias can exacerbate other risks of ... | 0.461538 | 0.929887 | 1.000000 | 0.600000 | 0.981427 |
| 30 | How do consultations and monitoring ensure aut... | [detailed steps toward actualizing these princ... | Consultations and monitoring ensure the safety... | Consultations ensure automated systems' safety... | 1.000000 | 0.988883 | 0.750000 | 0.611667 | 0.638175 |
| 31 | How do EO 13960 and NIST AI RMF ensure AI tran... | [lifecycle and informed by representative AI A... | I don't know. | Executive Order 13960 requires that AI used by... | 0.000000 | 0.000000 | 0.000000 | 0.500000 | 0.178102 |
| 32 | How does surveillance software for monitoring ... | [Companies use surveillance software to track ... | I don't know. | Companies use surveillance software to track e... | 0.000000 | 0.000000 | 1.000000 | 0.625000 | 0.180983 |
| 33 | How can provenance tracking and public feedbac... | [FROM PRINCIPLES TO PRACTICE A TECHINCAL... | Provenance tracking and public feedback can im... | Provenance tracking and public feedback can im... | 0.818182 | 1.000000 | 0.800000 | 0.440476 | 0.823743 |
| 34 | How can designers ensure user understanding an... | [detailed steps toward actualizing these princ... | Designers can ensure user understanding and pr... | Designers can ensure user understanding and pr... | 1.000000 | 0.999309 | 0.500000 | 0.266667 | 0.837110 |
| 35 | How do equity and disparity assessments help p... | [ethnicity, which can lead to race-based healt... | Equity and disparity assessments help prevent ... | Equity and disparity assessments help prevent ... | 1.000000 | 1.000000 | 1.000000 | 0.521627 | 0.991481 |
| 36 | How do school audio surveillance systems for s... | [records and education-related data in order t... | I don't know. | School audio surveillance systems monitor stud... | 0.000000 | 0.000000 | 0.666667 | 0.600000 | 0.185658 |
| 37 | How does the tech companion help implement AI ... | [BLUEPRINT FOR AN AI BILL OF RIGHTS MAKI... | The technical companion to the Blueprint for a... | The technical companion provides examples and ... | 1.000000 | 0.915338 | 1.000000 | 0.962654 | 0.498695 |
| 38 | How to mitigate Human-AI risks in evaluations ... | [FROM PRINCIPLES TO PRACTICE A TECHINCAL... | I don't know. | To mitigate Human-AI risks in evaluations and ... | 0.000000 | 0.000000 | 0.000000 | 0.525926 | 0.242913 |
| 39 | How to design explanatory mechanisms in high-r... | [detailed steps toward actualizing these princ... | I don't know. | In high-risk automated systems, explanatory me... | 0.000000 | 0.000000 | 1.000000 | 0.226190 | 0.180091 |
| 40 | How do biometrics affect access and decisions ... | [for any resulting algorithmic discrimination.... | Biometric systems are used as access control m... | The answer to given question is not present in... | 0.928571 | 0.887355 | 1.000000 | 0.000000 | 0.926337 |
| 41 | How do equity assessments and disparity tests ... | [evaluation and plain language reporting in th... | Equity assessments and disparity tests reduce ... | Equity assessments and disparity tests reduce ... | 1.000000 | 0.997573 | 1.000000 | 0.580000 | 0.994207 |
| 42 | How does synthetic data proportion help preven... | [lifecycle and informed by representative AI A... | I don't know. | Assessing the proportion of synthetic to non-s... | 0.000000 | 0.000000 | 0.000000 | 0.366667 | 0.181362 |
| 43 | How do hiring algorithms and social media data... | [for any resulting algorithmic discrimination.... | Based on the provided context, hiring algorith... | Algorithms used in hiring and credit decisions... | 0.923077 | 1.000000 | 1.000000 | 0.250000 | 0.982130 |
| 44 | How can unproven tech and data misuse cause harm? | [Priorities Related to Information Integrity R... | The provided context mentions that unproven te... | Unproven technology and data misuse can cause ... | 1.000000 | 0.951317 | 0.800000 | 0.566667 | 0.680572 |
| 45 | Why compare system performance with human meth... | [and biological design tools. arXiv. https://a... | I don't know. | System performance should be compared with the... | 0.000000 | 0.000000 | 0.000000 | 0.500000 | 0.183202 |
| 46 | How do confident errors mislead users? | [for any resulting algorithmic discrimination.... | I don't know. | Confident errors, also known as confabulations... | 0.000000 | 0.000000 | 0.000000 | 0.000000 | 0.183213 |
| 47 | How do data biases impact loans, hiring, and e... | [for any resulting algorithmic discrimination.... | I don't know. | Data biases impact loans by causing applicants... | 0.000000 | 0.000000 | 0.000000 | 0.196429 | 0.182887 |
| 48 | How to design systems to avoid algorithmic bia... | [AI BILL OF RIGHTS FFECTIVE SYSTEMS ineffec... | I don't know. | To design systems to avoid algorithmic bias an... | 0.000000 | 0.000000 | 0.500000 | 0.100000 | 0.178707 |
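A quick way to summarize the table, assuming the dataframe above is available as `base_chain_eval_results_df` (it is produced by the cell just below). Pandas skips NaNs, such as row 20's faithfulness, when averaging:

```python
# Aggregate the per-question RAGAS scores into a single mean per metric.
metric_cols = ["faithfulness", "answer_relevancy", "context_recall",
               "context_precision", "answer_correctness"]
base_chain_eval_results_df[metric_cols].mean()
```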
```python
# First Base model chain
base_chain_eval_results_df = ragas_evaluate(rag_chain_base)
```

```python
base_chain_eval_results_df.to_csv('base_chain_eval_results_df.csv', index=False)
```

*(Output omitted: RAGAS evaluation progress bar, "Evaluating: 0/245".)*
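The `ragas_evaluate` helper used above is defined in a cell that is truncated in this export. A minimal sketch of what it plausibly does, given the metric columns in the results table and the 245-step progress bar (49 questions x 5 metrics); the body is an assumption, not the original code:

```python
# Hypothetical sketch of the ragas_evaluate helper: run the test questions
# through a chain built by create_rag_chain (which returns a dict with
# "response" and "context") and score the results with RAGAS.
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import (
    faithfulness,
    answer_relevancy,
    context_recall,
    context_precision,
    answer_correctness,
)

def ragas_evaluate(chain):
    answers, contexts = [], []
    for question in test_questions:
        result = chain.invoke({"question": question})
        answers.append(result["response"])
        contexts.append([doc.page_content for doc in result["context"]])

    dataset = Dataset.from_dict({
        "question": test_questions,
        "answer": answers,
        "contexts": contexts,
        "ground_truth": test_groundtruths,
    })
    results = evaluate(dataset, metrics=[
        faithfulness, answer_relevancy, context_recall,
        context_precision, answer_correctness,
    ])
    return results.to_pandas()
```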
*(Output omitted: inline matplotlib figure, embedded as a base64 PNG.)*
AAADwD5w+fVqffvqpypUrpyJFiljTg4KCNGnSJP32228aPXq0xo0bp7ffftua365dO91yyy1au3at1q1bp+eff15eXl6SpB07dqhx48Z66KGH9Msvv2jGjBlasWKFevXqddV6OnfurM8//1xnz56VdPG088aNGys0NNSp3RdffKE+ffro6aef1q+//qru3burU6dOWrp0qSQpIyNDDz74oLy9vbVmzRqNHTtWzz33nNM6zp8/r4SEBAUFBWn58uVauXKlAgMD1bhxY6WlpV3bDr3BcE03AAAAALho7ty5CgwMlCSdOXNG4eHhmjt3rjw8/u+45osvvmj9PzIyUgMGDND06dP17LPPSpJ2796tZ555RpUqVZIklS9f3mqfmJiodu3aqW/fvta8d955R3FxcRozZox8fX1zrK1GjRoqW7asZs2apccff1yTJk3SyJEj9ccffzi1Gz58uDp27KiePXtKkvr3768ffvhBw4cPV6NGjbR48WL9/vvvWrBggUqUKCFJGjZsmO677z5rHTNmzFBGRoY+/vhj6/ZaEydOVKFChZSUlKR7773XtR17A+JINwAAAAC4qFGjRtqwYYM2bNigH3/8UQkJCbrvvvuUkpJitZkxY4bq16+vsLAwBQYG6sUXX9Tu3but+f3791fXrl0VHx+v119/XTt27LDmbdy4UZMmTVJgYKD1SEhIUEZGhnbu3HnV+jp37qyJEyfq+++/15kzZ9SkSZMsbZKTk1W/fn2nafXr17cGYUtOTlZERIQVuCWpbt26Tu03btyo7du3KygoyKozJCREf//9t9PruZkRugEAAADARQEBASpXrpzKlSun2267TR9//LHOnDmjcePGSZJWr16tdu3aqUmTJpo7d65+/vlnvfDCC06nXA8ZMkSbN29W06ZN9d133ykmJkZffPGFpIunrHfv3t0K9hs2bNDGjRu1bds2RUVFXbW+du3a6YcfftCQIUP0+OOPq0ABe05yPn36tGJjY53q3LBhg7Zu3ZrtNeQ3I04vBwAAAIB/yOFwyMPDQ+fOnZMkrVq1SqVLl9YLL7xgtbn0KHimChUqqEKFCurXr5/atGmjiRMn6oEHHlDNmjX122+/qVy5ctdUT0hIiJo3b67PP/9cY8eOzbZNdHS0Vq5cqQ4dOljTVq5cqZiYGGv+nj17tH//foWHX7yt2g8//OC0jpo1a2rGjBkqXry4goODr6nWGx1HugEAAADARampqTpw4IAOHDig5ORkPfXUUzp9+rSaNWsm6eI12Lt379b06dO1Y8cOvfPOO9ZRbEk6d+6cevXqpaSkJKWkpGjlypVau3atoqOjJV0c+XzVqlXq1auXNmzYoG3btumrr77K1UBqmSZNmqQjR45Y14xf7plnntGkSZM0ZswYbdu2TSNHjtScOXM0YMAASVJ8fLwqVKigDh06aOPGjVq+fLnTHxGki0fUixYtqhYtWmj58uXauXOnkpKS1Lt3b+3du9elfXqjInQDAAAAgIvmz5+v8PBwhYeHq06dOlq7dq1mzpyphg0bSpKaN2+ufv36qVevXqpevbpWrVqll156yVre09NTR48eVfv27VWhQgW1atVK9913n4YOHSpJqlq1qr7//ntt3bpVd955p2rUqKFBgwY5XV99NX5+fk6jqV+uZcuWGj16tIYPH65bb71VH374oSZOnGi9Bg8PD33xxRc6d+6cateura5du+q1115zWoe/v7+WLVumUqVK6cEHH1R0dLS6dOmiv//+myPf/5/DGGPcXcT1dPLkSRUsWFAnTpygEwAAcAWjj412dwlu0adwH3eXANw0/v77b+3cuVNlypS54mjcgLtcqY/mNltypBsAAAAAAJsQugEAAAAAsAmhGwAAAAAAmxC6AQAAAACwCaEbAAAAAACbELoBAAAAALAJoRsAAAAAAJsQugEAAAAAsAmhGwAAAAAAmxC6AQAAACCPNGzYUH379nV3GdddUlKSHA6Hjh8/7tY6hgwZourVq+e6/a5du+RwOLRhwwbbaipg25oBAAAA4BqMPjb6um6vT+E+LrXv2LGjJk+enGX6tm3bNGfOHHl5eeVVaTlq2LChqlevrlGjRtm+rbwSGRmplJQUffbZZ3r00Ued5t1666367bffNHHiRHXs2NE9BdqEI90AAAAA4KLGjRtr//79To8yZcooJCREQUFB7i7vXysiIkITJ050mvbDDz/owIEDCggIcFNV9iJ0AwAAAICLfHx8FBYW5vTw9PTMcnp5ZGSkhg0bps6dOysoKEilSpXSRx995LSuPXv2qFWrVipUqJBCQkLUokUL7dq1K8dtd+zYUd9//71Gjx4th8Mhh8OhXbt2adKkSSpUqJBT2y+//FIOh8N6nnn69ZQpUxQZGamCBQvq0Ucf1alTp6w2GRkZSkxMVJkyZeTn56dq1app1qxZTuv95ptvVKFCBfn5+alRo0ZXrPdS7dq10/fff689e/ZY0yZMmKB27dqpQAHnE7F3796tFi1aKDAwUMHBwWrVqpUOHjzo1Ob1119XaGiogoKC1KVLF/39999Ztvnxxx8rOjpavr6+qlSpkj744INc1ZpXCN0AAAAAYKMRI0aoVq1a+vnnn9WzZ0/16NFDW7ZskSSdP39eCQkJCgoK0vLly7Vy5UoFBgaqcePGSktLy3Z9o0ePVt26ddWtWzfrKHtERESu69mxY4e+/PJLzZ07V3PnztX333+v119/3ZqfmJioTz75RGPHjtXmzZvVr18/PfbYY/r+++8lXfwjwYMPPqhmzZppw4YN6tq1q55//vlcbTs0NFQJCQnW6flnz57VjBkz1LlzZ6d2GRkZatGihf766y99//33WrRokf744w+1bt3aavP5559ryJAhGjZsmH766SeFh4dnCdRTp07VoEGD9Nprryk5OVnDhg3TSy+9lO3lAXbhmm4AAAAAcNHcuXMVGBhoPb/vvvs0c+bMbNs2adJEPXv2lCQ999xzevvtt7V06VJVrFhRM2bMUEZGhj7++GPriPTEiRNVqFAhJSUl6d57782yvoIFC8rb21v+/v4KCwtzufaMjAxNmjTJOg3+8ccf15IlS/Taa68pNTVVw4YN0+LFi1W3bl1JUtmyZbVixQp9+OGHiouL05gxYxQVFaURI0ZIkipWrKhNmzbpjTfeyNX2O3furKefflovvPCCZs2apaioqCyDny1ZskSbNm3Szp07rT8ofPLJJ7r11lu1du1a3XbbbRo1apS6dOmiLl26SJJeffVVLV682Olo9+DBgzVixAg9+OCDkqQyZcrot99+04cffqgOHTq4vO+uBUe6AQAAAMBFjRo10oYNG6zHO++8k2PbqlWrWv93OBwKCwvToUOHJEkbN27U9u3bFRQUpMDAQAUGBiokJER///23duzYoeXLl1vTAwMDNXXq1H9ce2RkpNN15+Hh4VY927dv19mzZ3XPPfc4bfeTTz7Rjh07JEnJycmqU6eO0zozA3puNG3aVKdPn9ayZcs0YcKELEe5M7cRERHhdAQ/JiZGhQoVUnJycq7qOHPmjHbs2K
EuXbo4vZZXX33Vei3XA0e6AQC4gjlb9ru7BPcp7u4CAODfKyAgQOXKlctV28tHM3c4HMrIyJAknT59WrGxsdmG6WLFisnb29vpdlahoaE5bsfDw0PGGKdp58+fd7keSZo3b55Klizp1M7HxyfHbbuiQIECevzxxzV48GCtWbNGX3zxRZ6s93KZr2XcuHFZwrmnp6ct28wOoRsAAAAA3KRmzZqaMWOGihcvruDg4GzbZBfuvb29lZ6e7jStWLFiOnXqlM6cOWONBO7q/adjYmLk4+Oj3bt3Ky4uLts20dHR+vrrr52m/fDDDy5tp3Pnzho+fLhat26twoULZ7uNPXv2aM+ePdbR7t9++03Hjx9XTEyM1WbNmjVq3759tnWEhoaqRIkS+uOPP9SuXTuX6stLhG4AAAAAcJN27drprbfeUosWLfTyyy/rlltuUUpKiubMmaNnn31Wt9xyS7bLRUZGas2aNdq1a5d1SnqdOnXk7++v//73v+rdu7fWrFmjSZMmuVRPUFCQBgwYoH79+ikjI0N33HGHTpw4oZUrVyo4OFgdOnTQf/7zH40YMULPPPOMunbtqnXr1rm8nejoaB05ckT+/v7Zzo+Pj1eVKlXUrl07jRo1ShcuXFDPnj0VFxenWrVqSZL69Omjjh07qlatWqpfv76mTp2qzZs3q2zZstZ6hg4dqt69e6tgwYJq3LixUlNT9dNPP+nYsWPq37+/SzVfK67pBgAAAAA38ff317Jly1SqVCk9+OCDio6Otm59ldORb0kaMGCAPD09FRMTo2LFimn37t0KCQnRp59+qm+++UZVqlTRZ599piFDhrhc0yuvvKKXXnpJiYmJio6OVuPGjTVv3jyVKVNGklSqVCnNnj1bX375papVq6axY8dq2LBhLm+nSJEi8vPzy3aew+HQV199pcKFC6tBgwaKj49X2bJlNWPGDKtN69at9dJLL+nZZ59VbGysUlJS1KNHD6f1dO3aVR9//LEmTpyoKlWqKC4uTpMmTbJey/XgMJef9H+DO3nypAoWLKgTJ05csRMDACDd3Nd07yn+ubtLcIs+hfu4uwTgpvH3339r586dKlOmjHx9fd1dDpDFlfpobrMlR7oBAAAAALAJoRsAAAAAAJsQugEAAAAAsAmhGwAAAAAAmxC6AQAAAACwCaEbAAAAgFvdZDdUQj6SkZHxj9dRIA/qAAAAAACXeXl5yeFw6PDhwypWrJgcDoe7SwIkXfxDUFpamg4fPiwPDw95e3tf87oI3QAAAADcwtPTU7fccov27t2rXbt2ubscIAt/f3+VKlVKHh7XfpI4oRsAAACA2wQGBqp8+fI6f/68u0sBnHh6eqpAgQL/+AwMQjcAAAAAt/L09JSnp6e7ywBswUBqAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBN/hWh+/3331dkZKR8fX1Vp04d/fjjjzm2nTRpkhwOh9PD19f3OlYLAAAAAEDuuD10z5gxQ/3799fgwYO1fv16VatWTQkJCTp06FCOywQHB2v//v3WIyUl5TpWDAAAAABA7rg9dI8cOVLdunVTp06dFBMTo7Fjx8rf318TJkzIcRmHw6GwsDDrERoaeh0rBgAAAAAgd9wautPS0rRu3TrFx8db0zw8PBQfH6/Vq1fnuNzp06dVunRpRUREqEWLFtq8eXOObVNTU3Xy5EmnBwAAAAAA14NbQ/eRI0eUnp6e5Uh1aGioDhw4kO0yFStW1IQJE/TVV1/p008/VUZGhurVq6e9e/dm2z4xMVEFCxa0HhEREXn+OgAAAAAAyI7bTy93Vd26ddW+fXtVr15dcXFxmjNnjooVK6YPP/ww2/YDBw7UiRMnrMeePXuuc8UAAAAAgJtVAXduvGjRovL09NTBgwedph88eFBhYWG5WoeXl5dq1Kih7du3Zzvfx8dHPj4+/7hWAAAAAABc5dYj3d7e3oqNjdWSJUusaRkZGVqyZInq1q2bq3Wkp6dr06ZNCg8Pt6tMAAAAAACuiVuPdEtS//791aFDB9WqVUu1a9fWqFGjdObMGXXq1EmS1L59e5UsWVKJiYmSpJdfflm33367ypUrp+PHj+utt95SSkqKunbt6s6XAQAAAABAFm4P3a1bt9bhw4c1aNAgHThwQNWrV9f8+fOtwdV2794tD4//OyB/7NgxdevWTQcOHFDhwoUVGxurVatWKSYmxl0vAQAAAACAbDmMMcbdRVxPJ0+eVMGCBXXixAkFBwe7uxwAwL/cnC373V2C2+wp/rm7S3CLPoX7uLsEAEA+kNtsme9GLwcAAAAAIL8gdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGCTAu4uAABuRKOPjXZ3CW7Rp3Afd5cAAADwr8KRbgAAAAAAbELoBgAAAADAJoRuAAAAAABsQugGAAAAAMAmhG4AAAAAAGxC6AYAAAAAwCaEbgAAAAAAbELoBgAAAADAJoRuAAAAAABsQugGAAAAAMAmhG4AAAAAAGxC6AYAAAAAwCaEbgAAAAAAbELoBgAAAADAJoRuAAAAAABsQugGAAAAAMAmhG4AAAAAAGxSwN0FALixzdmy390luEdxdxcAAACAfwNCNwAAwA3uZv0D6IMVw91dAgBwejkAAAAAAHbhSDcAAABuSKOPjXZ3CW7Tp3Afd5cA4P8jdAMAAAD41+MyCeRXnF4OAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANikgLsLAC43+thod5fgFn0K93F3CQAAAADyGEe6AQAAAACwCaEbAAAAAACbELoBAAAAALAJoRsAAAAAAJsQugEAAAAAsAmjl/+Lzdmy390luEdxdxcAAAAAAHmD0A0AAAAA/1LcTjf/4/RyAAAAAABs8q8I3e+//74iIyPl6+urOnXq6Mcff8zVctOnT5fD4VDLli3tLRAAAAAAgGvg9tA9Y8YM9e/fX4MHD9b69etVrVo1JSQk6NChQ1dcbteuXRowYIDuvPPO61QpAAAAAACucXvoHjlypLp166ZOnTopJiZGY8eOlb+/vyZMmJDjMunp6WrXrp2GDh2qsmXLXsdqAQAAAADIPbeG7rS0NK1bt
07x8fHWNA8PD8XHx2v16tU5Lvfyyy+rePHi6tKly/UoEwAAAACAa+LW0cuPHDmi9PR0hYaGOk0PDQ3V77//nu0yK1as0Pjx47Vhw4ZcbSM1NVWpqanW85MnT15zvQAAAAAAuMLtp5e74tSpU3r88cc1btw4FS1aNFfLJCYmqmDBgtYjIiLC5ioBAAAAALjIrUe6ixYtKk9PTx08eNBp+sGDBxUWFpal/Y4dO7Rr1y41a9bMmpaRkSFJKlCggLZs2aKoqCinZQYOHKj+/ftbz0+ePEnwBgAAAABcF24N3d7e3oqNjdWSJUus235lZGRoyZIl6tWrV5b2lSpV0qZNm5ymvfjiizp16pRGjx6dbZj28fGRj4+PLfUDAAAAAHAlbg3dktS/f3916NBBtWrVUu3atTVq1CidOXNGnTp1kiS1b99eJUuWVGJionx9fVW5cmWn5QsVKiRJWaYDAAAAAOBubg/drVu31uHDhzVo0CAdOHBA1atX1/z5863B1Xbv3i0Pj3x16TkAAAAAAJL+BaFbknr16pXt6eSSlJSUdMVlJ02alPcFAQAAAACQBziEDAAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYxOXQPXnyZM2bN896/uyzz6pQoUKqV6+eUlJS8rQ4AAAAAADyM5dD97Bhw+Tn5ydJWr16td5//329+eabKlq0qPr165fnBQIAAAAAkF8VcHWBPXv2qFy5cpKkL7/8Ug899JCeeOIJ1a9fXw0bNszr+gAAAAAAyLdcPtIdGBioo0ePSpIWLlyoe+65R5Lk6+urc+fO5W11AAAAAADkYy4f6b7nnnvUtWtX1ahRQ1u3blWTJk0kSZs3b1ZkZGRe1wcAAAAAQL7l8pHu999/X3Xr1tXhw4c1e/ZsFSlSRJK0bt06tWnTJs8LBAAAAAAgv3L5SHehQoX03nvvZZk+dOjQPCkIAAAAAIAbxTXdp3v58uV67LHHVK9ePe3bt0+SNGXKFK1YsSJPiwMAAAAAID9zOXTPnj1bCQkJ8vPz0/r165WamipJOnHihIYNG5bnBQIAAAAAkF+5HLpfffVVjR07VuPGjZOXl5c1vX79+lq/fn2eFgcAAAAAQH7mcujesmWLGjRokGV6wYIFdfz48byoCQAAAACAG4LLoTssLEzbt2/PMn3FihUqW7ZsnhQFAAAAAMCNwOXQ3a1bN/Xp00dr1qyRw+HQn3/+qalTp2rAgAHq0aOHHTUCAAAAAJAvuXzLsOeff14ZGRm6++67dfbsWTVo0EA+Pj4aMGCAnnrqKTtqBAAAAAAgX3IpdKenp2vlypV68skn9cwzz2j79u06ffq0YmJiFBgYaFeNAAAAAADkSy6Fbk9PT917771KTk5WoUKFFBMTY1ddAAAAAADkey5f0125cmX98ccfdtQCAAAAAMAN5Zru0z1gwADNnTtX+/fv18mTJ50eAAAAAADgIpcHUmvSpIkkqXnz5nI4HNZ0Y4wcDofS09PzrjoAAAAAAPIxl0P30qVL7agDAAAAAIAbjsuhOy4uzo46AAAAAAC44bgcuiXp+PHjGj9+vJKTkyVJt956qzp37qyCBQvmaXEAAAAAAORnLg+k9tNPPykqKkpvv/22/vrrL/31118aOXKkoqKitH79ejtqBAAAAAAgX3L5SHe/fv3UvHlzjRs3TgUKXFz8woUL6tq1q/r27atly5bleZEAAAAAAORHLofun376ySlwS1KBAgX07LPPqlatWnlaHAAAAAAA+ZnLp5cHBwdr9+7dWabv2bNHQUFBeVIUAAAAAAA3ApdDd+vWrdWlSxfNmDFDe/bs0Z49ezR9+nR17dpVbdq0saNGAAAAAADyJZdPLx8+fLgcDofat2+vCxcuSJK8vLzUo0cPvf7663leIAAAAAAA+ZXLodvb21ujR49WYmKiduzYIUmKioqSv79/nhcHAAAAAEB+5nLoPnHihNLT0xUSEqIqVapY0//66y8VKFBAwcHBeVogAAAAAAD5lcvXdD/66KOaPn16lumff/65Hn300TwpCgAAAACAG4HLoXvNmjVq1KhRlukNGzbUmjVr8qQoAAAAAABuBC6H7tTUVGsAtUudP39e586dy5OiAAAAAAC4EbgcumvXrq2PPvooy/SxY8cqNjY2T4oCAAAAAOBG4PJAaq+++qri4+O1ceNG3X333ZKkJUuWaO3atVq4cGGeFwgAAAAAQH7l8pHu+vXra/Xq1YqIiNDnn3+u//3vfypXrpx++eUX3XnnnXbUCAAAAABAvuTykW5Jql69uqZOnZrXtQAAAAAAcEPJdei+cOGC0tPT5ePjY007ePCgxo4dqzNnzqh58+a64447bCkSAAAAAID8KNenl3fr1k29e/e2np86dUq33Xab3n//fS1YsECNGjXSN998c01FvP/++4qMjJSvr6/q1KmjH3/8Mce2c+bMUa1atVSoUCEFBASoevXqmjJlyjVtFwAAAAAAO+U6dK9cuVIPPfSQ9fyTTz5Renq6tm3bpo0bN6p///566623XC5gxowZ6t+/vwYPHqz169erWrVqSkhI0KFDh7JtHxISohdeeEGrV6/WL7/8ok6dOqlTp05asGCBy9sGAAAAAMBOuQ7d+/btU/ny5a3nS5Ys0UMPPaSCBQtKkjp06KDNmze7XMDIkSPVrVs3derUSTExMRo7dqz8/f01YcKEbNs3bNhQDzzwgKKjoxUVFaU+ffqoatWqWrFihcvbBgAAAADATrkO3b6+vjp37pz1/IcfflCdOnWc5p8+fdqljaelpWndunWKj4//v4I8PBQfH6/Vq1dfdXljjJYsWaItW7aoQYMGLm0bAAAAAAC75Tp0X3rt9PLly3Xw4EHddddd1vwdO3aoRIkSLm38yJEjSk9PV2hoqNP00NBQHThwIMflTpw4ocDAQHl7e6tp06Z69913dc8992TbNjU1VSdPnnR6AAAAAABwPeR69PJBgwbpvvvu0+eff679+/erY8eOCg8Pt+Z/8cUXql+/vi1FXi4oKEgbNmzQ6dOntWTJEvXv319ly5ZVw4YNs7RNTEzU0KFDr0tdAAAAAABcKtehOy4uTuvWrdPChQsVFhamRx55xGl+9erVVbt2bZc2XrRoUXl6eurgwYNO0w8ePKiwsLAcl/Pw8FC5cuWs
7SYnJysxMTHb0D1w4ED179/fen7y5ElFRES4VCcAAAAAANci16FbkqKjoxUdHZ3tvCeeeMLljXt7eys2NlZLlixRy5YtJUkZGRlasmSJevXqlev1ZGRkKDU1Ndt5Pj4+TvcWBwAAAADgenEpdNuhf//+6tChg2rVqqXatWtr1KhROnPmjDp16iRJat++vUqWLKnExERJF08Xr1WrlqKiopSamqpvvvlGU6ZM0ZgxY9z5MgAAAAAAyMLtobt169Y6fPiwBg0apAMHDqh69eqaP3++Nbja7t275eHxf+O9nTlzRj179tTevXvl5+enSpUq6dNPP1Xr1q3d9RIAAAAAAMiW20O3JPXq1SvH08mTkpKcnr/66qt69dVXr0NVAAAAAAD8M7m+ZRgAAAAAAHDNNYXu48eP6+OPP9bAgQP1119/SZLWr1+vffv25WlxAAAAAADkZy6fXv7LL78oPj5eBQsW1K5du9StWzeFhIRozpw52r17tz755BM76gQAAAAAIN9x+Uh3//791bFjR23btk2+vr7W9CZNmmjZsmV5WhwAAAAAAPmZy6F77dq16t69e5bpJUuW1IEDB/KkKAAAAAAAbgQuh24fHx+dPHkyy/StW7eqWLFieVIUAAAAAAA3ApdDd/PmzfXyyy/r/PnzkiSHw6Hdu3frueee00MPPZTnBQIAAAAAkF+5HLpHjBih06dPq3jx4jp37pzi4uJUrlw5BQUF6bXXXrOjRgAAAAAA8iWXRy8vWLCgFi1apBUrVuiXX37R6dOnVbNmTcXHx9tRHwAAAAAA+ZbLoTvTHXfcoTvuuCMvawEAAAAA4Ibicuh+5513sp3ucDjk6+urcuXKqUGDBvL09PzHxQEAAAAAkJ+5HLrffvttHT58WGfPnlXhwoUlSceOHZO/v78CAwN16NAhlS1bVkuXLlVERESeFwwAAAAAQH7h8kBqw4YN02233aZt27bp6NGjOnr0qLZu3ao6depo9OjR2r17t8LCwtSvXz876gUAAAAAIN9w+Uj3iy++qNmzZysqKsqaVq5cOQ0fPlwPPfSQ/vjjD7355pvcPgwAAAAAcNNz+Uj3/v37deHChSzTL1y4oAMHDkiSSpQooVOnTv3z6gAAAAAAyMdcDt2NGjVS9+7d9fPPP1vTfv75Z/Xo0UN33XWXJGnTpk0qU6ZM3lUJAAAAAEA+5HLoHj9+vEJCQhQbGysfHx/5+PioVq1aCgkJ0fjx4yVJgYGBGjFiRJ4XCwAAAABAfuLyNd1hYWFatGiRfv/9d23dulWSVLFiRVWsWNFq06hRo7yrEAAAAACAfMrl0J2pUqVKqlSpUl7WAgAAAADADeWaQvfevXv19ddfa/fu3UpLS3OaN3LkyDwpDAAAAACA/M7l0L1kyRI1b95cZcuW1e+//67KlStr165dMsaoZs2adtQIAAAAAEC+5PJAagMHDtSAAQO0adMm+fr6avbs2dqzZ4/i4uL0yCOP2FEjAAAAAAD5ksuhOzk5We3bt5ckFShQQOfOnVNgYKBefvllvfHGG3leIAAAAAAA+ZXLoTsgIMC6jjs8PFw7duyw5h05ciTvKgMAAAAAIJ9z+Zru22+/XStWrFB0dLSaNGmip59+Wps2bdKcOXN0++2321EjAAAAAAD5ksuhe+TIkTp9+rQkaejQoTp9+rRmzJih8uXLM3I5AAAAAACXcCl0p6ena+/evapataqki6eajx071pbCAAAAAADI71y6ptvT01P33nuvjh07Zlc9AAAAAADcMFweSK1y5cr6448/7KgFAAAAAIAbisuh+9VXX9WAAQM0d+5c7d+/XydPnnR6AAAAAACAi1weSK1JkyaSpObNm8vhcFjTjTFyOBxKT0/Pu+oAAAAAAMjHXA7dS5cutaMOAAAAAABuOC6H7ri4ODvqAAAAAADghuPyNd2StHz5cj322GOqV6+e9u3bJ0maMmWKVqxYkafFAQAAAACQn7kcumfPnq2EhAT5+flp/fr1Sk1NlSSdOHFCw4YNy/MCAQAAAADIr65p9PKxY8dq3Lhx8vLysqbXr19f69evz9PiAAAAAADIz1wO3Vu2bFGDBg2yTC9YsKCOHz+eFzUBAAAAAHBDcDl0h4WFafv27Vmmr1ixQmXLls2TogAAAAAAuBG4HLq7deumPn36aM2aNXI4HPrzzz81depUDRgwQD169LCjRgAAAAAA8iWXbxn2/PPPKyMjQ3fffbfOnj2rBg0ayMfHRwMGDNBTTz1lR40AAAAAAORLLoduh8OhF154Qc8884y2b9+u06dPKyYmRoGBgXbUBwAAAABAvuXy6eWffvqpzp49K29vb8XExKh27doEbgAAAAAAsuFy6O7Xr5+KFy+utm3b6ptvvlF6eroddQEAAAAAkO+5HLr379+v6dOny+FwqFWrVgoPD9eTTz6pVatW2VEfAAAAAAD5lsuhu0CBArr//vs1depUHTp0SG+//bZ27dqlRo0aKSoqyo4aAQAAAADIl1weSO1S/v7+SkhI0LFjx5SSkqLk5OS8qgsAAAAAgHzP5SPdknT27FlNnTpVTZo0UcmSJTVq1Cg98MAD2rx5c17XBwAAAABAvuXyke5HH31Uc+fOlb+/v1q1aqWXXnpJdevWtaM2AAAAAADyNZdDt6enpz7//HMlJCTI09PTad6vv/6qypUr51lxAAAAAADkZy6H7qlTpzo9P3XqlD777DN9/PHHWrduHbcQAwAAAADg/7uma7oladmyZerQoYPCw8M1fPhw3XXXXfrhhx/ysjYAAAAAAPI1l450HzhwQJMmTdL48eN18uRJtWrVSqmpqfryyy8VExNjV40AAAAAAORLuT7S3axZM1WsWFG//PKLRo0apT///FPvvvuunbUBAAAAAJCv5fpI97fffqvevXurR48eKl++vJ01AQAAAABwQ8j1ke4VK1bo1KlTio2NVZ06dfTee+/pyJEjdtYGAAAAAEC+luvQffvtt2vcuHHav3+/unfvrunTp6tEiRLKyMjQokWLdOrUKTvrBAAAAAAg33F59PKAgAB17txZK1as0KZNm/T000/r9ddfV/HixdW8eXM7agQAAAAAIF+65luGSVLFihX15ptvau/evfrss8/yqiYAAAAAAG4I/yh0Z/L09FTLli319ddf58XqAAAAAAC4IeRJ6AYAAAAAAFkRugEAAAAAsAmhGwAAAAAAmxC6AQAAAACwCaEbAAAAAACbELoBAAAAALAJoRsAAAAAAJsQugEAAAAAsAmhGwAAAAAAmxC6AQAAAACwCaEbAAAAAACbELoBAAAAALAJoRsAAAAAAJsQugEAAAAAsAmhGwAAAAAAmxC6AQAAAACwCaEbAAAAAACbELoBAAAAALDJvyJ0v//++4qMjJSvr6/q1KmjH3/8Mce248aN05133qnChQurcOHCio+Pv2J7AAAAAADcxe2he8aMGerfv78GDx6s9evXq1q1akpISNChQ4eybZ+UlKQ2bdpo6dKlWr16tSIiInTvvfdq375917lyAAAAAACuzO2he+TIkerWrZs6deq
kmJgYjR07Vv7+/powYUK27adOnaqePXuqevXqqlSpkj7++GNlZGRoyZIl17lyAAAAAACuzK2hOy0tTevWrVN8fLw1zcPDQ/Hx8Vq9enWu1nH27FmdP39eISEh2c5PTU3VyZMnnR4AAAAAAFwPbg3dR44cUXp6ukJDQ52mh4aG6sCBA7lax3PPPacSJUo4BfdLJSYmqmDBgtYjIiLiH9cNAAAAAEBuuP308n/i9ddf1/Tp0/XFF1/I19c32zYDBw7UiRMnrMeePXuuc5UAAAAAgJtVAXduvGjRovL09NTBgwedph88eFBhYWFXXHb48OF6/fXXtXjxYlWtWjXHdj4+PvLx8cmTegEAAAAAcIVbj3R7e3srNjbWaRC0zEHR6tatm+Nyb775pl555RXNnz9ftWrVuh6lAgAAAADgMrce6Zak/v37q0OHDqpVq5Zq166tUaNG6cyZM+rUqZMkqX379ipZsqQSExMlSW+88YYGDRqkadOmKTIy0rr2OzAwUIGBgW57HQAAAAAAXM7tobt169Y6fPiwBg0apAMHDqh69eqaP3++Nbja7t275eHxfwfkx4wZo7S0ND388MNO6xk8eLCGDBlyPUsHAAAAAOCK3B66JalXr17q1atXtvOSkpKcnu/atcv+ggAAAAAAyAP5evRyAAAAAAD+zQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANiE0A0AAAAAgE0I3QAAAAAA2ITQDQAAAACATQjdAAAAAADYhNANAAAAAIBNCN0AAAAAANjE7aH7/fffV2RkpHx9fVWnTh39+OOPObbdvHmzHnroIUVGRsrhcGjUqFHXr1AAAAAAAFzk1tA9Y8YM9e/fX4MHD9b69etVrVo1JSQk6NChQ9m2P3v2rMqWLavXX39dYWFh17laAAAAAABc49bQPXLkSHXr1k2dOnVSTEyMxo4dK39/f02YMCHb9rfddpveeustPfroo/Lx8bnO1QIAAAAA4Bq3he60tDStW7dO8fHx/1eMh4fi4+O1evXqPNtOamqqTp486fQAAAAAAOB6cFvoPnLkiNLT0xUaGuo0PTQ0VAcOHMiz7SQmJqpgwYLWIyIiIs/WDQAAAADAlbh9IDW7DRw4UCdOnLAee/bscXdJAAAAAICbRAF3bbho0aLy9PTUwYMHnaYfPHgwTwdJ8/Hx4fpvAAAAAIBbuO1It7e3t2JjY7VkyRJrWkZGhpYsWaK6deu6qywAAAAAAPKM2450S1L//v3VoUMH1apVS7Vr19aoUaN05swZderUSZLUvn17lSxZUomJiZIuDr7222+/Wf/ft2+fNmzYoMDAQJUrV85trwMAAAAAgOy4NXS3bt1ahw8f1qBBg3TgwAFVr15d8+fPtwZX2717tzw8/u9g/J9//qkaNWpYz4cPH67hw4crLi5OSUlJ17t8AAAAAACuyK2hW5J69eqlXr16ZTvv8iAdGRkpY8x1qAoAAAAAgH/uhh+9HAAAAAAAdyF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGCTf0Xofv/99xUZGSlfX1/VqVNHP/744xXbz5w5U5UqVZKvr6+qVKmib7755jpVCgAAAABA7rk9dM+YMUP9+/fX4MGDtX79elWrVk0JCQk6dOhQtu1XrVqlNm3aqEuXLvr555/VsmVLtWzZUr/++ut1rhwAAAAAgCtze+geOXKkunXrpk6dOikmJkZjx46Vv7+/JkyYkG370aNHq3HjxnrmmWcUHR2tV155RTVr1tR77713nSsHAAAAAODK3Bq609LStG7dOsXHx1vTPDw8FB8fr9WrV2e7zOrVq53aS1JCQkKO7QEAAAAAcJcC7tz4kSNHlJ6ertDQUKfpoaGh+v3337Nd5sCBA9m2P3DgQLbtU1NTlZqaaj0/ceKEJOnkyZP/pPTr4uzpU+4uwS3+9v3b3SW4xUnPf3+fvBb045vLjdiPb9Y+LNGPbyQ3az++WfuwRD++kdys/Tg/9O
HMTGmMuWI7t4bu6yExMVFDhw7NMj0iIsIN1QA5e17Pu7sE4B+jH+NGQD/GjYB+jPwuP/XhU6dOqWDBgjnOd2voLlq0qDw9PXXw4EGn6QcPHlRYWFi2y4SFhbnUfuDAgerfv7/1PCMjQ3/99ZeKFCkih8PxD18B8trJkycVERGhPXv2KDg42N3lANeEfowbAf0Y+R19GDcC+vG/mzFGp06dUokSJa7Yzq2h29vbW7GxsVqyZIlatmwp6WIoXrJkiXr16pXtMnXr1tWSJUvUt29fa9qiRYtUt27dbNv7+PjIx8fHaVqhQoXyonzYKDg4mA8W5Hv0Y9wI6MfI7+jDuBHQj/+9rnSEO5PbTy/v37+/OnTooFq1aql27doaNWqUzpw5o06dOkmS2rdvr5IlSyoxMVGS1KdPH8XFxWnEiBFq2rSppk+frp9++kkfffSRO18GAAAAAABZuD10t27dWocPH9agQYN04MABVa9eXfPnz7cGS9u9e7c8PP5vkPV69epp2rRpevHFF/Xf//5X5cuX15dffqnKlSu76yUAAAAAAJAtt4duSerVq1eOp5MnJSVlmfbII4/okUcesbkquIOPj48GDx6c5ZIAID+hH+NGQD9Gfkcfxo2AfnxjcJirjW8OAAAAAACuicfVmwAAAAAAgGtB6AYAAAAAwCaE7puUMUZPPPGEQkJC5HA4tGHDhiu237VrV67aNWzY0Ol2btk5cOCA7rnnHgUEBOT69m1JSUlyOBw6fvx4rtoD+UHHjh2t2yUCuDaX/37i9wX+DSIjIzVq1Kg8bwsgfyJ036Tmz5+vSZMmae7cudq/f/9VR3+PiIhwavdPvtS8/fbb2r9/vzZs2KCtW7deS/kAgP/Pri/sBAFcK/qktHbtWj3xxBN53hZwh0mTJuX6QBmy968YvRzX344dOxQeHq569erlqr2np6fCwsLybNuxsbEqX758nqwPsFN6erocDofTrQuvJi0tTd7e3jZWBfz7Xct7B3C3vPr8LlasmC1tkT+44/Mvp757/vx5eXl5Xbc6kD1+E96EOnbsqKeeekq7d++Ww+FQZGSk5s+frzvuuEOFChVSkSJFdP/992vHjh3WMpeevrdr1y41atRIklS4cGE5HA517NjRapuRkaFnn31WISEhCgsL05AhQ6x5kZGRmj17tj755BNruexOXT9+/LgcDke2t4yT/u8vbgsWLFB0dLQCAwPVuHFj7d+/36ndxx9/rOjoaPn6+qpSpUr64IMPrHlpaWnq1auXwsPD5evrq9KlSysxMVHSxdPvhwwZolKlSsnHx0clSpRQ7969r3GP40r9K/PnP2fOHDVq1Ej+/v6qVq2aVq9ebS2fkpKiZs2aqXDhwgoICNCtt96qb775RpJUq1YtDR8+3GrbsmVLeXl56fTp05KkvXv3yuFwaPv27ZKk1NRUDRgwQCVLllRAQIDq1Knj1M8y+9bXX3+tmJgY+fj4aPfu3Vd8fZmnib/22msqUaKEKlasKEnas2ePWrVqpUKFCikkJEQtWrTQrl27clxPRkaGEhMTVaZMGfn5+alatWqaNWuWNe+WW27RmDFjnJb5+eef5eHhoZSUFEnSyJEjVaVKFQUEBCgiIkI9e/a09sWlr+9q750JEybo1ltvlY+Pj8LDw63bOnbu3Fn333+/U9vz58+rePHiGj9+/BX3080qIyNDb775psqVKycfHx+VKlVKr732miRp06ZNuuuuu+Tn56ciRYroiSeecPp5Zfat4cOHKzw8XEWKFNGTTz6p8+fPS7p4SU9KSor69esnh8Mhh8NhLbtixQrdeeed8vPzU0REhHr37q0zZ85Ikj755BMFBgZq27ZtVvuePXuqUqVKOnv27BXXm5Oc3jtXe89J0sqVK9WwYUP5+/urcOHCSkhI0LFjxyRd+fMD1+Zm65NffvmlypcvL19fXyUkJGjPnj1WmyFDhqh69er6+OOPVaZMGfn6+kq6+D2ka9euKlasmIKDg3XXXXdp48aNTuv/3//+p9tuu02+vr4qWrSoHnjgAWvepUflr/ad4vIj+Lt371aLFi0UGBio4OBgtWrVSgcPHsxS85QpUxQZGamCBQvq0Ucf1alTp666T/KTG/27g5Tz71op9/3g8r7rcDg0ZswYNW/eXAEBAdZ7+6uvvlLNmjXl6+ursmXLaujQobpw4YK1vuPHj6t79+4KDQ2Vr6+vKleurLlz5yopKUmdOnXSiRMnrPde5nf7yMhIDRs2TJ07d1ZQUJBKlSqljz76yOk1Xu27UFJSkmrXrm1ddlq/fn3rO83GjRvVqFEjBQUFKTg4WLGxsfrpp5+uul//lQxuOsePHzcvv/yyueWWW8z+/fvNoUOHzKxZs8zs2bPNtm3bzM8//2yaNWtmqlSpYtLT040xxuzcudNIMj///LO5cOGCmT17tpFktmzZYvbv32+OHz9ujDEmLi7OBAcHmyFDhpitW7eayZMnG4fDYRYuXGiMMebQoUOmcePGplWrVtZyl64707Fjx4wks3TpUmOMMUuXLjWSzLFjx4wxxkycONF4eXmZ+Ph4s3btWrNu3ToTHR1t2rZta63j008/NeHh4Wb27Nnmjz/+MLNnzzYhISFm0qRJxhhj3nrrLRMREWGWLVtmdu3aZZYvX26mTZtmjDFm5syZJjg42HzzzTcmJSXFrFmzxnz00Ud2/lhuaFfqX5k//0qVKpm5c+eaLVu2mIcfftiULl3anD9/3hhjTNOmTc0999xjfvnlF7Njxw7zv//9z3z//ffGGGP69+9vmjZtaowxJiMjw4SEhJiiRYuab7/91hhzsR+ULFnSqqVr166mXr16ZtmyZWb79u3mrbfeMj4+Pmbr1q3GmP/rW/Xq1TMrV640v//+uzlz5swVX1+HDh1MYGCgefzxx82vv/5qfv31V5OWlmaio6NN586dzS+//GJ+++0307ZtW1OxYkWTmppqLdeiRQtrPa+++qqpVKmSmT9/vtmxY4eZOHGi8fHxMUlJScYYYwYMGGDuuOMOp20//fTTTtPefvtt891335mdO3eaJUuWmIoVK5oePXpY83Pz3vnggw+Mr6+vGTVqlNmyZYv58ccfzdtvv22MMWblypXG09PT/Pnnn1b7OXPmmICAAHPq1Kkr7qeb1bPPPmsKFy5sJk2aZLZv326WL19uxo0bZ06fPm3Cw8PNgw8+aDZt2mSWLFliypQpYzp06GAt26FDBxMcHGz+85//mOTkZPO///3P+Pv7W59HR48eNbfccot5+eWXzf79+83+/fuNMcZs377dBAQEmLffftts3brVrFy50tSoUcN07NjRWvcjjzxibrvtNnP+/Hkzd+5c4+XlZX766acrrvdKcnrvXO099/PPPxsfHx/To0cPs2HDBvPrr7+ad9991xw+fNgYc+XPD2NMlt8hl/++QFY3W5+sVauWWbVqlfnpp59M7dq1Tb169aw2gwcPNgEBAaZx48Zm/fr1ZuPGjcYYY+Lj402zZs3M2rVrzdatW83TTz9tihQpYo4ePWqMMWbu3LnG09PTDBo0yPz22
29mw4YNZtiwYdZ6S5cubX1uXu07xaVt09PTTfXq1c0dd9xhfvrpJ/PDDz+Y2NhYExcX51RzYGCg9XNatmyZCQsLM//973+vuk/ykxv9u8OVftfmth9k13clmeLFi5sJEyaYHTt2mJSUFLNs2TITHBxsJk2aZHbs2GEWLlxoIiMjzZAhQ6zt3X777ebWW281CxcutPbXN998Y1JTU82oUaNMcHCw9d7L/H1funRpExISYt5//32zbds2k5iYaDw8PMzvv/9ujDFX/S50/vx5U7BgQTNgwACzfft289tvv5lJkyaZlJQUY4wxt956q3nsscdMcnKy2bp1q/n888/Nhg0brq1DuRmh+yb19ttvm9KlS+c4//Dhw0aS2bRpkzEm919q4uLisoSC2267zTz33HPW8xYtWjj9Ar/W0C3JbN++3Vrm/fffN6GhodbzqKgoK0RneuWVV0zdunWNMcY89dRT5q677jIZGRlZXv+IESNMhQoVTFpaWo77CNfu0v6V+fP/+OOPrfmbN282kkxycrIxxpgqVapYvxgu9/XXX5uCBQuaCxcumA0bNpiwsDDTp08fq8917drVCpQpKSnG09PT7Nu3z2kdd999txk4cKAx5v/6lisf6h06dDChoaFWmDbGmClTppiKFSs69a/U1FTj5+dnFixYYC2XGbr//vtv4+/vb1atWuW07i5dupg2bdoYYy6GE4fDYf0ySk9PNyVLljRjxozJsbaZM2eaIkWKWM9z894pUaKEeeGFF3JcZ0xMjHnjjTes582aNXP64oz/c/LkSePj42PGjRuXZd5HH31kChcubE6fPm1NmzdvnvHw8DAHDhwwxlzsI6VLlzYXLlyw2jzyyCOmdevW1vNLv7Bn6tKli3niiSecpi1fvtx4eHiYc+fOGWOM+euvv8wtt9xievToYUJDQ81rr73m1D679V5Jdu+d3Lzn2rRpY+rXr5/r7Vzr7ydcdDP2yR9++MGalpycbCSZNWvWGGMuBhcvLy9z6NAhp7qCg4PN33//7bS+qKgo8+GHHxpjjKlbt65p165djtu+tNarfae4tO3ChQuNp6en2b17tzU/83fijz/+aNXs7+9vTp48abV55plnTJ06da62S/K1G+27w5V+1+a2H1zed425GLr79u2bpdZL/yhkzMXvKeHh4cYYYxYsWGA8PDzMli1bsq1n4sSJpmDBglmmly5d2jz22GPW84yMDFO8eHHre8nVvgsdPXrUSLIOLlwuKCjIOliW33F6OSRJ27ZtU5s2bVS2bFkFBwcrMjJSknJ1aszlqlat6vQ8PDxchw4dyosynfj7+ysqKirb7Zw5c0Y7duxQly5dFBgYaD1effVV69Skjh07asOGDapYsaJ69+6thQsXWut65JFHdO7cOZUtW1bdunXTF1984XQKDlyTm/51ab8JDw+XJOvn2bt3b7366quqX7++Bg8erF9++cVqe+edd+rUqVP6+eef9f333ysuLk4NGza0Tvv6/vvv1bBhQ0kXT5tMT09XhQoVnPrF999/73S6qre3d5Z+fDVVqlRxupZq48aN2r59u4KCgqzthISE6O+//8721Njt27fr7Nmzuueee5xq++STT6z21atXV3R0tKZNm2a9tkOHDumRRx6x1rN48WLdfffdKlmypIKCgvT444/r6NGjOnv2rNXmSu+dQ4cO6c8//9Tdd9+d42vt2rWrJk6cKEk6ePCgvv32W3Xu3Nml/XWzSE5OVmpqarb7Mzk5WdWqVVNAQIA1rX79+srIyNCWLVusabfeeqs8PT2t57n5TN24caMmTZrk1JcSEhKUkZGhnTt3Srp4edD48eM1ZswYRUVF6fnnn/+nLzfLeyc377kNGzZcsb/l5e8n3Hx9skCBArrtttus55UqVVKhQoWUnJxsTStdurTTddUbN27U6dOnVaRIEad6d+7cmet+eylXvlMkJycrIiJCERER1rSYmJgsNUdGRiooKMh6btd3LXe6kb87XO13bW77weV9N1OtWrWcnm/cuFEvv/yyU/3dunXT/v37dfbsWW3YsEG33HKLKlSokKv6L3Xpa3Y4HAoLC7N+Blf7LhQSEqKOHTsqISFBzZo10+jRo50ud+vfv7+6du2q+Ph4vf766/n60iIGUoMkqVmzZipdurTGjRunEiVKKCMjQ5UrV1ZaWprL67p8sAaHw6GMjIwc22cOMmGMsaZlXhvm6nYy15F5Tc64ceNUp04dp3aZXxRq1qypnTt36ttvv9XixYvVqlUrxcfHa9asWYqIiNCWLVu0ePFiLVq0SD179tRbb72l77//nsEorkFu+tel+zXzWr3MftO1a1clJCRo3rx5WrhwoRITEzVixAg99dRTKlSokKpVq6akpCStXr1a99xzjxo0aKDWrVtr69at2rZtm+Li4iRd7Beenp5at26d0xdGSQoMDLT+7+fnl6vrBS916ZfUzG3FxsZq6tSpWdpm9wsys8/OmzdPJUuWdJrn4+Nj/b9du3aaNm2ann/+eU2bNk2NGzdWkSJFJF28xu3+++9Xjx499NprrykkJEQrVqxQly5dlJaWJn9/f0lXfu/4+fld9bW2b99ezz//vFavXq1Vq1apTJkyuvPOO6+63M0oN/vzalz9TJUu9qfu3btnOxZFqVKlrP8vW7ZMnp6e2r9/v86cOeP0Jf5aXP7eyc177mr7KC9/P+Hm65O5kd3nd3h4eLbjymSO4OzKfrTjO8W1/Azymxv5u0NevA+lrH03p+mnT5/W0KFD9eCDD2Zp6+vr+4/quVJfzM13oYkTJ6p3796aP3++ZsyYoRdffFGLFi3S7bffriFDhqht27aaN2+evv32Ww0ePFjTp093Gj8hv+BIN3T06FFt2bJFL774ou6++25FR0dbA9jkJPOIXnp6+j/efuab7tK/bF3tfuBXExoaqhIlSuiPP/5QuXLlnB5lypSx2gUHB6t169YaN26cZsyYodmzZ+uvv/6SdPEDsVmzZnrnnXesD+VNmzb9o7puRtfSv7ITERGh//znP5ozZ46efvppjRs3zpoXFxenpUuXatmyZWrYsKFCQkIUHR2t1157TeHh4dZfbmvUqKH09HQdOnQoS7/Iq9H5M9WsWVPbtm1T8eLFs2yrYMGCWdpfOvDK5e0v/Ut327Zt9euvv2rdunWaNWuW2rVrZ81bt26dMjIyNGLECN1+++2qUKGC/vzzT5fqDgoKUmRkpJYsWZJjmyJFiqhly5aaOHGiJk2apE6dOrm0jZtJ+fLl5efnl+3+jI6O1saNG62BpKSLA4p5eHhYg/Hlhre3d5bP4po1a+q3337L0pfKlStnfX6vWrVKb7zxhv73v/8pMDDQaQCfnNbrqty856pWrZpjf8urzw/8n5utT164cMFp4KUtW7bo+PHjio6OznGZmjVr6sCBAypQoECWWosWLSrpyv02O7n9ThEdHa09e/Y4Dfb222+/6fjx44qJicn19vK7G/27w9V+1+Z1P6hZs6a2bNmS7fvPw8NDVatW1d69e3O8le+1/j7I7XehGjVqaODAgVq1apUq
V65sndEnSRUqVFC/fv20cOFCPfjgg9aZdvkNoRsqXLiwihQpoo8++kjbt2/Xd999p/79+19xmdKlS8vhcGju3Lk6fPiw08imrvLz89Ptt9+u119/XcnJyfr+++/14osvXvP6Mg0dOlSJiYl65513tHXrVm3atEkTJ07UyJEjJV0c5fmzzz7T77//rq1bt2rmzJkKCwtToUKFNGnSJI0fP16//vqr/vjjD3366afy8/NT6dKl/3FdN5tr6V+X69u3rxYsWKCdO3dq/fr1Wrp0qdMXpoYNG2rBggUqUKCAKlWqZE2bOnWq9Zdq6eIHd7t27dS+fXvNmTNHO3fu1I8//qjExETNmzcvb17w/9euXTsVLVpULVq00PLly7Vz504lJSWpd+/e2rt3b5b2QUFBGjBggPr166fJkydrx44dWr9+vd59911NnjzZahcZGal69eqpS5cuSk9PV/Pmza155cqV0/nz5/Xuu+/qjz/+0JQpUzR27FiXax8yZIhGjBihd955R9u2bbPquFTXrl01efJkJScnq0OHDi5v42bh6+ur5557Ts8++6x1qcAPP/yg8ePHq127dvL19VWHDh3066+/aunSpXrqqaf0+OOPKzQ0NNfbiIyM1LJly7Rv3z4dOXJEkvTcc89p1apV6tWrlzZs2KBt27bpq6++skLMqVOn9Pjjj6t379667777NHXqVM2YMcMaLT+n9boqN++5gQMHau3aterZs6d++eUX/f777xozZoyOHDmSJ58fcHaz9UkvLy899dRTWrNmjdatW6eOHTvq9ttvV+3atXNcJj4+XnXr1lXLli21cOFC7dq1S6tWrdILL7xgBfjBgwfrs88+0+DBg5WcnKxNmzbpjTfeyHZ9rnyniI+PV5UqVdSuXTutX79eP/74o9q3b6+4uLgspwzfyG6G7w5X+l2b1/1g0KBB+uSTTzR06FBt3rxZycnJmj59uvV9Oy4uTg0aNNBDDz2kRYsWWWeCzp8/X9LF997p06e1ZMkSHTlyxOmStSu52nehnTt3auDAgVq9erVSUlK0cOFCbdu2TdHR0Tp37px69eqlpKQkpaSkaOXKlVq7du0V/2D2r+bma8rhJpcPpLZo0SITHR1tfHx8TNWqVU1SUpKRZL744gtjTPaDnb388ssmLCzMOBwOa2C0uLg406dPH6dtXT5w2uXPjTHmt99+M3Xr1jV+fn6mevXqZuHChVcdSO3yAR2++OILc3mXnjp1qqlevbrx9vY2hQsXNg0aNDBz5swxxlwcMKZ69eomICDABAcHm7vvvtusX7/eWledOnVMcHCwCQgIMLfffrtZvHhxrvYtsrpS/8rNQHq9evUyUVFRxsfHxxQrVsw8/vjj5siRI1b7o0ePGofD4TSQT2Z/GDt2rFMtaWlpZtCgQSYyMtJ4eXmZ8PBw88ADD5hffvnFGJPzYCFXcvko5Jn2799v2rdvb4oWLWp8fHxM2bJlTbdu3cyJEyeyXS4jI8OMGjXKVKxY0Xh5eZlixYqZhIQEa7TVTB988IGRZNq3b59lmyNHjjTh4eHGz8/PJCQkmE8++eSa3jtjx4616ggPDzdPPfWU0/yMjAxTunRp06RJk1zupZtXenq6efXVV03p0qWNl5eXKVWqlDWgzS+//GIaNWpkfH19TUhIiOnWrZvTKPDZ9a0+ffo4jWC7evVqU7VqVePj4+P0c/zxxx/NPffcYwIDA01AQICpWrWqNTBVp06dTJUqVZwGihoxYoQJCQkxe/fuveJ6c5LTe+dq7zljjElKSjL16tUzPj4+plChQiYhIcHqs67+fmIgtau72frk7NmzTdmyZY2Pj4+Jj4+3BqM05uJgVNWqVcuy7MmTJ81TTz1lSpQoYby8vExERIRp166d08BWs2fPtr5jFC1a1Dz44IPWvEsHR7vad4rLB4hLSUkxzZs3NwEBASYoKMg88sgj1kB2OdV8tQFy86Mb/buDMVf+XXst/cAY4/T5eKn58+ebevXqGT8/PxMcHGxq167tNIr+0aNHTadOnUyRIkWMr6+vqVy5spk7d641/z//+Y8pUqSIkWQGDx5sjMl+cMNq1apZ84258nehAwcOmJYtW5rw8HDj7e1tSpcubQYNGmTS09NNamqqefTRR01ERITx9vY2JUqUML169bIGXsxvHMZcciEtAAC5cPr0aZUsWVITJ07M9hoxAHC3SZMmqW/fvjp+/Li7SwFwk2MgNQBArmVkZOjIkSMaMWKEChUq5HR6OwAAALLimm4AuIpLb7Fx+WP58uXuLu+62r17t0JDQzVt2jRNmDBBBQrwt9ubxX333Zfj+2DYsGHuLg83Ifok/s347oBLcXo5AFzF9u3bc5xXsmTJPLv1B/Bvtm/fPp07dy7beSEhIQoJCbnOFeFmR5/EvxnfHXApQjcAAAAAADbh9HIAAAAAAGxC6AYAAAAAwCaEbgAAAAAAbELoBgAAAADAJoRuAADgMofDoS+//NLdZQAA8K9H6AYAIJ/q2LGjHA6H/vOf/2SZ9+STT8rhcKhjx465WldSUpIcDoeOHz+eq/b79+/Xfffd50K1AADcnAjdAADkYxEREZo+fbrT/Yr//vtvTZs2TaVKlcrz7aWlpUmSwsLC5OPjk+frBwDgRkPoBgAgH6tZs6YiIiI0Z84ca9qcOXNUqlQp1ahRw5qWkZGhxMRElSlTRn5+fqpWrZpmzZolSdq1a5caNWokSSpcuLDTEfKGDRuqV69e6tu3r4oWLaqEhARJWU8v37t3r9q0aaOQkBAFBASoVq1aWrNmjSRp48aNatSokYKCghQcHKzY2Fj99NNPdu4WAAD+NQq4uwAAAPDPdO7cWRMnTlS7du0kSRMmTFCnTp2UlJRktUlMTNSnn36qsWPHqnz58lq2bJkee+wxFStWTHfccYdmz56thx56SFu2bFFwcLD8/PysZSdPnqwePXpo5cqV2W7/9OnTiouLU8mSJfX1118rLCxM69evV0ZGhiSpXbt2qlGjhsaMGSNPT09t2LBBXl5e9u0QAAD+RQjdAADkc4899pgGDhyolJQUSdLKlSs1ffp0K3SnpqZq2LBhWrx4serWrStJKlu2rFasWKEPP/xQcXFxCgkJkSQVL15chQoVclp/+fLl9eabb+a4/WnTpunw4cNau3attZ5y5cpZ83fv3q1nnnlGlSpVstYHAMDNgtANAEA+V6xYMTVt2lSTJk2SMUZNmzZV0aJFrfnbt2/X2bNndc899zgtl5aW5nQKek5iY2OvOH/Dhg2qUaOGFbgv179/f3Xt2lVTpkxRfHy8HnnkEUVFReXilQEAkP8RugEAuAF07txZvXr1kiS9//77TvNOnz4tSZo3b55KlizpNC83g6EFBARccf6lp6JnZ8iQIWrbtq3mzZunb7/9VoMHD9b06dP1wAMPXHXbAADkdwykBgDADaBx48ZKS0vT+fPnrcHOMsXExMjHx0e7d+9WuXLlnB4RERGSJG9vb0lSenq6y9uuWrWqNmzYoL/++ivHNhUqVFC/fv20cOFCPfjgg5o4caLL2wEAID8
idAMAcAPw9PRUcnKyfvvtN3l6ejrNCwoK0oABA9SvXz9NnjxZO3bs0Pr16/Xuu+9q8uTJkqTSpUvL4XBo7ty5Onz4sHV0PDfatGmjsLAwtWzZUitXrtQff/yh2bNna/Xq1Tp37px69eqlpKQkpaSkaOXKlVq7dq2io6Pz9PUDAPBvRegGAOAGERwcrODg4GznvfLKK3rppZeUmJio6OhoNW7cWPPmzVOZMmUkSSVLltTQoUP1/PPPKzQ01DpVPTe8vb21cOFCFS9eXE2aNFGVKlX0+uuvy9PTU56enjp69Kjat2+vChUqqFWrVrrvvvs0dOjQPHnNAAD82zmMMcbdRQAAAAAAcCPiSDcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGATQjcAAAAAADYhdAMAAAAAYBNCNwAAAAAANiF0AwAAAABgE0I3AAAAAAA2IXQDAAAAAGCT/wedgWoP1gkztwAAAABJRU5ErkJggg==\n"},"metadata":{}},{"output_type":"display_data","data":{"text/plain":["
"],"image/png":"iVBORw0KGgoAAAANSUhEUgAAA9gAAAJOCAYAAABMYq+bAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABhg0lEQVR4nO3dd3xO9///8eeVkCFTCAkiUTN2G7WqDRWNohW7aI1SXaiqtnQIXemiWjWqAx1q16elVpVqUTtUa9cmKCVGJSTv3x9+OV+XDEl6NBKP++123drrnPc51+ucvK9znaezHMYYIwAAAAAA8K+45HUBAAAAAAAUBARsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAwE3j6NGjat++vYoVKyaHw6FRo0blel6NGzdW48aNbautIAsLC1OPHj3yuoxM9ejRQ2FhYbmaln5wc5s0aZIcDof27t2b42mHDRsmh8Nhf1EA8hQBG8BNJW1naN26dXldCjJx/vx5DRs2TMuWLbN93k8//bQWLlyoIUOG6IsvvlDz5s0zbetwODJ8BQUF2V5XTn3//fcaNmxYXpdhu7R13Lt37wzHv/jii1abv/766z+u7uaQFvrSXi4uLgoODlarVq3066+/5nV5mWrcuLEcDocqVqyY4fjFixdbyzRz5sz/uDoAN5NCeV0AAABXOn/+vIYPHy5Jth8Z/PHHH9W6dWsNGjQoW+2bNWumbt26OQ3z9PSUJC1atMjW2nLi+++/15gxYwpkyPbw8NCsWbM0duxYubm5OY37+uuv5eHhoQsXLuRRdTePcePGydvbW6mpqTpw4IA+/vhj3XXXXVqzZo1q166d1+VlyMPDQ7t27dKaNWtUt25dp3FfffUVfQfAf4KADQB5yBijCxcuWKEN19exY8fk7++f7faVKlXSgw8+mOG4q8Mf7NG8eXN9++23mj9/vlq3bm0NX7lypfbs2aN27dpp1qxZeVhh/nf+/HkVKVIkyzbt27dX8eLFrfcxMTGqXr26ZsyYccMG7PLly+vSpUv6+uuvnQL2hQsX9M0336hly5b0HQDXHaeIA7jp9ejRQ97e3tq/f79atWolb29vlS5dWmPGjJEk/fbbb7r77rvl5eWl0NBQTZkyxWn6tNPOly9frkcffVTFihWTr6+vunXrpr///tupbVhYmFq1aqWFCxeqTp068vT01EcffSRJ+vPPP9WhQwcFBASoSJEiql+/vubNm2dNe/ToURUqVMg6unul7du3y+Fw6MMPP7SGnTp1SgMGDFBISIjc3d1VoUIFvfXWW0pNTbXa7N27Vw6HQ++++67GjBmjW265RUWKFNE999yjAwcOyBijV199VWXKlJGnp6dat26tkydPpvv8+fPn684775SXl5d8fHzUsmVL/f777xmu50OHDikmJkbe3t4KDAzUoEGDlJKSYtUTGBgoSRo+fLh1Sue1jtRea92l/Y2MMRozZow133/j6mtvly1bJofDoenTp+v1119XmTJl5OHhoaZNm2rXrl3ppl+9erWaN28uPz8/FSlSRJGRkVqxYsU1P7dHjx5W37zyVN4ra7j69Pq0v/OkSZOc5nOtv0ea1NRUjRo1StWqVZOHh4dKliypRx99NF3/NsbotddeU5kyZVSkSBE1adIkXT+4ltKlS+uuu+5K9z376quvVKNGDVWvXj3D6WbMmKGIiAh5enqqePHievDBB3Xo0KF07ebMmaPq1avLw8ND1atX1zfffJPh/LK7zNl16dIlvfrqqypfvrzc3d0VFhamF154QUlJSVabVq1a6ZZbbslw+gYNGqhOnTpOw7788ktrmQMCAvTAAw/owIEDTm0aN26s6tWra/369brrrrtUpEgRvfDCCzmuP+3SiEKF/u/YTHJysoYOHaqIiAj5+fnJy8tLd955p5YuXZpu+qlTpyoiIkI+Pj7y9fVVjRo19P777zu1yc4261o6d+6sadOmOU3z3Xff6fz58+rYsWOG02zcuFH33nuvfH195e3traZNm2Z4Ovzvv/+uu+++W56enipTpoxee+21TGvLzjYRQAFlAOAmMnHiRCPJrF271hrWvXt34+HhYapWrWoee+wxM2bMGNOwYUMjyUycONGUKlXKPPvss2b06NGmWrVqxtXV1fz555/p5lmjRg1z5513mg8++MA8+eSTxsXFxdx1110mNTXVahsaGmoqVKhgihYtagYPHmzGjx9vli5dahISEkzJkiWNj4+PefHFF83IkSNNrVq1jIuLi5k9e7Y1/d13322qVq2abrmGDx9uXF1dTUJCgjHGmHPnzpmaNWuaYsWKmRdeeMGMHz/edOvWzTgcDvPUU09Z0+3Zs8dIMrVr1zZVq1Y1I0eONC+99JJxc3Mz9evXNy+88IJp2LCh+eCDD0z//v2Nw+EwPXv2dPrszz//3DgcDtO8eXMzevRo89Zbb5mwsDDj7+9v9uzZk249V6tWzTz88MNm3Lhxpl27dkaSGTt2rDHGmLNnz5px48YZSaZNmzbmiy++MF988YXZtGlTpn/T7Ky73bt3my+++MJIMs2aNbPmmxVJplevXub48eNOrwsXLhhjjImMjDSRkZFW+6VLlxpJ5tZbbzURERHmvffeM8OGDTNFihQxdevWdZr3kiVLjJubm2nQoIEZMWKEee+990zNmjWNm5ubWb16dZZ1rVy50jRr1sxIspYjbVnSali6dKnTNGl/54kTJ+bo75Gmd+/eplChQuaRRx4x48ePN88//7zx8vIyt99+u0lOTrbavfTSS0aSadGihfnwww/Nww8/bEqVKmWKFy9uunfvnuVypa3zJ5980kyYMMF4enqaM2fOGGOMuXjxogkMDDRxcXEmNjbWSDLHjx+3pkv7Dt5+++3mvffeM4MHDzaenp4mLCzM/P3331a7hQsXGhcXF1O9enUzcuRI8+KLLxo/Pz9TrVo1ExoamqtlvrofZKZ79+5Gkmnfvr0ZM2aM6datm5FkYmJirDaff/65kWTWrFnjNO3evXuNJPPOO+9Yw1577TXjcDhMp06dzNixY83w4cNN8eLF0y1zZGSkCQoKMoGBgaZfv37mo48+MnPmzMm0zrT1u337dnP8+HFz9OhRs2HDBtOmTRvj4eFhtmzZYrU9fvy4CQ4ONgMHDjTjxo0zb7/9tqlcubIpXLiw2bhxo9Vu0aJFRpJp2rSpGTNmjBkzZozp27ev6dChg9Umu9uszERGRppq1aqZHTt2GElmyZIl1riYmBgTHR1tfT9mzJhhjduyZYvx8vIywcHB5tVXXzVvvvmmKVeunHF3dze//vqr1e7IkSMmMDDQFC1a1AwbNsy88847pmLFiqZmzZpGktO2LrvbxLR1DaBg4VsN4KaSWcCWZN544w1r2N9//208PT2Nw+EwU6dOtYZv27bNSDKxsbHp5hkREeG04/32228bSeZ///ufNSw0NNRIMgsWLHCqa8CAAUaS+fnnn61hZ86
cMeXKlTNhYWEmJSXFGGPMRx99ZCSZ3377zWn6qlWrmrvvvtt6/+qrrxovLy+zY8cOp3aDBw82rq6uZv/+/caY/wtegYGB5tSpU1a7IUOGGEmmVq1a5uLFi9bwzp07Gzc3Nytknjlzxvj7+5tHHnnE6XMSEhKMn5+f0/C09fzKK684tU0LpGmOHz+ebh1nJbvrzpj/C3DZISnDV1pIzSxgh4eHm6SkJGv4+++/7/Q3S01NNRUrVjTR0dFO//hy/vx5U65cOdOsWbNr1vbkk09muGOe04Cdnb/Hzz//bCSZr776yqndggULnIYfO3bMuLm5mZYtWzot1wsvvGAk5Shgnzx50ri5uVn/cDBv3jzjcDjM3r170wXs5ORkU6JECVO9enXzzz//WPOaO3eukWSGDh1qDatdu7YJDg526utp4e/KgJ3dZTYmewE7Pj7eSDK9e/d2Gj5o0CAjyfz444/GGGNOnz5t3N3dzTPPPOPU7u233zYOh8Ps27fPGHM5cLu6uprXX3/dqd1vv/1mChUq5DQ8MjLSSDLjx4/PssY0aev36pe/v3+67dalS5ec+roxl7edJUuWNA8//LA17KmnnjK+vr7m0qVLmX5udrdZmUkL2MYYU6dOHdOrVy+rHjc3NzN58uQMA3ZMTIxxc3Mzu3fvtoYdPnzY+Pj4mLvuussalradufIfwI4dO2b8/PycAnZOtokEbKBg4hRxAPj/rrxzsb+/vypXriwvLy+n0worV64sf39//fnnn+mm79OnjwoXLmy9f/zxx1WoUCF9//33Tu3KlSun6Ohop2Hff/+96tatq0aNGlnDvL291adPH+3du1d//PGHJKlt27YqVKiQpk2bZrXbsmWL/vjjD3Xq1MkaNmPGDN15550qWrSo/vrrL+sVFRWllJQULV++3OnzO3ToID8/P+t9vXr1JEkPPvig0ymh9erVU3JysnXq7eLFi3Xq1Cl17tzZ6XNcXV1Vr169DE8Vfeyxx5ze33nnnRmuz+zK7rrLjdatW2vx4sVOr6v/dlfr2bOn0/XZd955pyRZyxgfH6+dO3eqS5cuOnHihLXOzp07p6ZNm2r58uU5OiX237rW32PGjBny8/NTs2bNnP7GERER8vb2tv7GP/zwg5KTk9WvXz+n0+8HDBiQ45qKFi2q5s2b6+uvv5YkTZkyRQ0bNlRoaGi6tuvWrdOxY8f0xBNPyMPDwxresmVLValSxbpU4MiRI4qPj1f37t2d+nqzZs1UtWpVp3lmd5mzK20bMHDgQKfhzzzzjCRZNfr6+uree+/V9OnTZYyx2k2bNk3169dX2bJlJUmzZ89WamqqOnbs6FRfUFCQKlasmK4+d3d39ezZM0c1z5o1S4sXL9aiRYs0ceJEVapUSe3atdPKlSutNq6urlZfT01N1cmTJ3Xp0iXVqVNHGzZssNr5+/vr3LlzWrx4caafl9NtVla6dOmi2bNnKzk5WTNnzpSrq6vatGmTrl1KSooWLVqkmJgYp1Pzg4OD1aVLF/3yyy9KTEyUdPlvWL9+fadruwMDA9W1a1eneeZmmwigYOEmZwCgy3efTbv2N42fn5/KlCmT7lpdPz+/DK/DvPrxMN7e3goODk73fNRy5cqlm3bfvn1WqL1SeHi4Nb569eoqXry4mjZtqunTp+vVV1+VdHnnu1ChQmrbtq013c6dO7V58+Z0y5Tm2LFjTu/TdtyvXEZJCgkJyXB42vLv3LlTknT33Xdn+Dm+vr5O7zNaz0WLFs31da1S9tddbpQpU0ZRUVE5mubqdVm0aFFJ6ddZ9+7dM53H6dOn5eXlle5698DAQLm6uuaonqxk5++xc+dOnT59WiVKlMhwHml9ad++fZLSfw8CAwOtdZATXbp00UMPPaT9+/drzpw5evvttzNsl/a5lStXTjeuSpUq+uWXX7KsL23aKwNhdpc5u/bt2ycXFxdVqFDBaXhQUJD8/f2t2iSpU6dOmjNnjlatWqWGDRtq9+7dWr9+vdMz23fu3CljTKaPpLryH/qky9e15/SmfHfddZfTTc7at2+vihUrql+/flq/fr01fPLkyRoxYoS2bdumixcvWsOv3M498cQTmj59uu69916VLl1a99xzjzp27Oj0mLycbrOy8sADD2jQoEGaP3++vvrqK7Vq1Uo+Pj7p2h0/flznz5/PsO+Eh4dbd1CvVq1aptuZq6fN6TYRQMFDwAYAKdPQktnwK48u5dS/vWP4Aw88oJ49eyo+Pl61a9fW9OnT1bRpU6ed4dTUVDVr1kzPPfdchvOoVKmS0/vcLn/akdYvvvgiw+dDX3n0O6v5FSTZXWfvvPNOpndj9vb21ooVK9SkSROn4Xv27FFYWFimn53ZjduuvmnZtWq9UmpqqkqUKKGvvvoqw/GZBaJ/6/7775e7u7u6d++upKSkTG9QdT1cr2XOzo317rvvPhUpUkTTp09Xw4YNNX36dLm4uKhDhw5O9TkcDs2fPz/Dv6G3t7fTezueUuDt7a169erpf//7n86dOycvLy99+eWX6tGjh2JiYvTss8+qRIkScnV1VVxcnHbv3m1NW6JECcXHx2vhwoWaP3++5s+fr4kTJ6pbt26aPHmytUw52WZlJTg4WI0bN9aIESO0YsWK//TO4TndJgIoePiWA4BNdu7c6RSIzp49qyNHjqhFixbXnDY0NFTbt29PN3zbtm3W+DQxMTF69NFHrdPEd+zYoSFDhjhNV758eZ09ezbHR19zqnz58pIu70Db9Vk5vbt3TtbdjSBtnfn6+ma5zmrVqpXulNq0HfbM1lHakeJTp045Db/yCGlOlS9fXj/88IPuuOOOLINa2nreuXOn0+m2x48fz9UZCp6enoqJidGXX36pe++91+kfkDL63O3bt6c7arh9+3Zr/JX1Xe3q/pPdZc6u0NBQpaamaufOndaZFdLlJwOcOnXKqY96eXmpVatWmjFjhkaOHKlp06bpzjvvVKlSpZzqM8aoXLlyOQqe/9alS5ckXd62eXl5aebMmbrllls0e/Zspz4ZGxubblo3Nzfdd999uu+++5SamqonnnhCH330kV5++WVVqFDB9m1Wly5d1Lt3b/n7+2e6DQ4MDFSRIkUy3X64uLhYZ/GEhoZmu+9I9m4TAeQvXIMNADaZMGGC0ymS48aN06VLl3Tvvfdec9oWLVpozZo1WrVqlTXs3LlzmjBhgsLCwpyuEfX391d0dLSmT5+uqVOnys3NTTExMU7z69ixo1atWqWFCxem+6xTp05ZO8r/VnR0tHx9ffXGG284LXua48eP53ieac/nvTokZiYn6+5GEBERofLly+vdd9/V2bNn041PW2dFixZVVFSU0yvtGmMvLy9J6ddRaGioXF1d012vOnbs2FzX27FjR6WkpFiXJFzp0qVLVg1RUVEqXLiwRo8e7XSGx5WnNufUoEGDFBsbq5dffjnTNnXq1FGJEiU0fvx4p0dezZ8/X1u3blXLli0lXT6qWbt2bU2ePFmnT5+22i1evDjddfrZXebsSgt4V6+LkSNHSpJVY5pOnTrp8OHD+uSTT7Rp0y
an+ytIl+/F4OrqquHDh6c7m8YYoxMnTuSovuw4efKkVq5cqaCgIOvU+bSj51fWsHr1aqfvoqR09bi4uKhmzZqSZP3N7N5mtW/fXrGxsRo7dmymp8e7urrqnnvu0f/+9z+nS3mOHj2qKVOmqFGjRtYp3S1atNCvv/6qNWvWWO2OHz+e7iyH67FNBJC/cAQbAGySnJyspk2bqmPHjtq+fbvGjh2rRo0a6f7777/mtIMHD9bXX3+te++9V/3791dAQIAmT56sPXv2aNasWXJxcf730E6dOunBBx/U2LFjFR0dLX9/f6fxzz77rL799lu1atVKPXr0UEREhM6dO6fffvtNM2fO1N69ezM9IpgTvr6+GjdunB566CHddttteuCBBxQYGKj9+/dr3rx5uuOOO5yezZ0dnp6eqlq1qqZNm6ZKlSopICBA1atXz/Q66pyuu7zm4uKiTz75RPfee6+qVaumnj17qnTp0jp06JCWLl0qX19ffffdd1nOIyIiQpLUv39/RUdHy9XVVQ888ID8/PzUoUMHjR49Wg6HQ+XLl9fcuXNzfM3wlSIjI/Xoo48qLi5O8fHxuueee1S4cGHt3LlTM2bM0Pvvv6/27dtbz9COi4tTq1at1KJFC23cuFHz58/PdV+rVauWatWqlWWbwoUL66233lLPnj0VGRmpzp076+jRo3r//fcVFhamp59+2mobFxenli1bqlGjRnr44Yd18uRJjR49WtWqVXP6x47sLnNOlqN79+6aMGGCTp06pcjISK1Zs0aTJ09WTExMuksBWrRoIR8fHw0aNEiurq5q166d0/jy5cvrtdde05AhQ7R3717FxMTIx8dHe/bs0TfffKM+ffpo0KBB2a4vIzNnzpS3t7eMMTp8+LA+/fRT/f333xo/frx1tLpVq1aaPXu22rRpo5YtW2rPnj0aP368qlat6rQ+e/furZMnT+ruu+9WmTJltG/fPo0ePVq1a9e2jujbvc3y8/PTsGHDrtnutdde0+LFi9WoUSM98cQTKlSokD766CMlJSU5Xff/3HPP6YsvvlDz5s311FNPycvLSxMmTFBoaKg2b95stbse20QA+Uwe3b0cAPJEZo/p8vLyStf2yse+XCk0NNS0bNky3Tx/+ukn06dPH1O0aFHj7e1tunbtak6cOJHltFfavXu3ad++vfH39zceHh6mbt26Zu7cuRm2TUxMNJ6enkaS+fLLLzNsc+bMGTNkyBBToUIF4+bmZooXL24aNmxo3n33XetxYmmPb7ry+brGmAwfZ3Plsl65/tLaR0dHGz8/P+Ph4WHKly9vevToYdatW2e1yWw9Z/SompUrV5qIiAjj5uaWrUd2ZXfdKYeP6cqqbWaP6bp6nWX0iCxjjNm4caNp27atKVasmHF3dzehoaGmY8eOTs/vzcylS5dMv379TGBgoHE4HE7r7/jx46Zdu3amSJEipmjRoubRRx81W7ZsyfAxXdn9exhjzIQJE0xERITx9PQ0Pj4+pkaNGua5554zhw8fttqkpKSY4cOHm+DgYOPp6WkaN25stmzZYkJDQ3P0mK6sZPQcbGOMmTZtmrn11luNu7u7CQgIMF27djUHDx5MN/2sWbNMeHi4cXd3N1WrVjWzZ8823bt3T/cc7Owuc3afg33x4kUzfPhwU65cOVO4cGETEhJihgwZYj3y7mpdu3Y1kkxUVFSm85w1a5Zp1KiR8fLyMl5eXqZKlSrmySefNNu3b3eqL6PtWGYyekyXl5eXadCggZk+fbpT29TUVPPGG2+Y0NBQ4+7ubm699VYzd+7cdOtz5syZ5p577jElSpQwbm5upmzZsubRRx81R44ccZpfdrZZmcnOcmb2Hd2wYYOJjo423t7epkiRIqZJkyZm5cqV6abfvHmziYyMNB4eHqZ06dLm1VdfNZ9++mm652Cnfda1tok8pgsomBzG/Is79QAANGnSJPXs2VNr165VnTp18rocAAAA5JEb67w5AAAAAADyKQI2AAAAAAA2IGADAAAAAGADrsEGAAAAAMAGHMEGAAAAAMAGBGwAAAAAAGxQKK8LKAhSU1N1+PBh+fj4yOFw5HU5AAAAAAAbGWN05swZlSpVSi4umR+nJmDb4PDhwwoJCcnrMgAAAAAA19GBAwdUpkyZTMcTsG3g4+Mj6fLK9vX1zeNqAAAAAAB2SkxMVEhIiJX9MkPAtkHaaeG+vr4EbAAAAAAooK51STA3OQMAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbFAorwsAADsNdwzP6xLyXKyJzesSAAAAbkocwQYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGyQ7wL2mDFjFBYWJg8PD9WrV09r1qzJsv2MGTNUpUoVeXh4qEaNGvr++++tcRcvXtTzzz+vGjVqyMvLS6VKlVK3bt10+PDh670YAAAAAIACJl8F7GnTpmngwIGKjY3Vhg0bVKtWLUVHR+vYsWMZtl+5cqU6d+6sXr16aePGjYqJiVFMTIy2bNkiSTp//rw2bNigl19+WRs2bNDs2bO1fft23X///f/lYgEAAAAACgCHMcbkdRHZVa9ePd1+++368MMPJUmpqakKCQlRv379NHjw4HTtO3XqpHPnzmnu3LnWsPr166t27doaP358hp+xdu1a1a1bV/v27VPZsmWzVVdiYqL8/Px0+vRp+fr65mLJANhluGN4XpeQ52JNbF6XAAAAUKBkN/PlmyPYycnJWr9+vaKioqxhLi4uioqK0qpVqzKcZtWqVU7tJSk6OjrT9pJ0+vRpORwO+fv7Z9omKSlJiYmJTi8AAAAAwM0t3wTsv/76SykpKSpZsqTT8JIlSyohISHDaRISEnLU/sKFC3r++efVuXPnLP9VIi4uTn5+ftYrJCQkh0sDAAAAACho8k3Avt4uXryojh07yhijcePGZdl2yJAhOn36tPU6cODAf1QlAAAAAOBGVSivC8iu4sWLy9XVVUePHnUafvToUQUFBWU4TVBQULbap4Xrffv26ccff7zmddTu7u5yd3fPxVIAAAAAAAqqfHME283NTREREVqyZIk1LDU1VUuWLFGDBg0ynKZBgwZO7SVp8eLFTu3TwvXOnTv1ww8/qFixYtdnAQAAAAAABVq+OYItSQMHDlT37t1Vp04d1a1bV6NGjdK5c+fUs2dPSVK3bt1UunRpxcXFSZKeeuopRUZGasSIEWrZsqWmTp2qdevWacKECZIuh+v27dtrw4YNmjt3rlJSUqzrswMCAuTm5pY3CwoAAAAAyHfyVcDu1KmTjh8/rqFDhyohIUG1a9fWggULrBuZ7d+/Xy4u/3dQvmHDhpoyZYpee
uklvfDCC6pYsaLmzJmj6tWrS5IOHTqkb7/9VpJUu3Ztp89aunSpGjdu/J8sFwAAAAAg/8tXz8G+UfEcbODGwXOweQ72zYK+Tl8HAPx3CtxzsAEAAAAAuJERsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsEG+C9hjxoxRWFiYPDw8VK9ePa1ZsybL9jNmzFCVKlXk4eGhGjVq6Pvvv3caP3v2bN1zzz0qVqyYHA6H4uPjr2P1AAAAAICCKl8F7GnTpmngwIGKjY3Vhg0bVKtWLUVHR+vYsWMZtl+5cqU6d+6sXr16aePGjYqJiVFMTIy2bNlitTl37pwaNWqkt956679aDAAAAABAAZSvAvbIkSP1yCOPqGfPnqpatarGjx+vIkWK6LPPPsuw/fvvv6/mzZvr2WefVXh4uF599VXddttt+vDDD602Dz30kIYOHaqoqKj/ajEAAAAAAAVQvgnYycnJWr9+vVMQdnFxUVRUlFatWpXhNKtWrUoXnKOjozNtDwAAAABAbhXK6wKy66+//lJKSopKlizpNLxkyZLatm1bhtMkJCRk2D4hIeFf1ZKUlKSkpCTrfWJi4r+aHwAAAAAg/8s3R7BvJHFxcfLz87NeISEheV0SAAAAACCP5ZuAXbx4cbm6uuro0aNOw48ePaqgoKAMpwkKCspR++waMmSITp8+bb0OHDjwr+YHAAAAAMj/8k3AdnNzU0REhJYsWWINS01N1ZIlS9SgQYMMp2nQoIFTe0lavHhxpu2zy93dXb6+vk4vAAAAAMDNLd9cgy1JAwcOVPfu3VWnTh3VrVtXo0aN0rlz59SzZ09JUrdu3VS6dGnFxcVJkp566ilFRkZqxIgRatmypaZOnap169ZpwoQJ1jxPnjyp/fv36/Dhw5Kk7du3S7p89PvfHukGAAAAANw88lXA7tSpk44fP66hQ4cqISFBtWvX1oIFC6wbme3fv18uLv93UL5hw4aaMmWKXnrpJb3wwguqWLGi5syZo+rVq1ttvv32WyugS9IDDzwgSYqNjdWwYcP+mwUDAAAAAOR7DmOMyesi8rvExET5+fnp9OnTnC4O5LHhjuF5XUKeizWxeV0C/gP0dfo6AOC/k93Ml2+uwQYAAAAA4EZGwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsUCivC8B/Y7hjeF6XcEOINbF5XQIAAACAAooj2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAAAAANggVwHb1dVVx44dSzf8xIkTcnV1/ddFAQAAAACQ3+QqYBtjMhyelJQkNze3f1UQAAAAAAD5UaGcNP7ggw8kSQ6HQ5988om8vb2tcSkpKVq+fLmqVKlib4UAAAAAAOQDOQrY7733nqTLR7DHjx/vdDq4m5ubwsLCNH78eHsrBAAAAAAgH8hRwN6zZ48kqUmTJpo9e7aKFi16XYoCAAAAACC/yVHATrN06VK76wAAAAAAIF/LVcBOSUnRpEmTtGTJEh07dkypqalO43/88UdbigMAAAAAIL/IVcB+6qmnNGnSJLVs2VLVq1eXw+Gwuy4AAAAAAPKVXAXsqVOnavr06WrRooXd9QAAAAAAkC/l6jnYbm5uqlChgt21AAAAAACQb+UqYD/zzDN6//33ZYyxux4AAAAAAPKlXJ0i/ssvv2jp0qWaP3++qlWrpsKFCzuNnz17ti3FAQAAAACQX+QqYPv7+6tNmzZ21wIAAAAAQL6Vq4A9ceJEu+sAAAAAACBfy9U12JJ06dIl/fDDD/roo4905swZSdLhw4d19uxZ24oDAAAAACC/yNUR7H379ql58+bav3+/kpKS1KxZM/n4+Oitt95SUlKSxo8fb3edAAAAAADc0HJ1BPupp55SnTp19Pfff8vT09Ma3qZNGy1ZssS24jIyZswYhYWFycPDQ/Xq
1dOaNWuybD9jxgxVqVJFHh4eqlGjhr7//nun8cYYDR06VMHBwfL09FRUVJR27tx5PRcBAAAAAFAA5Spg//zzz3rppZfk5ubmNDwsLEyHDh2ypbCMTJs2TQMHDlRsbKw2bNigWrVqKTo6WseOHcuw/cqVK9W5c2f16tVLGzduVExMjGJiYrRlyxarzdtvv60PPvhA48eP1+rVq+Xl5aXo6GhduHDhui0HAAAAAKDgyVXATk1NVUpKSrrhBw8elI+Pz78uKjMjR47UI488op49e6pq1aoaP368ihQpos8++yzD9u+//76aN2+uZ599VuHh4Xr11Vd122236cMPP5R0+ej1qFGj9NJLL6l169aqWbOmPv/8cx0+fFhz5sy5bssBAAAAACh4chWw77nnHo0aNcp673A4dPbsWcXGxqpFixZ21eYkOTlZ69evV1RUlDXMxcVFUVFRWrVqVYbTrFq1yqm9JEVHR1vt9+zZo4SEBKc2fn5+qlevXqbzlKSkpCQlJiY6vQAAAAAAN7dc3eRsxIgRio6OVtWqVXXhwgV16dJFO3fuVPHixfX111/bXaMk6a+//lJKSopKlizpNLxkyZLatm1bhtMkJCRk2D4hIcEanzYsszYZiYuL0/Dhw3O8DHkp1sTmdQnAf4K+jpsFfR03g+GO/LW/db3wfS/46OsFp5/nKmCXKVNGmzZt0tSpU7V582adPXtWvXr1UteuXZ1uelZQDRkyRAMHDrTeJyYmKiQkJA8rAgAAAADktVwFbEkqVKiQHnzwQTtryVLx4sXl6uqqo0ePOg0/evSogoKCMpwmKCgoy/Zp/z169KiCg4Od2tSuXTvTWtzd3eXu7p6bxQAAAAAAFFC5DtiHDx/WL7/8omPHjik1NdVpXP/+/f91YVdzc3NTRESElixZopiYGEmXb7a2ZMkS9e3bN8NpGjRooCVLlmjAgAHWsMWLF6tBgwaSpHLlyikoKEhLliyxAnViYqJWr16txx9/3PZlAAAAAAAUXLkK2JMmTdKjjz4qNzc3FStWTA6HwxrncDiuS8CWpIEDB6p79+6qU6eO6tatq1GjRuncuXPq2bOnJKlbt24qXbq04uLiJF1+XndkZKRGjBihli1baurUqVq3bp0mTJhg1TpgwAC99tprqlixosqVK6eXX35ZpUqVskI8AAAAAADZkauA/fLLL2vo0KEaMmSIXFxydSPyXOnUqZOOHz+uoUOHKiEhQbVr19aCBQusm5Tt37/fqZ6GDRtqypQpeumll/TCCy+oYsWKmjNnjqpXr261ee6553Tu3Dn16dNHp06dUqNGjbRgwQJ5eHj8Z8sFAAAAAMj/HMYYk9OJihUrpjVr1qh8+fLXo6Z8JzExUX5+fjp9+rR8fX3zuhwAAIACgTsrX1ZQ7q6MzNHXb/x+nt3Ml6vDz7169dKMGTNyXRwAAAAAAAVNrk4Rj4uLU6tWrbRgwQLVqFFDhQsXdho/cuRIW4oDAAAAACC/yHXAXrhwoSpXrixJ6W5yBgAAAADAzSZXAXvEiBH67LPP1KNHD5vLAQAAAAAgf8rVNdju7u6644477K4FAAAAAIB8K1cB+6mnntLo0aPtrgUAAAAAgHwrV6eIr1mzRj/++KPmzp2ratWqpbvJ2ezZs20pDgAAAACA/CJXAdvf319t27a1uxYAAAAAAPKtXAXsiRMn2l0HAAAAAAD5Wq6uwZakS5cu6YcfftBHH32kM2fOSJIOHz6ss2fP2lYcAAAAAAD5Ra6OYO/bt0/NmzfX/v37lZSUpGbNmsnHx0dvvfWWkpKSNH78eLvrBAAAAADghpbru4jXqVNHf//9tzw9Pa3hbdq00ZIlS2wrDgAAAACA/CJXR7B//vlnrVy5Um5ubk7Dw8LCdOjQIVsKAwAAAAAgP8nVEezU1FSlpKSkG37w4EH5+Pj866IAAAAAAMhvchWw77nnHo0aNcp673A4dPbsWcXGxqpFixZ21QYAAAAAQL6Rq1PER4wYoejoaFWtWlUXLlxQly5dtHPnThUvXlxff/213TUCAAAAAHDDy1XALlOmjDZt2qSpU6dq8+bNOnv2rHr16qWuXbs63fQMAAAAyK1YE5vXJQBAjuQqYF+4cEEeHh568MEH7a4HAAAAAIB8KVfXYJcoUULdu3fX4sWLlZqaandNAAAAAADkO7kK2JMnT9b58+fVunVrlS5dWgMGDNC6devsrg0AAAAAgHwjVwG7TZs2mjFjho4ePao33nhDf/zxh+rXr69KlSrplVdesbtGAAAAAABueLkK2Gl8fHzUs2dPLVq0SJs3b5aXl5eGDx9uV20AAAAAAOQb/ypgX7hwQdOnT1dMTIxuu+02nTx5Us8++6xdtQEAAAAAkG/k6i7iCxcu1JQpUzRnzhwVKlRI7du316JFi3TXXXfZXR8AAAAAAPlCrgJ2mzZt1KpVK33++edq0aKFChcubHddAAAAAADkK7kK2EePHpWPj4/dtQAAAAAAkG/lKmD7+PgoJSVFc+bM0datWyVJVatWVevWreXq6mprgQAAAAAA5Ae5Cti7du1SixYtdOjQIVWuXFmSFBcXp5CQEM2bN0/ly5e3tUgAAAAAAG50ubqLeP/+/VW+fHkdOHBAGzZs0IYNG7R//36VK1dO/fv3t7tGAAAAAABueLk6gv3TTz/p119/VUBAgDWsWLFievPNN3XHHXfYVhwAAAAAAPlFro5gu7u768yZM+mGnz17Vm5ubv+6KAAAAAAA8ptcBexWrVqpT58+Wr16tYwxMsbo119/1WOPPab777/f7hoBAAAAALjh5Spgf/DBBypfvrwaNGggDw8PeXh46I477lCFChX0/vvv210jAAAAAAA3vBxfg22MUWJioqZOnapDhw5Zj+kKDw9XhQoVbC8QAAAAAID8IFcBu0KFCvr9999VsWJFQjUAAAAAAMrFKeIuLi6qWLGiTpw4cT3qAQAAAAAgX8rVNdhvvvmmnn32WW3ZssXuegAAAAAAyJdy9Rzsbt266fz586pVq5bc3Nzk6enpNP7kyZO2FAcAAAAAQH6Rq4A9atQom8sAAAAAACB/y1XA7t69u911AAAAAACQr+UqYEtSSkqKvvnmG+sxXVWrVlXr1q1VqFCuZwkAAAAAQL6VqzT8+++/6/7771dCQoIqV64sSXrrrbcUGBio7777TtWrV7e1SAAAAAAAbnS5uot47969Va1aNR08eFAbNmzQhg0bdODAAdWsWVN9+vSxu0YAAAAAAG54uTqCHR8fr3Xr1qlo0aLWsKJFi+r111/X7bffbltxAAAAAADkF7k6gl2pUiUdPXo03fBjx46pQoUK/7ooAAAAAADym1wF7Li4OPXv318zZ87UwYMHdfDgQc2cOVMDBgzQW2+9pcTEROsFAAAAAMDNIFeniLdq1UqS1LFjRzkcDkmSMUaSdN9991nvHQ6HUlJS7KgTAAAAAIAbWq4C9tKlS+2uAwAAAACAfC1XATsyMtLuOgAAAAAAyNdyFbAl6cKFC9q8ebOOHTum1NRUp3H333//vy4MAAAAAID8JFcBe8G
CBerWrZv++uuvdOO47hoAAAAAcDPK1V3E+/Xrpw4dOujIkSNKTU11ehGuAQAAAAA3o1wF7KNHj2rgwIEqWbKk3fUAAAAAAJAv5Spgt2/fXsuWLbO5FAAAAAAA8q9cXYP94YcfqkOHDvr5559Vo0YNFS5c2Gl8//79bSkOAAAAAID8IlcB++uvv9aiRYvk4eGhZcuWyeFwWOMcDgcBGwAAAABw08lVwH7xxRc1fPhwDR48WC4uuTrLHAAAAACAAiVX6Tg5OVmdOnUiXAMAAAAA8P/lKiF3795d06ZNs7sWAAAAAADyrVydIp6SkqK3335bCxcuVM2aNdPd5GzkyJG2FAcAAAAAQH6Rq4D922+/6dZbb5UkbdmyxdaCAAAAAADIj3IVsJcuXWp3HQAAAAAA5Gs5Ctht27a9ZhuHw6FZs2bluiAAAAAAAPKjHAVsPz+/61UHAAAAAAD5Wo4C9sSJE69XHQAAAAAA5Gs8yBoAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwAQEbAAAAAAAbELABAAAAALABARsAAAAAABsQsAEAAAAAsAEBGwAAAAAAGxCwAQAAAACwQb4J2CdPnlTXrl3l6+srf39/9erVS2fPns1ymgsXLujJJ59UsWLF5O3trXbt2uno0aNObfr376+IiAi5u7urdu3a13EJAAAAAAAFWb4J2F27dtXvv/+uxYsXa+7cuVq+fLn69OmT5TRPP/20vvvuO82YMUM//fSTDh8+rLZt26Zr9/DDD6tTp07Xq3QAAAAAwE2gUF4XkB1bt27VggULtHbtWtWpU0eSNHr0aLVo0ULvvvuuSpUqlW6a06dP69NPP9WUKVN09913S5ImTpyo8PBw/frrr6pfv74k6YMPPpAkHT9+XJs3b/6PlggAAAAAUNDkiyPYq1atkr+/vxWuJSkqKkouLi5avXp1htOsX79eFy9eVFRUlDWsSpUqKlu2rFatWvWv6klKSlJiYqLTCwAAAABwc8sXATshIUElSpRwGlaoUCEFBAQoISEh02nc3Nzk7+/vNLxkyZKZTpNdcXFx8vPzs14hISH/an4AAAAAgPwvTwP24MGD5XA4snxt27YtL0vM0JAhQ3T69GnrdeDAgbwuCQAAAACQx/L0GuxnnnlGPXr0yLLNLbfcoqCgIB07dsxp+KVLl3Ty5EkFBQVlOF1QUJCSk5N16tQpp6PYR48ezXSa7HJ3d5e7u/u/mgcAAAAAoGDJ04AdGBiowMDAa7Zr0KCBTp06pfXr1ysiIkKS9OOPPyo1NVX16tXLcJqIiAgVLlxYS5YsUbt27SRJ27dv1/79+9WgQQP7FgIAAAAAAOWTa7DDw8PVvHlzPfLII1qzZo1WrFihvn376oEHHrDuIH7o0CFVqVJFa9askST5+fmpV69eGjhwoJYuXar169erZ8+eatCggXUHcUnatWuX4uPjlZCQoH/++Ufx8fGKj49XcnJyniwrAAAAACB/yheP6ZKkr776Sn379lXTpk3l4uKidu3aWY/YkqSLFy9q+/btOn/+vDXsvffes9omJSUpOjpaY8eOdZpv79699dNPP1nvb731VknSnj17FBYWdn0XCgAAAABQYDiMMSavi8jvEhMT5efnp9OnT8vX1zevywEAAACQjwx3DM/rEvJcrInN6xKylN3Mly9OEQcAAAAA4EZHwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAb5JmCfPHlSXbt2la+vr/z9/dWrVy+dPXs2y2kuXLigJ598UsWKFZO3t7fatWuno0ePWuM3bdqkzp07KyQkRJ6engoPD9f7779/vRcFAAAAAFAA5ZuA3bVrV/3+++9avHix5s6dq+XLl6tPnz5ZTvP000/ru+++04wZM/TTTz/p8OHDatu2rTV+/fr1KlGihL788kv9/vvvevHFFzVkyBB9+OGH13txAAAAAAAFjMMYY/K6iGvZunWrqlatqrVr16pOnTqSpAULFqhFixY6ePCgSpUqlW6a06dPKzAwUFOmTFH79u0lSdu2bVN4eLhWrVql+vXrZ/hZTz75pLZu3aoff/wx2/UlJibKz89Pp0+flq+vby6WEAAAAMDNarhjeF6XkOdiTWxel5Cl7Ga+fHEEe9WqVfL397fCtSRFRUXJxcVFq1evznCa9evX6+LFi4qKirKGValSRWXLltWqVasy/azTp08rICDAvuIBAAAAADeFQnldQHYkJCSoRIkSTsMKFSqkgIAAJSQkZDqNm5ub/P39nYaXLFky02lWrlypadOmad68eVnWk5SUpKSkJOt9YmJiNpYCAAAAAFCQ5ekR7MGDB8vhcGT52rZt239Sy5YtW9S6dWvFxsbqnnvuybJtXFyc/Pz8rFdISMh/UiMAAAAA4MaVp0ewn3nmGfXo0SPLNrfccouCgoJ07Ngxp+GXLl3SyZMnFRQUlOF0QUFBSk5O1qlTp5yOYh89ejTdNH/88YeaNm2qPn
[binary content omitted: base64-encoded image/png "display_data" outputs from a Jupyter notebook cell — matplotlib (v3.7.1) figures; no captions, titles, or axis labels are recoverable from the encoded data]
1tMnz4dJSUlPJnnz59j4sSJCAgIgL6+PgQCAf7880+Z8c2dOxc+Pj4wNzeHSCSCs7MzIiIi8Pr1a5nyjx8/Rs+ePVGlShVoa2vD2dkZU6ZMkZJLTU1Fq1atoKenBxMTE/Tp00cqTmXrfEWKi4tRq1YtCAQCLF68WK4cAERHR0MgEEBPT48XrooeAYDVq1cjNDQUtra2EAgE6Nevn9w0ldUjjC+v34BydRFQ7d3Fx8ejcePG0NHRgbGxMUJCQmT2gceMGQNPT0+YmJhAR0cHrq6umDFjBvLy8qRkHz58iO7du8Pa2ho6OjpwcXHBrFmzkJ+fz8koqgsCgQCDBw/mZPv166dQ9tmzZ5xs06ZNZcq0atWKl8c///xTbnyXLl3iyZ44cYLTb+rq6rC3t5f7PJXpJ6haFwFg48aNcHV15XRWZGSk3DxIaNGiBQQCAUaNGvWX4jx58iQCAgJgZmYGIyMjeHt7Y9u2bTyZp0+fYubMmfD29oaxsTHMzMzQtGlTnDx5Uio+ee9IIBBAU1Oz0nJ9S1Q6J/x/hR07diA5ORkRERH/aDopKSmYOXMmmjZtqrCSMRhfi379+mHv3r2IiIiAs7MzNm/ejDZt2uDMmTNo3LhxpfevXr2a1xlTV1fnXS8oKMDdu3fRpk0b2NvbQ01NDRcvXsSYMWNw+fJl7Nixg5MVi8Vo27Ytbt26hXHjxsHMzAxRUVFo2rQpkpKS4OzsDAAoLS1Fy5Ytce3aNYwcORLOzs44fvw4RowYgffv32Py5MlS+Zw1axYcHBx4YUZGRtz/ExMTMWXKFLRp0wZTp06FhoYGYmNj0b17d67eSsjIyEBubi769u2LqlWrIj8/H7GxsQgODsbatWsxZMgQTjYyMhKjR49G27ZtMX/+fHz+/BmbN29Gu3btEBsbi86dO3OyU6ZMwfz58zF48GB4eXlh//796NmzJwQCAbp3787JTZgwAQsXLkRISAjCw8ORkpKCyMhI3L17F8ePH5cqe48ePdCmTRteWKNGjbj/5+fno3///vDx8cGwYcNQpUoVJCYmYvr06Th16hROnz4NgUCg8vtMSkqCr68vnJ2dMWHCBOjo6ODo0aMIDw/H48ePsWLFCk529erVSEpKgpeXF96+fStVBsbfxz9d5yW0aNECYWFhvLB69erx/s7Ly0NAQAA+fvyIyZMnQ1NTE8uWLYO/vz9u3rwJU1NTAKp9d+W5du0aNm/eLHOiAACOHj2Kjh07omnTpoiMjMSdO3cwe/ZsvHr1ijepd//+fSxYsADOzs6oU6cOEhMT5T6fpKQkeHh4oHv37tDX10dqairWr1+Pw4cP4+bNm9DV1eVkb968iaZNm6JatWr4+eefYWpqiszMTDx9+pQXZ1ZWFvz8/GBoaIi5c+ciLy8Pixcvxp07d3DlyhVoaWnx5Cur8xWJjIxEZmam3OsS8vLyMH78eF4ZJKiiRwBgwYIFyM3Nhbe3N54/fy43TVX0COPL67eydRFQ/t0dOnQIHTp0gKenJ+bPn4+cnBysWLECjRs3xo0bN2Bubs7JXr16FU2aNEH//v0hEolw48YNzJ8/HydPnsS5c+egpla2Rvj06VN4e3vD0NAQo0aNgomJCfedJSUlYf/+/QAAc3NzqYEcABw7dgzR0dEICgriwoYOHYrmzZvz5IgIw4YNg729PapVq8a7Zm1tjXnz5vHCqlatKvMZjB49Gl5eXryw6tWr8/7esWMHYmJi4OnpKTceCcr0E1Sti2vXrsWwYcPQpUsX/PTTT0hISMDo0aORn5+PCRMmyMzHH3/8oVAPKhvngQMH0LFjRzRq1Iib6N29ezfCwsLw5s0bjBkzBgCwf/9+LFiwAB07dkTfvn1RUlKCrVu3okWLFvj999/Rv39/3jMaNGgQLz+fPn3CsGHDeO/9u4SUICkpiQBQUlKSMuLfPW3btiU7O7t/PJ09e/YQADpz5szfEl9eXt7fEs+XsGnTJgJAT5484cL8/f3J39//m+XpW7N9+/Z/Vb24fPkyAaBFixZxYQUFBeTk5ESNGjVSeO/06dMJAL1+/fqL0h41ahQBoOfPn3NhMTExBID27NnDhb169YqMjIyoR48eXNju3bsJAG3cuJEXZ5cuXUgkEtHLly+5MMl3evXqVYX5SUtLo/T0dF6YWCymZs2akVAorLSulZSUkLu7O9WsWZMX7uzsTF5eXiQWi7mwjx8/kp6eHgUHB3NhWVlZpKmpSSNHjuSl36RJE7K2tqaSkhIiIsrOziYNDQ3q06cPL53IyEgCQAcOHODCnjx5IvV+ZVFYWEgXLlyQCp85cyYBoPj4eIX3E8l+n4MHDyYtLS16+/YtT9bPz48MDAx4YZmZmVRaWkpERG5ubv9qPfI964GvVecB8L5leSxYsIAA0JUrV7iw1NRUUldXp0mTJlV6v6zvToJYLKZGjRrRgAEDyM7Ojtq2bSslU6tWLXJ3d6fi4mIubMqUKSQQCCg1NZULy8nJ4b7jL2nH9+7dSwBo586dXFhpaSnVrl2bGjZsSPn5+QrvHz58OGlra1NGRgYXFh8fTwBo7dq1XJiydb48L1++JENDQ5o1a1al906YMIFq1qxJvXr1Il1dXd41VfVIeno6pxd1dXWpb9++MtNURY98b3xtXfBX6rcqdVHZd1erVi2qXr06FRYWcmE3b94kNTU1+umnnyotz+LFiwkAJSYmcmFz5swhAJScnMyTDQsLIwD07t07hXEGBgaSgYEBFRQUKJRLSEggADRnzhxeuL+/P7m5uVWa9zNnzkj1Z+Tx7NkzKioqIiLF4xFl+wmq1MX8/HwyNTWV0o+SOi7reRYUFJC9vT2nMyrqelXibNGiBVWtWpU+f/7MhRUXF5OTkxPVrVuXC0tOTpZqez5//kwuLi5kbW0t/bAqsG3bNgJA0dHRlcr+3agyZv4uzNGfPXuGgQMHomrVqhAKhXBwcMDw4cNRVFQEAEhLS0NoaChntuLj4yNl3ikxBdm9ezfmzJkDa2triEQiBAYG4tGjR5xc06ZNcfjwYWRkZHDmCuVXqQsLCzF9+nRUr14dQqEQNjY2GD9+PAoLCzmZvn37QiQSITU1lZeHli1bwtjYGNnZ2di8eTNCQ0MBAAEBAVxa8kzaKiKZIUpJSUHPnj1hbGzMm9Xcvn076tevD21tbZiYmKB79+5Ss+kAcPnyZbRp0wbGxsbQ1dVF3bp1eTPJt2/fRr9+/eDo6AiRSARLS0sMGDCArU79D7J3716oq6vzVm5FIhEGDhyIxMREmd9PRYgIOTk5ICKV0pbUsQ8fPvDyY2FhwVsdNjc3R9euXbF//36uziUkJAAAb3VY8vfnz5+5mfCK5ObmyjVfdHBwgJ2dHS9MIBCgY8eOKCwslGmSVx51dXXY2NjwygMAOTk5qFKlCm/W2cDAAHp6etDW1ubC9u/fj+LiYowYMYKX/vDhw5GVlcXNOCcmJqKkpERm2QHINTn99OkTpz8roqWlBV9fX6nwTp06AYCUXpOFrPeZk5MDkUjEszgAACsrK17ZAcDGxoZb6WD8c3ztOl9QUCDX
/FGSHy8vL95KkYuLCwIDA7F79+5K45f13UnYtm0bkpOTMWfOHJn3pqSkICUlBUOGDIGGxv8ZAY4YMQJEhL1793Jh+vr6MDExqTQ/quTzxIkTSE5OxvTp06GtrY38/Hy5+ik2Nhbt2rWDra0tF9a8eXPUqFFD7nNSVOfLM3HiRNSsWRO9e/dWKPfw4UMsW7YMS5cu5T0vCarqETs7O55elIcqeuS/zl+p36rURWXe3bt375CSkoJOnTrxLDXc3d3h6uqqcHuEBHntCgBYWFjwZK2srKCmpiZlFVKe58+f48yZM+jcubNc6xgJO3bsgEAgQM+ePWVeLykpkWkqL4vc3FypLS7lqVq1qlJm0sr2E1Spi2fOnMHbt295cQLAyJEj8enTJ5lb5xYuXAixWIyxY8fKzKcqcebk5MDY2BhCoZAL09DQgJmZGa9+u7m5wczMjBefUChEmzZtkJWVhdzcXJl5kbBjxw7o6uqiQ4cOCuW+Nd+8F5SdnQ1vb2/s2rUL3bp1w8qVK9GnTx+cPXsW+fn5ePnyJXx9fTnz0zlz5uDz588IDg5GXFycVHzz589HXFwcxo4di0mTJuHSpUvo1asXd33KlCnw8PCAmZkZtm3bhm3btnH7w8ViMYKDg7F48WK0b98ekZGR6NixI5YtW4Zu3bpxcaxYsQLm5ubo27cv14iuXbsWJ06cQGRkJKpWrQo/Pz+MHj0aADB58mQuLVdXV5WeT2hoKPLz8zF37lxuT8ucOXMQFhYGZ2dnLF26FBERETh16hT8/Px4yis+Ph5+fn5ISUlBeHg4lixZgoCAABw6dIgnk5aWhv79+yMyMhLdu3fHrl270KZNG5UHWozvmxs3bqBGjRowMDDghXt7ewMoM5WsDEdHRxgaGkJfXx+9e/fGy5cvZcoVFRXhzZs3ePr0KeLi4rB48WLY2dnxzLJu3LgBT09PqcGYt7c38vPz8eDBAwBlE2Pq6upSja2Ojg6AMvPFigQEBMDAwAA6OjoIDg7Gw4cPKy0bAG7feEXlD5R1ct+8eYPHjx9j2bJlOHr0KAIDA3kyTZs2xbFjxxAZGYn09HTcu3cPI0eOxMePHxEeHs4ru66urpQ+kLyLGzducGUHINX5VFT2mTNnQk9PDyKRCF5eXjhx4sRfLrsy77Np06bIycnB0KFDkZqaioyMDKxZswZ//PEHJk2apFQeGH8vX7POb968Gbq6utDW1katWrWkTMbFYjFu376NBg0aSN3r7e2Nx48fS3WslPnugLJO74QJEzB58mRYWlrKzJ+kTlVMv2rVqrC2tuaufwlEhDdv3uDFixecGaa6ujrP14FkL6NQKESDBg2gq6sLHR0ddO/eHe/evePknj17hlevXsl9TrLyqWydv3LlCrZs2YLly5dXOqiKiIhAQECAlJl7ZSjSI8rA9IjyfGn9/pK6WBny2iqgrL3Kzs7m+WUByga2b968QXZ2Nk6cOIGpU6dCX1+fyz8Arg4NHDgQN2/exNOnTxETE4PVq1dj9OjRMrdKSNi1axfEYjFvDCCL4uJi7N69G76+vjK3jj548AC6urrQ19eHpaUlpk2bJtc3Rf/+/WFgYACRSISAgABcu3ZNYdqKULafIA9ZdVGeHqxfvz7U1NSk4szMzMT8+fOxYMECuZNgqsTZtGlT3L17F9OmTcOjR4/w+PFj/Prrr7h27RrGjx+vsDySMuno6HB9IFm8fv0a8fHx6Nixo8Lv47vg715aV5WwsDBSU1OTaT4qFospIiKCAFBCQgIXnpubSw4ODmRvb8+ZNEpMQVxdXXmmMCtWrCAAdOfOHS5MnvnHtm3bSE1NjZcWEdGaNWsIAM/c4/jx4wSAZs+eTWlpaaSnp0cdO3bk3fdXzNElpoDlzXKJysyC1NXVpUxm7ty5QxoaGlx4SUkJOTg4kJ2dHb1//54nW95UVpZZ3M6dOwkAnTt3jgtj5ujSfM9mqLJwc3OjZs2aSYXfvXuXANCaNWvk3rt8+XIaNWoURUdH0969eyk8PJw0NDTI2dmZPn78KCUv+YYkvwYNGtDt27d5Mrq6ujRgwACpew8fPkwA6NixY0REtGTJEikdQEQ0ceJEAkDt2rXjwmJiYqhfv360ZcsWiouLo6lTp5KOjg6ZmZlRZmamwufz9u1bqlKlCjVp0kTm9aFDh3LlUVNTo5CQECnTrZcvX1JgYCCv7GZmZnTx4kWeXNu2bcnR0VEqjU+fPhEAmjhxIhH9n+799ddfeXLHjh0jAKSnp8eFZWRkUFBQEK1evZoOHDhAy5cvJ1tbW1JTU6NDhw4pLDsRUfPmzcnAwEBKXxAp9z5LSkpo1KhRpKmpycmpq6vT6tWrFabLzNH/Ob5Wnff19aXly5fT/v37afXq1VS7dm0CQFFRUZzM69evCQDNmjVLKq3ffvuNANC9e/d44cp8d0REY8eOJQcHB87EUZY5+qJFiwiATD3g5eVFPj4+Mp+DMu348+fPefm0trammJgYnkxwcDABIFNTU+rVqxft3buXpk2bRhoaGuTr68u1y1evXiUAtHXrVql0xo0bRwC4cqpS58ViMXl7e3N9CkWm7IcOHSINDQ26e/cuERH17dtXyhxdHor0iARFJs1fqke+B762LvjS+v0ldVGCvHdXWlpKRkZGFBgYyAt/8+YN6erqEgC6du0a71piYiKv3tSsWVNmPfv1119JW1ubJztlyhSZ+StP/fr1ycrKihsnyOPgwYNS+krCgAEDaMaMGRQbG0tbt27l6nHXrl15chcuXKAuXbrQxo0baf/+/TRv3jwyNTUlkUhE169fl5u2InN0ZfsJ8pBVF0eOHEnq6uoy5c3Nzal79+68sJCQEPL19eX+hgxzdFXizMvLo65du5JAIODepY6ODu3bt09hWYiIHj58SCKRSGp7XkUk2/WOHDlSaZz/BKqMmb+pYzaxWIx9+/ahffv2MmfkBAIBjhw5Am9vb54ptp6eHoYMGYJJkyYhJSUFtWvX5q7179+ft2LWpEkTAGUm7eXlZLFnzx64urrCxcUFb9684cKbNWsGoMzkQmLyERQUhKFDh2LWrFnYu3cvRCIR1q5d+wVPQTHDhg3j/f3HH39ALBaja9euvDxaWlrC2dkZZ86cweTJk3Hjxg08efIEy5YtkzLrKj8DXn5m6/Pnz8jLy4OPjw8A4Pr169zzY/z7KSgo4JkASZCYaRUUFMi9t/wqLgB06dIF3t7e6NWrF6KiojBx4kTe9YCAAMTHx+PDhw84deoUbt26JeXNW9n89OzZE7NmzcKAAQPw22+/wdnZGSdOnEBUVJRUvrt27YquXbtyf3fs2BEtW7aEn58f5syZgzVr1sgsn2S2/MOHD3I9ekZERCAkJATZ2dnYvXs3SktLpcw/dXR0ULNmTVhbW6Ndu3bIzc3FsmXL0LlzZyQkJHAreMqW3dPTEw0bNsSCBQtQrVo1BAQEIDU1FcOHD4empiav7La2tlKO2vr06YNatWrh559/Rtu2bWWWCyjz8Hzy5ElERUVJ6QtAufeprq4OJycntGzZEqGhoRCJRNi5cyd+/PF
HWFpaomPHjnLTZ/wzfK06f+HCBZ7sgAEDUL9+fUyePBn9+vWDtrY2l5Yq+VHmu3vw4AFWrFiBnTt3yoxbQmXpS8xevwQTExPEx8fj8+fPuHHjBv744w8p01XJ315eXti+fTuAsmeqo6ODSZMm4dSpU2jevLnSz0koFKpU5zdv3ow7d+7wzO5lUVRUhDFjxmDYsGGoVauWSs+hMj2iDEyPKM+X1u8vqYuVoaamhqFDh2LBggWYNGkSBgwYgJycHIwfP55rJyvGWatWLcTHx+PTp0+4ePEiTp48KdPk297eHn5+fujSpQtMTU1x+PBhzJ07F5aWlnI9dT948ABJSUkYM2ZMpVufduzYAU1NTV7fQcLGjRt5f/fp0wdDhgzB+vXrMWbMGK6/7OvryzMJDw4ORkhICOrWrYtJkybh2LFjCvMgi7+iv+XVxYKCArkm/CKRiBfnmTNnEBsbi8uXL1eaT2XjFAqFqFGjBkJCQtC5c2eUlpZi3bp16N27N+Lj47nnWZH8/HyEhoZCW1tb5qkb5dmxYwfMzc3RokULhXLfBX/3qF4VXrx4UemMllAolDnrsW/fPgLAzfZKVsJ37drFk5PM9m7evJkLkzfz5Orqyptpq/gbPXo0Tz43N5csLS0JAO3YsUMqvr9jJbzirP3w4cMV5lHi2GDXrl0ynaNU5O3btzR69GiqUqWKVFwzZ87k5NhKuDTf8wqYLP7Kqpg8LC0tpWa+ZTFnzhzS09PjOVRSdiWciOjs2bNka2vLfZsGBga0ZcsWAkAdOnSoNH0fHx9ycnKSe33EiBFyV5/k0aJFCyknbK1ateKtzBOV1TETExPezLkqM9xZWVn0ww8/8FaFxo0bR97e3mRoaFhpPiUWA0+fPpV5fdeuXSQQCGjgwIGVxiVB1vucN28eWVpaUm5uLk+2adOmVLVqVZ4zrPKwlfB/jm9Z5yUWZBILlr+y+iZB1nfXqlUrqe/na6+EV+TChQsEgA4ePMiFtW3blgDQli1beLIZGRm89laVlXB5VKzzHz9+JAsLC/rll184GXkr4fPnzydjY2OeYzRlVsJV0SOKVsK/VI98D/yXV8KJyhyEDRw4kNTU1Lj2KigoiIYNG0YA6MaNGwrLEx0dTWpqanTz5k0ubOfOnaStrS3VfvXr1490dHTozZs3MuP65ZdfZK6+VyQ3N5d0dHSk2m1F3Lt3T6aFmiy6d+9OWlpanBO1ivwTK+GK6qKyq9bFxcVUu3ZtCgsL48ngL66EDx06lNzd3XnWCUVFReTs7Eze3t4y4ygpKaH27duTlpYWnTp1SqaMhMePHxMAGjVqlEK5f5J/nWO2vxN5x6eQEvubxWIx6tSpg/j4eJm/ik4Hbty4gVevXgEA7ty589czL4OKezDEYjEEAgGOHTsmM4+qrsZ37doV69evx7Bhw/DHH3/gxIkT3IydWCz+28rB+PZYWVnJPF5EElbZURmysLGx4e1nlEdISAjy8vJ4TtRUyY+fnx/S0tJw48YNnD9/Hs+ePeNmTGvUqPGX8jlz5kxERUVh/vz56NOnT6VxlS/T1atXub3raWlpOHbsGIKDg3lyJiYmaNy4MW+10MrKCi9evJDSS7LKXq1aNZw/fx4PHjzAuXPnkJWVhYULF+Lp06dKlx2AzPLHx8cjLCwMbdu2lWslIAtZ7zMqKgrNmjWTOk84ODgY2dnZMs+KZfyzfMs6X/G7MzExgVAo/Ev5qfjdnT59GseOHUN4eDjS09O5X0lJCQoKCpCens6tcFtZWfHSqpj+lzwLefj6+sLKygrR0dFcmCT+ig6mqlSpAgB4//69UvmUPEdFVHz2ixcvRlFREbp168Y9o6ysLC7d9PR0FBUV4ePHj5g9ezYGDx6MnJwcTjYvLw9EhPT0dK7PU54v1SOyYHpEeb60fv8ddVEWWlpa2LBhA7Kzs3Hu3Dncv38fx48fx8ePH6Gmpibly6EiEiet5Z24RUVFoV69erC2tubJBgcHIz8/X+6+6B07dqBmzZqoX7++wjT37duH/Pz8SveNl0dRmypLtqioSMqCRxlU6SdIqKwuWllZobS0VKoeFxUV4e3bt1ycW7duxf379zF06FCebgXKfHCkp6dz57QrG2dRURE2btyItm3b8qwTNDU10bp1a1y7dk2mc8nBgwfj0KFD2Lx5M2eZLA+JLxJV3ue35Juao5ubm8PAwADJyclyZezs7HD//n2p8Hv37nHXVUWeQxInJyfcunULgYGBlTot+fTpE/r3749atWrB19cXCxcuRKdOnXieJpXxBKoqTk5OICI4ODgo7IA7OTkBAJKTk6XOQ5Tw/v17nDp1CjNnzsQvv/zChSvrxIrx78LDwwNnzpxBTk4Oz5GLxNTIw8NDpfgknbKKZwHLQmKO9PHjR15+EhISIBaLeQr58uXL0NHRkfq+1dXVeXmUODqS932XJy0tjXdGqYTffvsNM2bMQEREhNzzMZUtk8RhlSyPx8XFxTxvqR4eHtiwYQNSU1N5Jp+K3oWzszN3dnpKSgqeP3+Ofv36VZpPiaf3iuW/fPkyOnXqhAYNGmD37t0yPSDLQ9b7fPnypdyyA1DoLZbxz/At63zF705NTQ116tSR6ajo8uXLcHR0hL6+vsI4K353krOuy5+wIOHZs2dwcHDAsmXLEBERwZX12rVrPMdP2dnZyMrK4nmY/jv4/Pkzr37Ur18f69evx7Nnz3hy2dnZAP7vOVWrVg3m5uYyn9OVK1eUemcVn31mZibev38PNzc3Kdm5c+di7ty5uHHjBoyMjJCXl4eFCxdi4cKFUrIODg7o0KED9u3bx4X9FT0iC6ZHlOdL6/ffURcVYWFhwU02lZaW4s8//0TDhg2lJlYqUlhYCLFYLNWuGBsbS8kq+h4uX76MR48eYdasWZXmNTo6Gnp6elKT54qQ16bKkxWJRJWWXRaq9hOUqYvl9WB5p4vXrl2DWCzmrmdmZqK4uBg//PCDVBxbt27F1q1bERcXh44dOyod59u3b1FSUiK3fovFYqlr48aNw6ZNm7B8+XL06NFD9oMqx44dO+Dk5CTXrP274+9eWlcVZR2zlXdslJeXR46OjjIds1U8o09icrVp0yYurFu3bmRkZCSV3ubNmwngn8MpIT8/n3d28MiRI0lTU5OSkpIoLy+PnJycyNXVlWcmdvToUQJAcXFxSj8PCfLOaH306BGpq6tTz549eWawRGXPS2KaU1paWqljto8fPxIAmjFjBu+6xDR3+vTpXBgzR5fmezZDlcWlS5ekzA8/f/5M1atXp4YNG3JhGRkZvDNzicrO766IxGxt6dKlXNjr16+lvkui/zvft7wpkWTLRPk6+/r1azIyMqJu3bopLMurV6/I1taW6tatyzNrkpVPiXl7xe0ku3btIjU1NerVq5fMPEsofw65hKKiIvL09CRtbW3ObPLVq1ekpqZGTZs25cX39OlT0tPTo1atWvHC5J3/Wa1aNbmma0Rldbtt27ako6PDO0dYVtmzsrLI2NiYd/4mEVFKSgqZmpqSm5ubwnNWVXmftWvXJhMTE555YElJCd
WvX5/09fW5c1ErwszR/zm+Rp2XJZeTk0NOTk5kZmbGc5Q6f/58AsBr7+/du0fq6uo0YcIELkzZ7y4jI4Pi4uKkfubm5tSgQQOKi4ujR48ecfe7uLiQu7s7r35NnTqVBAIBpaSkSKVHpNgcPS8vjz59+iQVLjknfNq0aVzY8+fPSSgUUuPGjXk6a9KkSQTwz2seNmwYaWtr80znT548SQB4DsqUrfNJSUlSz2jt2rUEgPr160dxcXH04cMH+vTpk8znGRAQQCKRiOLi4ujSpUtcvMrqkYooMmn+Uj3yPfC1dcFfqd/K1sWKKHp3spCks3fvXi7s/fv3Mt+j5JzwjRs3cmHt2rUjLS0tun//Pk+2Y8eOpKamRs+ePZOKZ/To0QSAV/dl8erVK9LQ0JDr6Ovjx49SWz/EYjF169ZN6j3Lqos3b94kTU1NCg4OlpsHReboqvQTlK2L+fn5ZGJiImV+37t3b9LR0eG2oaSmpsrUBQCoTZs2FBcXR9nZ2SrFWVJSQkZGRlSjRg1eu5Cbm0vW1tbk4uLCu3/hwoUEgCZPniy3POW5fv26lN79FvxrHLMBZbOwJ06cgL+/P4YMGQJXV1c8f/4ce/bswfnz5zFx4kTs3LkTrVu3xujRo2FiYoItW7bgyZMniI2N/aKzZuvXr4+YmBj89NNP8PLygp6eHtq3b48+ffpg9+7dGDZsGM6cOYMffvgBpaWluHfvHnbv3o3jx4+jQYMGOH36NKKiojB9+nR4enoCADZt2oSmTZti2rRp3Ayyh4cH1NXVsWDBAnz8+BFCoRDNmjXjzM++BCcnJ8yePRuTJk1Ceno6OnbsCH19fTx58gRxcXEYMmQIxo4dCzU1NaxevRrt27eHh4cH+vfvDysrK9y7dw93797F8ePHYWBgAD8/PyxcuBDFxcWoVq0aTpw4gSdPnnxx/hjfLw0bNkRoaCgmTZqEV69eoXr16tiyZQvS09N5zkfCwsJw9uxZngmUnZ0dunXrhjp16kAkEuH8+fPYtWsXPDw8MHToUE5u+/btWLNmDTp27AhHR0fk5ubi+PHjiI+PR/v27XmmRCEhIfDx8UH//v2RkpICMzMzREVFobS0FDNnzuTl3d/fH40aNUL16tXx4sULrFu3Dnl5eTh06BBPB/j6+qJevXpo0KABDA0Ncf36dfz++++wsbHB5MmTObkrV64gLCwMpqamCAwM5JmNSuJxdHQEAAwdOhQ5OTnw8/NDtWrV8OLFC0RHR+PevXtYsmQJN8Ntbm6OAQMGYMOGDQgMDETnzp2Rm5uLqKgoFBQU8I7Xsba2RkREBBYtWoTi4mJ4eXlh3759SEhIQHR0NG9bTXh4OD5//gwPDw8UFxdjx44d3FFD5c8RHj9+PB4/fozAwEBUrVoV6enpWLt2LT59+oQVK1Zwcrm5uWjZsiXev3+PcePGSZ0L6uTkhEaNGqn8PidOnIjevXujYcOGGDJkCLS1tbFz504kJSVh9uzZvHNRz507h3PnzgEoO07k06dPmD17NoCyrQd+fn5g/HW+Rp3/7bffOAertra2eP78OX7//XdkZmZi27ZtPIc9I0aMwPr169G2bVuMHTsWmpqaWLp0KSwsLPDzzz9zcsp+d7a2trw6ICEiIgIWFhZSTrwWLVqE4OBgBAUFoXv37khOTsaqVaswaNAgqWOAJN/j3bt3AZSdQ37+/HkAwNSpUwGUWY01b94c3bp1g4uLC9TU1HDt2jVs374d9vb2POd2lpaWmDJlCn755Re0atUKHTt2xK1bt7B+/Xr06NGDZ0U3efJk7NmzBwEBAQgPD0deXh4WLVqEOnXqoH///pycsnXe09OT66tIkJiWurm58Z6TLMdn+/btw5UrV3jXVNEjAHDw4EHcunULQNmq1+3bt7lnHBwcjLp16wJQTY/81/kr9VvZuggo/+62b9+O2NhY+Pn5QU9PDydPnsTu3bsxaNAgdOnShYvvzz//xOjRoxESEgJnZ2cUFRUhISEBf/zxBxo0aMA7w37cuHE4evQomjRpglGjRsHU1BSHDh3C0aNHMWjQICmT7NLSUsTExMDHx4ezCJVHTEwMSkpK5JouX79+HT169ECPHj1QvXp1FBQUIC4uDhcuXMCQIUN4dapbt27Q1taGr68vqlSpgpSUFKxbtw46OjpSjsRu376NAwcOAAAePXrEbQMBys5Vb9++PQDl+wmq1EVtbW38+uuvGDlyJEJDQ9GyZUskJCRg+/btmDNnDkxMTACUnRnv4uIi87k4ODjwdIGycaqrq2Ps2LGYOnUqfHx8EBYWhtLSUmzcuBFZWVmcw0oAiIuLw/jx4+Hs7AxXV1feNQBo0aKF1NYeST/u32KKDuDbr4QTlc3ShYWFkbm5OQmFQnJ0dKSRI0dyMyWPHz+mkJAQMjIyIpFIRN7e3lLHb6iyEp6Xl0c9e/YkIyMjAsCbhSoqKqIFCxaQm5sbCYVCMjY2pvr169PMmTPp48ePlJOTQ3Z2duTp6SnlIGTMmDGkpqZGiYmJXNj69evJ0dGR1NXVVXLuIm8lXEJsbCw1btyYdHV1SVdXl1xcXGjkyJFSs4Xnz5+nFi1akL6+Punq6lLdunUpMjKSu56VlUWdOnUiIyMjMjQ0pNDQUMrOzmYr4UrwPa+AyaOgoIDGjh1LlpaWJBQKycvLi+cAjajsvVZUDYMGDaJatWqRvr4+aWpqUvXq1WnChAmUk5PDk7t69SqFhoaSra0tCYVC0tXVJU9PT1q6dKlMhzrv3r2jgQMHkqmpKeno6JC/v79Mq5gxY8aQo6MjCYVCMjc3p549e9Ljx4+l5KZMmUIeHh5kaGhImpqaZGtrS8OHD6cXL17w5CTfs7xfeX2xc+dOat68OVlYWJCGhgYZGxtT8+bNaf/+/VLpFxcXU2RkJHl4eJCenh7p6elRQEAAnT59Wkq2tLSU5s6dS3Z2dqSlpUVubm60fft2KblNmzaRu7s76erqkr6+PgUGBsqMb8eOHeTn50fm5uakoaFBZmZm1KlTJ6nvU6IT5f3Kr3Ko+j6PHTtG/v7+ZGZmRlpaWlSnTh2ZzoEk+k3Wr7ze+TfwveuBf7rOnzhxglq0aEGWlpakqalJRkZGFBQUJNeBztOnTykkJIQMDAxIT0+P2rVrRw8fPuTJqPrdVUSWYzYJcXFx5OHhQUKhkKytrWnq1KkyV+UU1REJr1+/piFDhpCLiwvp6uqSlpYWOTs7U0REhMy2WywWU2RkJNWoUYM0NTXJxsZGbvrJyckUFBREOjo6ZGRkRL169ZLSY8rWeVkoOqKsIrIcs6miRyRxKKNviZTXI98b30IXfGn9JlKuLhIp/+4uX75Mfn5+ZGxsTCKRiNzd3WnNmjVSVi2PHj2isLAwcnR0JG1tbRKJROTm5kbTp0/nWZyWj7d169acjqlRowbNmTNHbhsEgFauXFnps/Px8aEqVarItTxLS0uj0NBQsre3J5FIRDo6OlS/fn2ZZVqxYgV5e3uTiYkJaWhokJWVFfXu3Vvm81TU/6hYb5TpJ6haF4mI1q1bRzVr1iQtLS1ycnKiZ
cuWKbQIlAAZjtlUjTM6Opq8vb3JyMiItLW1qWHDhjxLCSLFfQRZY6nS0lKqVq0aeXp6VlqGfxpVxswCoso9ll2/fh3169dHUlKS1Gwqg/FfJTo6Gr1792b1gsH4D8P0AIPBAJguYDAYqo2Z/+e8ozMYDAaDwWAwGAwGg/G98s33hP/XyMvLQ15enkIZc3NzuUetMRgMBoPBYDAYDAbj3wsbhH9lFi9eLOV0qiJPnjyBvb3918kQg8FgMBgMBoPBYDC+GmwQ/pUJCwtD48aNFcpYWlp+pdwwGAwGg8FgMBgMBuNrwgbhXxlHR0fu6CMGg8FgMBgMBoPBYPy3YI7ZGAwGg8FgMBgMBoPB+EqwQTiDwWAwGAwGg8FgMBhfCTYIZzAYDAaDwWAwGAwG4yvBBuEMBoPBYDAYDAaDwWB8JVRyzHbkyBGkpqb+U3lhMP5VXLhwAQCrFwzGfxmmBxgMBsB0AYPBKDtmWlkERESVCSUmJqJJkyYoLS39SxljMP7XUFNTg1gs/tbZYDAY3xCmBxgMBsB0AYPBANTV1ZGQkIBGjRoplFNqJVwoFKK0tBTbt2+Hq6vr35JBBuPfzpEjRzBt2jRWLxiM/zBMDzAYDIDpAgaDAaSmpqJ3794QCoWVyqpkju7q6gpPT88vzhiD8b+ExNyM1QsG478L0wMMBgNguoDBYKgGc8zGYDAYDAaDwWAwGAzGV4INwhkMBoPBYDAYDAaDwfhKsEE4g8FgMBgMBoPBYDAYXwk2CGcwGAwGg8FgMBgMBuMrwQbhDAaDwWAwGAwGg8FgfCXYIJyhNAKBADNmzFDpnn79+sHe3v4fyQ+DwWAwGAwGg8Fg/Nv4Tw7Cd+zYgeXLl//j6aSkpGDGjBlIT0//x9NiMCpSWFiICRMmoGrVqtDW1kbDhg0RHx+vcjwtWrSAQCDAqFGjeOEFBQUYOHAgateuDUNDQ+jp6cHd3R0rVqxAcXGxzLhOnjyJZs2awdDQEPr6+qhfvz5iYmJ4Mvb29hAIBFK/YcOG8eQ2b94sU04gEODFixdyy/P48WOIRCIIBAJcu3ZN6np8fDwaN24MHR0dGBsbIyQkRGYdVjafp06dwoABA1CjRg3o6OjA0dERgwYNwvPnz2Xmr6ioCHPnzoWLiwtEIhEsLCzQtm1bZGVlyS3TnDlzIBAIULt2balrxcXFmDlzJhwdHSEUCuHo6IjZs2ejpKRESlaVb+bixYvcc7K0tMTo0aORl5cnN4+K8pmfn4/ffvsNQUFBsLKygr6+PurVq4fVq1ejtLRUYZwMxXyPekDC4MGDIRAI0K5dO17427dvsWjRIvj5+cHc3BxGRkbw8fGR0hUA8Oeff8rVA5cuXZKb9ocPH1ClShUIBALs3btX6npSUhJatWoFAwMD6OvrIygoCDdv3uTJqPLdzpgxQ24+BQIBLly4wMleuXIFI0aMQP369aGpqQmBQCC3HC9fvkT//v1RpUoVaGtrw9PTE3v27JErHxMTg0aNGkFXVxdGRkbw9fXF6dOneTIfP37E+PHj4ezsDG1tbdjZ2WHgwIHIzMzkyd2/fx9jxoyBr68vp1Pl9Xc+f/6MefPmoVatWtDR0UG1atUQGhqKu3fv8uSeP3+OiRMnIiAgAPr6+hAIBPjzzz/llofxf3z48AFDhgyBubk5dHV1ERAQgOvXryt9f2pqKlq1agU9PT2YmJigT58+eP36tZScWCzGwoUL4eDgAJFIhLp162Lnzp1SMps3b0ZwcDBsbGygq6uL2rVrY/bs2fj8+TNPVlUdoko5Dxw4AE9PT4hEItja2mL69Oky2z5l4vzWeknZOFVtT+fMmYPg4GBYWFhUuth28uRJBAQEwMzMDEZGRvD29sa2bdtkyr58+RJDhw5FtWrVIBKJYG9vj4EDB/JkVNEhgHLv80v7ht8Clc4J/19hx44dSE5ORkRExD+aTkpKCmbOnImmTZv+T6wGFxQUQENDtU9m/fr1EIvF/1COGIro168f9u7di4iICDg7O2Pz5s1o06YNzpw5g8aNGysVxx9//IHExESZ1woKCnD37l20adMG9vb2UFNTw8WLFzFmzBhcvnwZO3bs4Mlv2rQJAwcORIsWLTB37lyoq6vj/v37ePr0qVTcHh4e+Pnnn3lhNWrUkJmPWbNmwcHBgRdmZGQkt0xjxoyBhoYGCgsLpa4dOnQIHTp0gKenJ+bPn4+cnBysWLECjRs3xo0bN2Bubq5yPidMmIB3794hNDQUzs7OSEtLw6pVq3Do0CHcvHkTlpaWnGxxcTHatm2LixcvYvDgwahbty7ev3+Py5cv4+PHj7C2tpbKc1ZWFubOnQtdXV2Z5e3duzf27NmDAQMGoEGDBrh06RKmTZuGzMxMrFu3jier7Ddz8+ZNBAYGwtXVFUuXLkVWVhYWL16Mhw8f4ujRozLzoSifaWlp+PHHHxEYGIiffvoJBgYGOH78OEaMGIFLly5hy5YtMuNkVM73pgckXLt2DZs3b4ZIJJK6lpiYiClTpqBNmzaYOnUqNDQ0EBsbi+7du3PtakVGjx4NLy8vXlj16tXllumXX35Bfn6+zGvXr19H48aNYWNjg+nTp0MsFiMqKgr+/v64cuUKatasCUC177Zz584y8zN58mTk5eXx8n7kyBFs2LABdevWhaOjIx48eCAznzk5OWjcuDFevnyJ8PBwWFpaYvfu3ejatSuio6PRs2dPnvyMGTMwa9YshISEoF+/figuLkZycjKePXvGyYjFYrRo0QIpKSkYMWIEatSogUePHiEqKgrHjx9Hamoq9PX1AZS9p5UrV6JWrVpwdXWVmqQoT69evXDgwAEMHjwYnp6eyM7Oxm+//YZGjRrhzp07sLOzA1DWKV+wYAGcnZ1Rp04dud8dg49YLEbbtm1x69YtjBs3DmZmZoiKikLTpk2RlJQEZ2dnhfdnZWXBz88PhoaGmDt3LvLy8rB48WLcuXMHV65cgZaWFic7ZcoUzJ8/H4MHD4aXlxf279+Pnj17QiAQoHv37gDKBoL9+/eHj48Phg0bhipVqiAxMRHTp0/HqVOncPr0aW5ySRUdoko5jx49io4dO6Jp06aIjIzEnTt3MHv2bLx69QqrV69WOc5vrZeUjVPV9nTq1KmwtLREvXr1cPz4cbnpHjhwAB07dkSjRo24ScXdu3cjLCwMb968wZgxYzjZp0+f4ocffgAADBs2DNWqVUN2djauXLnCi1MVHaLs+5Sgat/wm0BKkJSURAAoKSlJGfHvnrZt25Kdnd0/ns6ePXsIAJ05c+YfT6s8paWlVFBQ8FXT/C+yffv277ZeXL58mQDQokWLuLCCggJycnKiRo0aKRVHQUEB2dvb06xZswgAjRw5Uqn7Ro0aRQDo+fPnXNiTJ09IW1ubRo8eXen9dnZ21LZt20rlNm3aRADo6tWrSuWLiOjYsWOkpaVFU6dOlXlvrVq1qHr16lRYWMiF3bx5k9TU1Oinn376onyePXuWSktLpcIA0JQpU3jhCxYsIE1NTbp8+bLSZerWrRs1a9aM/P39yc3NjXftypUrBICmTZvG
C//5559JIBDQrVu3uDBVvpnWrVuTlZUVffz4kQtbv349AaDjx4+rnM/Xr19TcnKy1D39+/cnAPTw4cNKnsK3g+kB2cjSAxLEYjE1atSIBgwYILMepaWlUXp6utQ9zZo1I6FQSHl5eVz4mTNnCADt2bNHqXwREd25c4c0NDS4MlW8t02bNmRsbExv3rzhwrKzs0lPT486d+7Mhf3V7zYzM5MEAgENHjyYF/7ixQvKz88nIqKRI0eSvK7awoULCQCdOnWKCystLSUvLy+ytLTk6bHExEQSCAS0dOlShXm6cOECAaBVq1bxwn///XcCQH/88QcX9vbtW8rJySEiokWLFhEAevLkiVScWVlZBIDGjh3LCz99+jQB4OUpJyeH3r59S0Tfrg/1pXxLXRATEyP1Lb969YqMjIyoR48eld4/fPhw0tbWpoyMDC4sPj6eANDatWu5sKysLNLU1OTpAbFYTE2aNCFra2sqKSkhIqLCwkK6cOGCVDozZ84kABQfH19pnmTpEFXKWatWLXJ3d6fi4mIubMqUKSQQCCg1NVXlOL+1XlI2TlX1kqTOvn79mgDQ9OnTZcbbokULqlq1Kn3+/JkLKy4uJicnJ6pbty5PtnXr1uTg4MDTobJQVocQKf8+v6Rv+Heiypj5uzBHf/bsGQYOHIiqVatCKBTCwcEBw4cPR1FREYCyWZ3Q0FCYmJhAR0cHPj4+OHz4MC8OiZnG7t27MWfOHFhbW0MkEiEwMBCPHj3i5Jo2bYrDhw8jIyODM08ov0pdWFiI6dOno3r16hAKhbCxscH48eN5q2Z9+/aFSCRCamoqLw8tW7aEsbExsrOzsXnzZoSGhgIAAgICuLSUNauSzDLdu3cPXbt2hYGBAUxNTREeHi5lyiMxEYyOjoabmxuEQiGOHTvGPdsBAwbAwsICQqEQbm5u+P3336XS+/z5M2bMmIEaNWpAJBLBysoKnTt3xuPHj3nplDdTyc3NRUREBOzt7SEUClGlShW0aNGCZ8Ija0/4p0+f8PPPP8PGxgZCoRA1a9bE4sWLQUQyy7Vv3z7Url2by7+kbAz57N27F+rq6hgyZAgXJhKJMHDgQCQmJspcfa7IwoULIRaLMXbsWJXSlrzvDx8+cGFr1qxBaWkpZs2aBQDIy8uTet8VKSoqwqdPn5RKMzc3t1Kz5eLiYoSHhyM8PBxOTk5S19+9e4eUlBR06tSJN+vv7u4OV1dX7Nq164vy6efnBzU1NakwExMTng4Ri8VYsWIFOnXqBG9vb5SUlFQ6I37u3Dns3btX7vaahIQEAOBWJyR0794dRMQzo1P2m8nJyUF8fDx69+4NAwMDTjYsLAx6enrYvXu3yvk0MzODm5ubVHinTp0AQErXMpTje9MDErZt24bk5GTMmTNH5r0ODg7cyqgEgUCAjh07orCwEGlpaTLvy83NlWlqWpHw8HB06tQJTZo0kXk9ISEBzZs3h6mpKRdmZWUFf39/HDp0iNt28Ve/2507d4KI0KtXL164hYUFtLW1Ky1HQkICzM3N0axZMy5MTU0NXbt2xYsXL3D27FkufPny5bC0tER4eDiISO7WkZycHC4P5bGysgIAXr5MTEy4VXFF5ObmKh2nvr4+TExMKo2TwWfv3r2wsLBA586duTBzc3N07doV+/fvl2n5VZ7Y2Fi0a9cOtra2XFjz5s1Ro0YNnk7fv38/iouLMWLECC5MIBBg+PDhyMrK4iwXtLS04OvrK5WOKjpdlg5RtpwpKSlISUnBkCFDeBacI0aMABHxTL2VjfNb6yVl41RVLylrqZuTkwNjY2MIhUIuTENDA2ZmZrw6fO/ePRw9ehTjxo2DqakpPn/+LHdrkrI6RJX3WR5l+obfkm8+CM/Ozoa3tzd27dqFbt26YeXKlejTpw/Onj2L/Px8vHz5Er6+vpwpxZw5c/D582cEBwcjLi5OKr758+cjLi4OY8eOxaRJk3Dp0iVeAzdlyhR4eHjAzMwM27Ztw7Zt27iOoVgsRnBwMBYvXoz27dsjMjISHTt2xLJly9CtWzcujhUrVsDc3Bx9+/blXu7atWtx4sQJREZGomrVqvDz88Po0aMBlJmbSdJydXVV6fl07dqV20vVpk0brFy5ktehknD69GmMGTMG3bp1w4oVK2Bvb4+XL1/Cx8cHJ0+exKhRo7BixQpUr14dAwcO5HWGS0tL0a5dO8ycORP169fHkiVLEB4ejo8fPyI5OVlu3oYNG4bVq1ejS5cuiIqKwtixY6Gtra1QuRIRgoODsWzZMrRq1QpLly5FzZo1MW7cOPz0009S8ufPn8eIESPQvXt3LFy4EJ8/f0aXLl3w9u1blZ7jf40bN26gRo0avEESAHh7ewOAQpMfAMjMzMT8+fOxYMGCSjuDRUVFePPmDZ4+fYq4uDgsXrwYdnZ2PPOokydPwsXFBUeOHIG1tTX09fVhamqKadOmydyucPr0aejo6EBPTw/29vZYsWKF3PQDAgJgYGAAHR0dBAcH4+HDhzLlli9fjvfv32Pq1Kkyr0saWlnl1dHRQXZ2ttR+IlXyWZ68vDzk5eXBzMyMC0tJSUF2djbq1q2LIUOGQFdXF7q6uqhbty7OnDkjFUdpaSl+/PFHDBo0CHXq1FGpTDo6OgDK9r1KUPabuXPnDkpKStCgQQOenJaWFjw8PHDjxg2V8ykPyfMu/5wYyvO96QGgrFM0YcIETJ48mbcVQxkUfQ/9+/eHgYEBRCIRAgICZPp7AIA9e/bg4sWLWLhwodx0CgsL5eqBoqIihe1iZfksT3R0NGxsbODn56dQ7kvyCfDr96lTp+Dl5YWVK1fC3Nwc+vr6sLKywqpVq3j3NmjQALq6upg2bRpOnz6NZ8+e4ezZsxg/fjy8vLzQvHlzlfPp5OQEa2trLFmyBAcPHkRWVhauXLmCYcOGwcHBQWqSkKE6N27cgKenp9SEr7e3N/Lz8+VuaQDKFmtevXolpdMl95fX6Tdu3ICurq5UX1aiUyrq/4ooqhvK6BBlyynJR8UyVa1aFdbW1lJl+tJnV1mZ/k69pGqcquRTGZo2bYq7d+9i2rRpePToER4/foxff/0V165dw/jx4zm5kydPAiibdAsMDIS2tja0tbXRunXrL/aRpcr7lKBs3/Cb8ncvratKWFgYqampyTQbEIvFFBERQQAoISGBC8/NzSUHBweyt7fnzDwlZhqurq48E6wVK1YQALpz5w4XJs8cfdu2baSmpsZLi4hozZo1BIBnWnP8+HECQLNnz6a0tDTS09Ojjh078u77K6ZU06dPJwAUHBzMCx8xYgQB4JmRAiA1NTW6e/cuT3bgwIFkZWUlZQ7SvXt3MjQ05MzdJGZmsszUxGIxL53yZiqGhoaVmib27duX96z37dvHPbfyhISEkEAgoEePHvHS09LS4oXdunWLAFBkZKTCdL8G37MZqpubGzVr1kwq/O7duwSA1qxZo/D+kJAQ8vX15f6GAjPUnTt3EgDu16BBA7p9+zZPxsDAgIyNjUkoFNK0adNo79691LNnTwJAEydO5Mm2b9+eFixYQPv27aONGzd
SkyZNCACNHz+eJxcTE0P9+vWjLVu2UFxcHE2dOpV0dHTIzMyMMjMzebLPnz8nfX19zqxOlrlSaWkpGRkZUWBgIO/eN2/ekK6uLgGga9euqZxPWfz6669SZqR//PEHASBTU1NydnamTZs20aZNm8jZ2Zm0tLR4dZ6IaNWqVWRoaEivXr0iIpJp5h0bG0sAaNu2bbxwiU6rXbs2F6bsNyPRa+fOnZOSDQ0NJUtLS5XzKYvCwkKqVasWOTg48MzPvjeYHihDGT1ARDR27FhycHDgTBqV3dbx9u1bqlKlCjVp0oQXfuHCBerSpQtt3LiR9u/fT/PmzSNTU1MSiUR0/fp1nmx+fj7Z2trSpEmTiEi+eWedOnWoRo0anGktUdn3aGtrSwBo7969cvOp7HebnJyslL5QZI7+448/kpqampSJbPfu3QkAjRo1ioiI3r17x+kWPT09WrRoEcXExFCrVq1kfgeHDh0iKysr3vts2bIl5ebmys1nZaakly9fJicnJ16c9evXl7ldQQIzR1ceXV1dGjBggFT44cOHCQAdO3ZM7r1Xr14lALR161apa+PGjSMAXH1t27YtOTo6Ssl9+vRJZntekebNm5OBgQG9f/9e6poyOkTZckq+x4p9ASIiLy8v8vHxUTlOWXxNvaRKnBVRRi9VZo6el5dHXbt2JYFAwL0jHR0d2rdvH09u9OjRnL5p1aoVxcTE0KJFi0hPT4+cnJzo06dPMuNXpENUeZ+q9A3/CVQZM3/TQXhpaSkZGBhQhw4d5MrUqFGDvL29pcLnzZvHG1xLPtqFCxfy5K5fv04AaP/+/VyYvEF4cHAwubm50evXr3m/Bw8eyBw4Dh06lLS0tMjDw4PMzMzo5cuXvOt/xyC84v7K1NRUAkDz5s3jwgBQQEAAT04sFpORkRENGTJEqjySAcj58+e552FmZlZpR7di5bSzs6MGDRrQs2fP5N5TcRA+ZMgQUldX5/aASEhMTJQaXAOgNm3aSMVpYGBAY8aMUZjXr8H33Pl2dHSk1q1bS4U/fvyYANCyZcvk3nv69GkSCAR05coVLkxR5/vFixcUHx9Pe/bsoWHDhlGjRo0oMTGRJ6OmpkYAaP78+bzwVq1akba2ttT3UB6xWEwtW7YkDQ0Nevr0qVw5IqKEhAQSCAQ0dOhQXnhYWBi5u7tzk3by9gxNmDCB60g8ePCArl27Rs2aNSNNTU2pycAvzefZs2dJQ0ODunbtygvfunUrN/FUvqHIyMggTU1N6tWrFxf25s0bMjExocWLF3Nhsga3BQUFZGdnRxYWFhQbG0vp6ekUExNDpqampKGhQU5OTpysst+MJJ+y9q336dOHDA0NVc6nLAYPHkwA6PDhw5XKfkuYHihDGT1w//590tTU5A1ilRmEl5aWUqtWrUhLS4tu3rypUJaI6OHDh6StrU0tW7bkhf/yyy9kZWXFDSbldXZXr15NAKhv37509+5dunPnDnXr1o3TAxUntcqj7Hc7adIkqQl1WSgahN+6dYs0NTXJ29ubLly4QI8ePaK5c+eSUCgkADRw4EAiKtt7Luk079q1i7u/tLSUatWqRdbW1rx4L1++TG3atKE5c+bQvn37aMaMGaSjo0MhISFy81nZIPzBgwfUpUsXmjhxIu3bt48WL15Mpqam1LhxY7k+bNggXHnU1NRo+PDhUuGnTp0iABQXFyf33nPnzhEAiomJkbo2bdo0AsANmps1a0aurq5ScqWlpQSAwsPD5aYzZ84cAkBRUVEyryvbl1CmnJJ91RX75URETZo0IXd3d5XjrMjX1kuqxFkRZfRSZYPw4uJimjp1KoWGhtLOnTtp+/bt5OfnR3p6erz3NGDAAAJAbm5uPH84kkmW9evXy4xfkQ5R5X3KQl7f8J/gX7Mn/PXr18jJyZF5rI6EjIwMzhNpeSSmMBkZGbzw8vtZAMDY2BgA8P79+0rz8/DhQ9y9exfm5ua8n8Tb8atXr3jyixcvhomJCW7evImVK1eiSpUqlaahKhU9Wjo5OUFNTU3KpKOiB8DXr1/jw4cPWLdunVR5+vfvzyvP48ePUbNmTZU9ny9cuBDJycmwsbGBt7c3ZsyYIXdPjISMjAxUrVpVag+Isu8TKHunyrzP/zLa2toy94BJ/AnIMy0tKSnB6NGj0adPHynvm/KwsLBA8+bNERISgtWrV6Ndu3Zo0aIFz3Rbkl6PHj149/bo0QMFBQUKTdgEAgHGjBmDkpKSSn0qNG7cGA0bNuTMoQDg0qVL2LZtG5YtWyZlblaRWbNmYeDAgVi4cCFq1KiBBg0aQENDgztWQ09P7y/l8969e+jUqRNq166NDRs28K5JntEPP/wAGxsbLtzW1haNGzfGxYsXubCpU6fCxMQEP/74o8LyiEQiHD58GKampujSpQvs7e0RFhaGX375BSYmJrzyKPvNSP6VJ1v+21I2nxVZtGgR1q9fj19//RVt2rRR6V7G//G96YHw8HD4+vqiS5cuKpXjxx9/xLFjx7Bhwwa4u7tXKl+9enV06NABZ86c4baMpaenY9GiRZgzZ47CegyUbbWaPHkyduzYATc3N9SpUwePHz/mTC7l3a/sd0tE2LFjB2rXro26detWWh551K1bFzt27MDjx4/xww8/oHr16li5ciW33UyST8l71tTUREhICHe/mpoaunXrhqysLO74sbS0NAQEBGDAgAGYPHkyOnTogOnTpyMqKgp79+6Ve/qBIj5+/IgmTZqgUaNGmDdvHjp06ICff/4ZsbGxOH/+PDZt2vTFz+C/RlFREV68eMH7lZaWfnFdL39NWf3/JenExMRg6tSpGDhwIIYPHy5TRtm+xN/dTn1pmb62XlI2zor8Xe3pqFGjcPDgQezatQvdu3dHr169cPLkSVhZWSE8PJyTkzyvrl278vpcoaGh0NDQ4PVllEWV9ykLWX3D74Fvvif870ZdXV1mOFXiBAoo2xNep04dxMfHy/yVd0QBlO1RkAxk79y589czrwTyzgut+AFK9tn27t1bbnkkxwd8KV27dkVaWhq3D37RokVwc3P7okZaHn/lff6XsbKyknkOtSSsatWqMu/bunUr7t+/j6FDhyI9PZ37AWV7OdPT0yt1FhYSEoK8vDzs37+fC5OkV9Exj2TiqrJJFcmg9N27dwrlJLLl5caPH48mTZrAwcGBK8+bN28AlD2P8mffamlpYcOGDcjOzsa5c+dw//59HD9+HB8/foSamprCo0Uqy+fTp08RFBQEQ0NDHDlyRGoiSt4zAsqek+QZPXz4EOvWrcPo0aORnZ3NlUni/CQ9PZ2XvpubG5KTk5GcnIyEhARkZ2dj8ODBePPmDe84NWW/GYkzJXmyEjlV8ylh8+bNmDBhAoYNGyZ3/z5DOb4nPXD69GkcO3YM4eHhvDhLSkpQUFCA9PR0zilYeWbOnImoqCjMnz8fffr0UbrsNjY2PKeJv/zyC6pVq4amTZtyaUs6969fv0Z6ejrPP8WcOXPw8uVLJCQk4Pbt27h69Sp3XdZxiap8txcuXEBGRoaUQ7YvISQkhDv6JzExERkZGXB0dO
Tl08TEBCKRCKamplJtakUdvHnzZnz+/Fnq7Pbg4GAu76oSGxuLly9fcnFI8Pf3h4GBwRfF+V/l4sWLsLKy4v2ePn36xXUdqFynm5iYcM64rKys8OLFC6k+mKJ04uPjERYWhrZt22LNmjVKllR2X+LvbqdUibM830ovKRNnef6u9rSoqAgbN25E27ZteQNrTU1NtG7dGteuXeOcacvry6irq8PU1PSLFtFUeZ/yqNg3/B74pueEm5ubw8DAQKGTEzs7O9y/f18q/N69e9x1VZE3kHVycsKtW7cQGBgoV0bCp0+f0L9/f9SqVQu+vr5YuHAhOnXqxFs1qCwOZXj48CFvlfvRo0cQi8WVejOUOF4pLS2t1JGKk5MTLl++jOLiYmhqaqqUPysrK4wYMQIjRozAq1ev4OnpiTlz5qB169Yy5e3s7HDy5Enk5ubyBiF/5X0ypPHw8MCZM2eQk5PDc8p0+fJl7rosMjMzUVxcLHOCZuvWrdi6dSvi4uLQsWNHuWkXFBQAKFv9kFC/fn08fPgQz5494zqIQJljRgBS529XRGJhUZmcRLa8XGZmJjIyMqSsRYCyjqWhoaGUB2cLCwuuASktLcWff/6Jhg0bVjpTLS+fb9++RVBQEAoLC3Hq1CmuQSlPnTp1oKmpyTuzV0J2djYX57NnzyAWizF69GjO+WN5HBwcEB4eznO+KBAIeN5Sjxw5ArFYzNMNyn4ztWvXhoaGBq5du4auXbtyckVFRbh58yYX9iX53L9/PwYNGoTOnTvjt99+k7qHoRrfkx6QTHaV90As4dmzZ3BwcMCyZcsQERHBhf/222+YMWMGIiIiMGHCBIVlrUhaWhpEIhFXZzMzM/Ho0SOe/pEgmWB///497xxZY2Nj3lnqJ0+ehLW1NVxcXHj3q/rdRkdHQyAQSJ3j/aVoaWnx+h6S1R5J/VZTU4OHhweuXr2KoqIi3ukPFXXwy5cvQURSq2oS78bKeHmuyMuXLwFAKk5JOl8S538Vd3d3xMfH88IsLS3h4eGBhIQEiMVi3iDp8uXL0NHRkTlxJKFatWowNzeX6eDrypUrPD3h4eGBDRs2IDU1FbVq1eKlI7lensuXL6NTp05o0KABdu/erZLFpay+hLLllOTj2rVrnNM4oOx7z8rK4jk4VvXZfWu9VFmcEv7O9vTt27coKSmRudpeXFwMsVjMXatfvz4ASPVlJI73lOnHVUSV9ymPin3D74K/275dVZR1zHbx4kUuPC8vjxwdHWU6Zqu4h+LJkycEgDZt2sSFdevWjYyMjKTS27x5MwH8MxEl5Ofn884AHDlyJGlqalJSUhLl5eWRk5MTubq68s7PO3r0aKV7ceRRmWO28vtPIGefXr9+/UhLS4vnlE6CxEkS0Zc5ZispKaEPHz5IyXt5eVGDBg24v+U5Zps7dy7vvm7dusl0zCarXHZ2dtS3b1+p8K/N97wX9NKlSwTwzwf+/PkzVa9enRo2bMiFZWRk8M5XTE1Npbi4OKkf/v/+/Li4OMrOziaisv1D5b8PCZKzPcs7HZPEMXnyZC6stLSUGjduTCYmJly9efv2Lc8ZEhFRUVER/fDDD6SlpcVz4lP+G5YgcaRS/jzy48ePS5Xnxx9/JAC0ePFiOnTokMJnOX/+fClnTKrkMy8vj7y9vUlfX5/n2E0WHTp0IHV1dd47SUlJIXV1dRoxYgQRlT13We/Izc2NbG1tKS4uTqZDLAn5+fnk6elJVlZWvL34yn4zRGV7+Svev2HDBgJAR48e/aJ8nj17lkQiEQUEBPD06PcO0wPK6YGMjAyZcZqbm1ODBg0oLi6Op/937dpFampq1KtXL5nxS5ClB27evEmampq89jMhIUEqbYmDxPHjx1NcXBwVFRXJTWfXrl2cziiPqt9tUVERmZqaSjlykoeiPeGyePDgAenr61O7du144cuWLSMAtG7dOi6soKCAHB0dqVatWlzY4sWLpfpMRETLly+X2lNeHkX7Offu3Stzr6mkP1DRV4gEtidceSTfZ/k+8OvXr8nIyIi6devGk3306BGvrhERDRs2jLS1tXn+SE6ePEkAaPXq1VzY06dP5Z4TXq1aNV67mJKSQqampuTm5kbv3r2Tm3dV+hKqlNPFxYXc3d15eZo6dSoJBAJKSUn5oji/pV5SNk6iL2tPFe0JLykpISMjI6pRowbP+XVubi5ZW1uTi4sLF/b582eqUqUKOTo68vw9rF27lgDQ7t27ZaZfmV8JZd+nsn3DfwpVxszfdCUcAObOnYsTJ07A398fQ4YMgaurK54/f449e/bg/PnzmDhxInbu3InWrVtj9OjRMDExwZYtW/DkyRPExsZWusdTFvXr10dMTAx++ukneHl5QU9PD+3bt0efPn2we/duDBs2DGfOnMEPP/yA0tJS3Lt3D7t378bx48fRoEEDnD59GlFRUZg+fTo8PT0BAJs2bULTpk0xbdo07ogBDw8PqKurY8GCBfj48SOEQiGaNWum0t7xJ0+eIDg4GK1atUJiYiK2b9+Onj17KrX/ZP78+Thz5gwaNmyIwYMHo1atWnj37h2uX7+OkydPcmYZYWFh2Lp1K3766SdcuXIFTZo0wadPn3Dy5EmMGDECHTp0kIo7NzcX1tbWCAkJgbu7O/T09HDy5ElcvXoVS5YskZun9u3bIyAgAFOmTEF6ejrc3d1x4sQJ7N+/HxERETLPb2aoTsOGDREaGopJkybh1atXqF69OrZs2YL09HRs3LiRkwsLC8PZs2c50zIXFxepVR4JDg4OvJWv7du3Y82aNejYsSMcHR2Rm5uL48ePIz4+Hu3bt+edXduhQwcEBgZi3rx5ePPmDdzd3bFv3z6cP38ea9eu5UzdDhw4gNmzZyMkJAQODg549+4dduzYgeTkZMydO5d3pJGvry/q1auHBg0awNDQENevX8fvv/8OGxsbTJ48mZMLCgqSKotk5dvf35935MX27dsRGxsLPz8/7pvevXs3Bg0axNvHqko+e/XqhStXrmDAgAFITU3lHeGnp6fHe6Zz587FqVOn0KxZM271eOXKlTAxMeHKZGZmJnMFUrKiXPFa165dUbVqVdSqVQs5OTn4/fffkZaWhsOHD/OsUZT9ZoAyU11fX19Ob2dlZWHJkiUICgpCq1atVM5nRkYGgoODIRAIEBISgj179vDuqVu37l/aO/tf5XvSA7a2tjJ9fERERMDCwoIX55UrVxAWFgZTU1MEBgYiOjqad4+vry+3ctStWzdoa2vD19cXVapUQUpKCtatWwcdHR3Mnz+fu6f8irYEyeqSl5cXL/1z585h1qxZCAoKgqmpKS5duoRNmzahVatWvL2PX/LdHj9+HG/fvlVoip6RkYFt27YBALc6OXv2bABl1mLlzV9r1aqF0NBQ2Nra4smTJ1i9ejVMTEykzH6HDh2KDRs2YOTIkXjw4AFsbW2xbds2ZGRk4ODBg5xcv379sHjxYgwdOhQ3btyAm5sbrl+/jg0bNsDNzY07axgoW6GMjIwE8H9m6qtWrYKRkRGMjIwwatQoAGXtvpubG2bNmoWMjAz4+
Pjg0aNHWLVqFaysrDifGxIkZb179y6AsnPlz58/DwBsi4ocQkJC4OPjg/79+yMlJQVmZmaIiopCaWkpZs6cyZMNDAwEAJ5vocmTJ2PPnj0ICAhAeHg48vLysGjRItSpU4fzIwQA1tbWiIiIwKJFi1BcXAwvLy/s27cPCQkJiI6O5rY75ObmomXLlnj//j3GjRuHw4cP8/Lg5OSERo0aAVCtL6FKORctWoTg4GAEBQWhe/fuSE5OxqpVqzBo0CDeEWvKxvmt9ZKycaqqlyR6QLLF6Ny5c1wd7NOnD+zs7KCuro6xY8di6tSp8PHxQVhYGEpLS7Fx40ZkZWVh+/btXHxCoRCLFi1C37594efnhz59+iAzMxMrVqxAkyZNeNZQyuoQVd6nsn3D74K/e1T/JWRkZFBYWBiZm5uTUCgkR0dHGjlyJDfb8vjxYwoJCSEjIyMSiUTk7e0ttXqlykp4Xl4e9ezZk4yMjAgAb6W2qKiIFixYQG5ubiQUCsnY2Jjq169PM2fOpI8fP1JOTg7Z2dmRp6enlDfxMWPGkJqaGs9L4Pr168nR0ZHU1dVVmtGVrISnpKRQSEgI6evrk7GxMY0aNUrKkygUeKx9+fIljRw5kmxsbEhTU5MsLS0pMDCQNxtOVLY6NmXKFHJwcODkQkJC6PHjx7x0JDNkhYWFNG7cOHJ3dyd9fX3S1dUld3d3Ka+XFVfCicpmzsaMGUNVq1YlTU1NcnZ2pkWLFknNKsorF1sJV46CggIaO3YsWVpaklAoJC8vL6mjNvz9/ZVaZZH1Lq5evUqhoaFka2tLQqGQdHV1ydPTk5YuXSrT035ubi6Fh4eTpaUlaWlpUZ06dWj79u08mWvXrlH79u2pWrVqpKWlRXp6etS4cWOZM6dTpkwhDw8PMjQ0JE1NTbK1taXhw4fTixcvKi2PPO/oly9fJj8/PzI2NiaRSETu7u60Zs0aqW9TlXza2dlxnokr/mSd0pCUlETNmzcnXV1d0tfXpw4dOtCDBw8qLZM8r+MLFiwgFxcXEolEZGxsTMHBwXTjxg2ZcSjzzUhISEggX19fEolEZG5uTiNHjlTo5V5RPiX6W95PnrfW7wGmB1TTAxWR5R1dUj/l/cq35ytWrCBvb28yMTEhDQ0NsrKyot69e9PDhw8rTVtev+HRo0cUFBREZmZmJBQKycXFhebNm8dbASp/vyrfbffu3UlTU5Pevn1bab5k/fz9/aXis7GxIS0tLapatSoNGzZMpgdhorL+QN++fcnExISEQiE1bNhQZv3OysqiAQMGkIODA2lpaZGVlRUNHjyYXr9+zZOT9K+U0W3v3r2jMWPGUI0aNUgoFJKZmRl1796d0tLSpNJX9Ey/Z761Lnj37h0NHDiQTE1NSUdHh/z9/WVamdrZ2clse5KTkykoKIh0dHTIyMiIevXqJbM9LS0tpblz55KdnR1paWmRm5ubVFuu6NsAwOvDqapDlC0nUZkVnoeHBwmFQrK2tqapU6fKtHhRJs5vrZeUjVNVvSTR/7J+Fccs0dHR5O3tTUZGRqStrU0NGzaUe2Tjzp07yd3dnYRCIVlYWNCoUaOk+giq6BAi5d7nX+kb/h2oMmYWEFXu4er69euoX78+kpKSuJVfxj/LjBkzMHPmTLx+/RpmZmbfOjsMGURHR6N3796sXjAY/2GYHmAwGADTBQwGQ7Ux8/+cd3QGg8FgMBgMBoPBYDC+V775nvD/Gnl5ecjLy1Mo891572MwGAwGg8FgMBgMxt8CG4R/ZRYvXizlPKIiT548+Uq5YTAYDAaDwWAwGAzG14QNwr8yYWFhMr0hlsfS0hIzZszAjBkzvk6mGAwGg8FgMBgMBoPxVWCD8K+Mo6Mjd4QBg8FgMBgMBoPBYDD+WzDHbAwGg8FgMBgMBoPBYHwl2CCcwWAwGAwGg8FgMBiMrwQbhDMYDAaDwWAwGAwGg/GVYINwBoPBYDAYDAaDwWAwvhJsEM5gMBgMBoPBYDAYDMZXQiXv6EeOHEFqauo/lRcG41/FhQsXALB6wWD8l2F6gMFgAEwXMBgM4MmTJ0rLCoiIKhNKTExEkyZNUFpa+pcyxmD8r6GmpgaxWPyts8FgML4hTA8wGAyA6QIGgwGoq6sjISEBjRo1Uiin1Eq4UChEaWkptm/fDldX178lgwzGv50jR45g2rRprF4wGP9hmB5gMBgA0wUMBgNITU1F7969IRQKK5VVyRzd1dUVnp6eX5wxBuN/CYm5GasXDMZ/F6YHGAwGwHQBg8FQDeaYjcFgMBgMBoPBYDAYjK8EG4QzGAwGg8FgMBgMBoPxlWCDcAaDwWAwGAwGg8FgML4SbBDOYDAYDAaDwWAwGAzGV4INwhkMBoPBYDAYDAaDwfhKsEE4g8FgMBgMBoPBYDAYXwk2CGf8Y2RnZ2PGjBm4efPmt87K/zyFhYWYMGECqlatCm1tbTRs2BDx8fEqx9OiRQsIBAKMGjWKF15QUICBAweidu3aMDQ0hJ6eHtzd3bFixQoUFxfzZM+dO4fg4GDY2NhAJBLB0tISrVq1woULF3hy+fn5+O233xAUFAQrKyvo6+ujXr16WL16NUpLS3my9+7dw/jx4+Hh4QF9fX1YWVmhbdu2uHbtmsxyPHv2DF27doWRkREMDAzQoUMHpKWlScmtXr0aoaGhsLW1hUAgQL9+/eQ+m6SkJLRr1w6WlpbQ09ND3bp1sXLlSqm82tvbQyAQSP2GDRvGkzt16hQGDBiAGjVqQEdHB46Ojhg0aBCeP38ulfbcuXPh4+MDc3NziEQiODs7IyIiAq9fv+bJzZgxQ2bakl/Fd7B79274+PjAyMgIpqam8Pf3x+HDh6XSnzNnDoKDg2FhYQGBQIAZM2bIfU4AEBMTg0aNGkFXVxdGRkbw9fXF6dOnueubN29WmM/o6OhKyyQSiRTmgfF//NP6QcLGjRvh6urKfaORkZFSMvLqh0AggLOzMyenis4BlK+f5Xn8+DFEIhEEAoGULlGlforFYqxZswYeHh7Q09ODhYUFWrdujYsXL0rJPnz4EN27d4e1tTV0dHTg4uKCWbNmIT8//4vivHv3LkJDQ+Ho6AgdHR2YmZnBz88PBw8elFlmsViM1atXw8PDA9ra2jA1NUWzZs1w69YtKbmFCxfCwcEBIpEIdevWxc6dO6XiU1SPW7RowZN9/vw5hgwZAgcHB2hra8PJyQk//fQT3r59KxXvqlWr4OrqCqFQiGrVquGnn37Cp0+fZJaJUcaHDx8wZMgQmJubQ1dXFwEBAbh+/brS96empqJVq1bQ09ODiYkJ+vTpI9XGqNIWq6K7P378iPHjx8PZ2Rna2tqws7PDwIEDkZmZyZOLi4tDy5YtUbVqVQiFQlhbWyMkJATJyclScSrbFgPK64+8vDxERETA2toaQqEQrq6uWL16tdxnevLkSTRr1gyGhobQ19dH/fr1ERMTI1dekU4ClH/HMTEx6N27N5ydnSEQCNC0aVOZ6fXr109hHX727Bkn+y37IdnZ2ejduzdq1qwJfX19GBkZwdvbG1u2bAERKZX+99hnUOmc
cAZDFbKzszFz5kzY29vDw8PjW2fnf5p+/fph7969iIiIgLOzMzZv3ow2bdrgzJkzaNy4sVJx/PHHH0hMTJR5raCgAHfv3kWbNm1gb28PNTU1XLx4EWPGjMHly5exY8cOTvbBgwdQU1PDsGHDYGlpiffv32P79u3w8/PD4cOH0apVKwBAWloafvzxRwQGBuKnn36CgYEBjh8/jhEjRuDSpUvYsmULF+eGDRuwceNGdOnSBSNGjMDHjx+xdu1a+Pj44NixY2jevDknm5eXh4CAAHz8+BGTJ0+GpqYmli1bBn9/f9y8eROmpqac7IIFC5Cbmwtvb2+ZnWsJSUlJ8PX1hbOzMyZMmAAdHR0cPXoU4eHhePz4MVasWMGT9/DwwM8//8wLq1GjBu/vCRMm4N27dwgNDYWzszPS0tKwatUqHDp0CDdv3oSlpSUvfQ8PD3Tv3h36+vpITU3F+vXrcfjwYdy8eRO6uroAgM6dO6N69epS+Z88eTLy8vLg5eXFhUVGRmL06NFo27Yt5s+fj8+fP2Pz5s1o164dYmNj0blzZ0526tSpsLS0RL169XD8+HG5zwkoawBnzZqFkJAQ9OvXD8XFxUhOTuY15n5+fti2bZvUvcuWLcOtW7cQGBgodW316tXQ09Pj/lZXV1eYD8b/8U/rBwBYu3Ythg0bhi5duuCnn35CQkICRo8ejfz8fEyYMIGTW758OfLy8nj3ZmRkYOrUqQgKCuLCVNE5qtZPCWPGjIGGhgYKCwulrqlSP8eNG4elS5eid+/eGDFiBD58+IC1a9fC398fFy5cgLe3NwDg6dOn8Pb2hqGhIUaNGgUTExMkJiZi+vTpSEpKwv79+1WOMyMjA7m5uejbty+qVq2K/Px8xMbGIjg4GGvXrsWQIUN45RowYACio6MRFhaGUaNG4dOnT7hx4wZevXrFk5syZQrmz5+PwYMHw8vLC/v370fPnj0hEAjQvXt3Tk5WPb527RpWrFjBe595eXlo1KgRPn36hBEjRsDGxga3bt3CqlWrcObMGSQlJUFNTY179gsXLkRISAjCw8ORkpKCyMhI3L17t1L9819FLBajbdu2uHXrFsaNGwczMzNERUWhadOmSEpK4k1wySIrKwt+fn4wNDTE3LlzkZeXh8WLF+POnTu4cuUKtLS0AKjWFkuoTHeLxWK0aNECKSkpGDFiBGrUqIFHjx4hKioKx48fR2pqKvT19QEAd+7cgbGxMcLDw2FmZoYXL17g999/h7e3NxITE+Hu7s6LW5m2WFn9UVpaipYtW+LatWsYOXIknJ2duT7L+/fvMXnyZF68mzZtwsCBA9GiRQvMnTsX6urquH//Pp4+fSr3PSjSSaq849WrVyMpKQleXl4yJ7kkDB06VOqdERGGDRsGe3t7VKtWjfecvlU/5M2bN8jKykJISAhsbW1RXFyM+Ph49OvXD/fv38fcuXOl0vtX9BlICZKSkggAJSUlKSPOqEBBQQGVlpZ+62wQEdGnT59khhcXF1NhYeHfmtbVq1cJAG3atOlvjfd7Yfv27d9Fvbh8+TIBoEWLFnFhBQUF5OTkRI0aNVIqjoKCArK3t6dZs2YRABo5cqRS940aNYoA0PPnzxXKffr0iSwsLKhly5Zc2OvXryk5OVlKtn///gSAHj58yIVdu3aNcnNzeXJv3rwhc3Nz+uGHH3jhCxYsIAB05coVLiw1NZXU1dVp0qRJPNn09HQSi8VERKSrq0t9+/aVmf/BgweTlpYWvX37lhfu5+dHBgYGvDA7Oztq27atzHjKc/bsWSm9cPbsWQJAU6ZMqfT+vXv3EgDauXOnQrnMzEwSCAQ0ePBgXrizszN5eXlx5Sci+vjxI+np6VFwcDBP9smTJ0RU9s4A0PTp02WmlZiYSAKBgJYuXVpp/iuSn59P+vr61KJFC1749OnTCQC9fv1a5Ti/Bt+LHpDH19AP+fn5ZGpqKvXd9+rVi3R1dendu3cK4//1118JAF24cKHSvMjSOarUTwnHjh0jLS0tmjp1KgGgq1ev8q4rWz+Li4tJW1ubQkJCeLJpaWkEgEaPHs2FzZkzhwBI6b2wsDACwD0nVeKURUlJCbm7u1PNmjV54TExMQSA/vjjD4X3Z2VlkaamJu89i8ViatKkCVlbW1NJSYnC+wcOHEgCgYCePn3KhUVHRxMAOnToEE/2l19+IQB0/fp1IiLKzs4mDQ0N6tOnD08uMjKSANCBAwcUpv0t+Za6QPJu9+zZw4W9evWKpFEtuwAALONJREFUjIyMqEePHpXeP3z4cNLW1qaMjAwuLD4+ngDQ2rVruTBV2mJldfeFCxcIAK1atYoX/vvvvyv1vb548YI0NDRo6NChvHBl22Jl9cfu3bsJAG3cuJEn16VLFxKJRPTy5Usu7MmTJ6StrV1pXS1PZTpJlXecmZnJ6S83Nzfy9/dXOh8JCQkEgObMmVOp7Nfsh8iiXbt2pKury9NJ37rPoMqY+Zuao2dkZGDEiBGoWbMmZxYVGhqK9PR0npzEdPHChQv46aefODOMTp06SZlBXLt2DS1btoSZmRm0tbXh4OCAAQMGcNc9PT15KzwAUKdOHQgEAty+fZsLi4mJgUAgQGpqKhf27NkzDBgwABYWFhAKhXBzc8Pvv//Oi+vPP/+EQCDArl27MHXqVFSrVg06OjrIyclR+rls374d3t7e0NHRgbGxMfz8/HDixAmeTFRUFNzc3CAUClG1alWMHDkSHz584Mk0bdoUtWvXRlJSEvz8/KCjo4PJkycjPT0dAoEAixcvxvLly+Hk5AShUIiUlBQAZeZGISEhMDExgUgkQoMGDXDgwAGpfH748AFjxoyBvb09ZxYUFhaGN2/e4M8//+Rmu/r378+Zg2zevJmXt5SUFAQEBEBHRwfVqlXDwoULpdIpLCzE9OnTUb16dQiFQtjY2GD8+PFSM4Xx8fFo3LgxjIyMoKenh5o1a0rNTEZGRsLNzY17tg0aNOCtqPwb2bt3L9TV1XkrHiKRCAMHDkRiYqLCWVcJCxcuhFgsxtixY1VK297eHgCkvr2K6OjowNzcnCdnZmYGNzc3KdlOnToBAK/u1a9fnzejCQCmpqZo0qQJTw4oex5eXl682VYXFxcEBgZi9+7dPFk7OzsIBAKFeQeAnJwciEQiGBkZ8cKtrKygra0t856ioiKF5pN+fn7cyk/5MBMTE6kyyULZZ79z504QEXr16sULz8nJQZUqVXjlNzAwgJ6enlSZJGlVxvLly2FpaYnw8HAQkdSKpyIOHjyI3NxcqXxKICLk5ORImZ4xFPM19MOZM2fw9u1bjBgxghc+cuRIfPr0SeYWh/Ls2LEDDg4O8PX1rTQvsr57VetncXExwsPDER4eDicnJ5npKFs/i4uLUVBQAAsLC55slSpVoKamxktf0g+oKGtlZQU1NTVutVGVOGWhrq4OGxsbKd2wdOlSeHt7o1OnThCLxXL10/79+1FcXMx7nwKBAMOHD0dWVpZCi4jCwkLExsbC398f1tbWSpUdAFemxMRElJSU8FbbAXB/79q1S1HR/7Ps3bsXFhY
WvP6tubk5unbtiv3798tcWS1PbGws2rVrB1tbWy6sefPmqFGjBq/dVKUtllCZ7lb225BHlSpVoKOjI7ctrKwtVlZ/JCQkAIDMb/Pz5888S5Y1a9agtLQUs2bNAlBmCaKo7VJGJ6nyjm1sbKT0l7Ls2LEDAoEAPXv2rFT2a/ZD5KWfn5+PoqIiqWv/hj7DNx2EX716FRcvXkT37t2xcuVKDBs2DKdOnULTpk2l9kcBwI8//ohbt25h+vTpGD58OA4ePMjbm/bq1SsEBQUhPT0dEydORGRkJHr16oVLly5xMk2aNMH58+e5v9+9e4e7d+9CTU2Nq2BAWWUzNzeHq6srAODly5fw8fHByZMnMWrUKKxYsQLVq1fHwIEDsXz5cqm8/vrrrzh8+DDGjh2LuXPnco1rZcycORN9+vSBpqYmZs2ahZkzZ8LGxoa3n3LGjBkYOXIkqlatiiVLlqBLly5Yu3YtgoKCpPbKvX37Fq1bt4aHhweWL1+OgIAA7tqmTZsQGRmJIUOGYMmSJTAxMcHdu3fh4+OD1NRUTJw4EUuWLIGuri46duyIuLg47t68vDw0adIEkZGRCAoKwooVKzBs2DDcu3cPWVlZcHV15ZTPkCFDsG3bNmzbtg1+fn5cHO/fv0erVq3g7u6OJUuWwMXFBRMmTMDRo0c5GbFYjODgYCxevBjt27dHZGQkOnbsiGXLlqFbt26c3N27d9GuXTsUFhZi1qxZWLJkCYKDg3l7T9avX4/Ro0ejVq1aWL58OWbOnAkPDw9cvnxZqXfzvXLjxg3UqFEDBgYGvHCJuWJle/IzMzMxf/58LFiwoFKlV1RUhDdv3uDp06eIi4vD4sWLYWdnJ9P0KCcnB2/evMG9e/cwefJkJCcnyzQzrsiLFy8AlA3SlZEtLycWi3H79m00aNBAStbb2xuPHz9Gbm5upfFWpGnTpsjJycHQoUORmpqKjIwMrFmzBn/88QcmTZokJX/69Gno6OhAT08P9vb2cs1hK5KXl4e8vDyZZScivHnzBi9evOBMfdXV1eXu9ZIQHR0NGxsbXt2TlOnYsWOIjIxEeno67t27h5EjR+Ljx48IDw9XKr8VOXXqFLy8vLBy5UqYm5tzewZXrVpV6b3R0dHQ1taWmiSV4OjoyO2r6927N16+fPlFefyv8TX0w40bNwBAqt7Vr18fampq3HV596ampsrt8Cmjc1Stn8uXL8f79+8xdepUhWWviKz6Kdljv3nzZkRHRyMzMxO3b99Gv379YGxszJv8kNTVgQMH4ubNm3j69CliYmKwevVqjB49mjPnVCVOCZ8+fcKbN2/w+PFjLFu2DEePHuXp25ycHFy5cgVeXl6YPHkyt8/e0dFRanLyxo0b0NXV5fo/EiTfjKL3eeTIEXz48EGqsy2Z1AgPD8elS5eQlZWFI0eOYM6cOejYsSNcXFwAgBtIVPzWdHR0AJSZxDKkuXHjBjw9PaUGXt7e3sjPz8eDBw/k3vvs2TO8evVKbrup6H1LqNgWl6cy3d2gQQPo6upi2rRpOH36NJ49e4azZ89i/Pjx8PLykmni/uHDB7x+/Rp37tzBoEGDkJOTI7N/oUxbrKz+KCwshLq6ulR/Xta3efLkSbi4uODIkSOwtraGvr4+TE1NMW3aNIjFYqk8KKOT/so7Vpbi4mLs3r0bvr6+Miffv3U/pKCgAG/evEF6ejq2bNmCTZs2oVGjRjLbpn9Fn+HvXlpXhfz8fKmwxMREAkBbt27lwjZt2kQAqHnz5jyThTFjxpC6ujp9+PCBiIji4uJkmnCUZ8+ePQSAUlJSiIjowIEDJBQKKTg4mLp168bJ1a1blzp16sT9PXDgQLKysqI3b97w4uvevTsZGhpyZTlz5gwBIEdHR5nlU8TDhw9JTU2NOnXqJGUGJyn3q1evSEtLi4KCgngyq1atIgD0+++/c2H+/v4EgNasWcOL68mTJwSADAwM6NWrV7xrgYGBVKdOHfr8+TMvbV9fX3J2dubCJCZkssyEJHlVZI4uyVv591xYWEiWlpbUpUsXLmzbtm2kpqZGCQkJvPvXrFnDM19ctmxZpeYnHTp0IDc3N7nXVeV7MUN1c3OjZs2aSYXfvXtX5vuvSEhICPn6+nJ/Q4E5+s6dOwkA92vQoAHdvn1bpmzLli05OS0tLRo6dCgVFBQozEthYSHVqlWLHBwcqLi4WKHsuXPnSCAQ0LRp07gwibn0rFmzpOR/++03AkD37t2TGZ8ic/SSkhIaNWoUaWpqcmVSV1en1atXS8m2b9+eFixYQPv27aONGzdSkyZNCACNHz9eYXmI/s8s99SpU1LXnj9/znv21tbWFBMTozC+5ORkuWm/fPmSAgMDeXGamZnRxYsX5canyBz93bt3BIBMTU1JT0+PFi1aRDExMdSqVatKv8O3b9+SlpYWde3aVera8uXLadSoURQdHU179+6l8PBw0tDQIGdnZ/r48aPC8n8Nvhc9II+voR9GjhxJ6urqMu83Nzen7t27y43/559/5rXJFVFG56hSP58/f076+vqcia2kf6Go3yBBXv18+PAheXp68vLp6OgoU9f8+uuvpK2tzZOVtf1ElTiJiIYOHcrJqampUUhICG8bwPXr17n6aWFhQVFRURQdHU3e3t4kEAjo6NGjnGzbtm3J0dFRKo1Pnz4RAJo4caLcZ9SlSxcSCoX0/v17qWsbNmwgIyMjXpn69u3L0/WS/uavv/7Ku/fYsWMEgPT09OSm/a35lrpAV1eXBgwYIBV++PBhAkDHjh2Te6+kr1a+PyZh3LhxBIDXJ6yIrLaYSDXdfejQIbKysuJ9Gy1btpQyfZdQs2ZNTk5PT4+mTp0q1W9Wti1WVn8sWbKEAEj1RydOnEgAqF27dlyYgYEBGRsbk1AopGnTptHevXupZ8+eMuuPsjrpS9+xKuboBw8eJAAUFRUl8/q37ofMmzePJxsYGEiZmZk8mW/dZ1BlzPxNHbOVn7koLi5GTk4OqlevDiMjI1y/fh19+vThyQ8ZMoRnstCkSRMsW7YMGRkZqFu3LmdKcujQIbi7u0NTU1MqzSZNmgAo8+Ds6uqKhIQEeHl5oUWLFpg3bx6Ashm25ORkzlMyESE2NhZdu3blZoEktGzZErt27cL169fxww8/cOF9+/ZVypSiPPv27YNYLMYvv/wiNdMlKffJkydRVFSEiIgInszgwYMxefJkHD58GP379+fChUIh7+/ydOnSBebm5tzf7969w+nTpzFr1izk5ubyVgxbtmyJ6dOn49mzZ6hWrRpiY2Ph7u7OmQ7Lymtl6OnpoXfv3tzfWlpa8Pb25nmx3rNnD1xdXeHi4sJ77s2aNQNQZgbp6+vLvfv9+/ejf//+Ms1wjIyMkJWVhatXr/JMlf/tFBQUQCgUSoVLPEEWFBTIvffMmTOIjY1V2hogICAA8fHx+PDhA06dOoVbt27JNfOaP38+fv75Zzx9+hRbtmxBUVERSkpKFMY/atQopKSk4PDhw9DQkK+eXr16hZ49e8LBwQHjx4/nwiVl/dLnIQ91dXU4OTmhZcuWCA0NhUgkws
6dO/Hjjz/C0tISHTt25GQrbt3o378/WrdujaVLl+LHH3/kmWiW59y5c5g5cya6du3Kfd/lMTExQXx8PD5//owbN27gjz/+qNTcW+JlXJaJt46ODmrWrAlra2u0a9cOubm5WLZsGTp37oyEhASZ1g2KkOTl7du32LVrF2epEhISgjp16mD27NkYOnSozHv37t2LoqIimfmsOBvepUsXeHt7o1evXoiKisLEiRNVyud/ja+hHwoKCuRae4lEIrlpiMVi7Nq1C/Xq1ZNadZWgjM5RpX5OmDCB83SuCorqp76+Ptzc3NCoUSMEBgbixYsXmD9/Pjp27IiEhATeCqG9vT38/PzQpUsXmJqa4vDhw5g7dy4sLS15ln2qxAkAERERCAkJQXZ2Nnbv3o3S0lKeiWb5+nnp0iU0bNgQABAcHAwHBwfMnj2bc5r5pd9MTk4ODh8+jDZt2kiZ9gJAtWrV4O3tjTZt2sDOzg4JCQlYuXIlzMzMsHjxYgBlWwYbNmyIBQsWoFq1aggICEBqaiqGDx8OTU3NL9Lf/wX+Sj1Xtt2UdV1eWwyoprvNzc1Rr149jBo1Cm5ubrh58yYWLlyI/v37Y8+ePVLpbtq0CTk5OUhLS8OmTZtQUFCA0tJSXt9P2bZYWf3Rs2dPzJo1CwMGDMBvv/0GZ2dnnDhxAlFRUbznCJTVN7FYjPnz53OOKbt06YJ3795hxYoVmDx5MudsTlmd9FfesbLs2LEDmpqa6Nq1q8zr37of0qNHDzRo0ACvX7/GoUOH8PLlS6ly/6v6DH/3qF4V8vPzadq0aWRtbU0CgYA3u9G/f39OTjIrdOnSJd79klXnP//8k4jKVmC7dOnCrfIGBwfT77//LjWD5+zsTL169SIiIh8fH5o0aRLdunWLANDjx4/p0KFDvFmoly9f8vIm6ydZEZbkSdaMYmUMGzaM1NTUFDpIk8wCPX78WOqah4cHNWjQgPvb399f5my2ZCW84mqhxIGPop/EeYpIJOKeoTwqWwl3cXGRCu/bty/Z29tzf7u6uirMj8TpRX5+Pv3www/cLFq3bt0oJiaGNzOakpJC1apVIwBUvXp1GjFiBJ0/f15hGRTxvayAfelKV3FxMdWuXZvCwsJ44VCwEl6ROXPmkJ6eXqWO2QoLC8nNzY1n5VCRhQsXylwBqUheXh55eXmRoaEh3blzh3ftn1oJnzdvHllaWkrNyjdt2pSqVq1a6aq9ZBVn27ZtMq+npqaSiYkJeXh4UE5OjsK4JEic2Rw8eFDmdbFYTHZ2dlS7dm2Z11u1asWbuScqW5E2MTGRuSJNpHglXHJNU1NTynHTzJkzCQDP6U95/Pz8yMTEhIqKimRel4WlpSUFBgYqLf9P8b3oAXl8Df3wpSvhp0+fJgC0ePFiZYpCRLJ1jrL1U+I48PTp05yMMivhiuqn5DmNGjWKF/7gwQPS1NTkrf7s3LmTtLW1eQ7LiIj69etHOjo6nKWdKnHKo0WLFjyHR5L22MHBQUq2f//+pKmpyT2nL10JlzjS2rt3r9S18+fPk7q6utRznjFjBgkEArp79y4XlpWVxbXn+P+rkuPGjSNvb28yNDSstOzfiq+hCwoLC+n58+e8X0lJyTdZCVfUFiuiou5+/Pgx6ejoSH03mzdvJgB05MgRhfG9e/eOLCws6Oeff640bVltsSrt+9mzZ8nW1pb7Ng0MDGjLli0EgDp06MDJ6erqymzzJLJnz54lItV00j+9Ep6bm0s6OjpS/QJFfIt+SHkGDx5MNjY2Slkef60+w7/GMduPP/6IOXPmoGvXrti9ezdOnDiB+Ph4mJqaytwzIc+9PP3/TfcCgQB79+5FYmIiRo0axTlSq1+/Pm+mpnHjxkhISEBBQQGSkpLQpEkT1K5dG0ZGRkhISEBCQgL09PRQr149AODy0rt3b8THx8v8lV8FByp3JPG1UJSPitck5Rw7dqzccqq6OqaIyt6nJE916tSRmx+J4xhtbW2cO3cOJ0+eRJ8+fXD79m1069YNLVq04M55dHV1xf3797Fr1y40btwYsbGxaNy4MaZPn/63lelbYGVlJfN4LUlY1apVZd63detW3L9/H0OHDkV6ejr3A4Dc3Fykp6fL9M1QnpCQEOTl5fEckshCS0sLwcHB+OOPP2TO1m7evBkTJkzAsGHDFO6JKioqQufOnXH79m3s378ftWvX5l03MTGBUCj8ouehiKioKDRr1kzKIU1wcDCys7OlnElWxMbGBkCZtUlFnj59iqCgIBgaGuLIkSPc7Hhl+Pr6wsrKinemdnkuXLiAjIwMmbPPaWlpOHbsGIKDg3nhJiYmaNy4sdQ5nsogceRoamoqVberVKkCoMwPREUyMzORkJCA0NBQmdZL8rCxsZH5PBl8voZ+sLKyQmlpqdQxV0VFRXj79q3cNKKjo6GmpoYePXooXR5ZOkfZ+jl+/Hg0adIEDg4OXHkkFlbPnz+XOpMYqLx+njt3DsnJyVJ1ydnZGa6urry6FBUVhXr16klZwwQHByM/P5/be6tKnIqe09WrV7l9opJ3UNH5FVBWP4uLizkLAysrK7x48ULKoVFl30x0dDQMDQ3Rrl07qWtr166FhYWF1L7j4OBgEBHv/PNq1arh/PnzePDgAc6dO4esrCwsXLgQT58+lTpe6r/GxYsXYWVlxfs9ffr0i+s58H8O0OTdL2lXy1NZW6yIirp78+bN+Pz5s9R3I/n+K/vejY2N0axZM7ltYcW0AX5brEr77ufnh7S0NNy4cQPnz5/Hs2fP4OPjA4B/9Jm8+laxLVRFJ/2Vd6wM+/btQ35+vlznqLL41v2QkJAQPH36FOfOnatU9nvsM3xTc/S9e/eib9++WLJkCRf2+fPnSr3sVYaPjw98fHwwZ84c7NixA7169cKuXbs4U48mTZpg06ZN2LVrF0pLS+Hr6ws1NTVucJ6amgpfX1+uIylxMFRaWirTQcTfhZOTE8RiMVJSUuSeq21nZwcAuH//PhwdHbnwoqIiPHny5C/lTxKfpqZmpfE4OTkhOTlZoYyyZumVpSM5N7iy+NTU1BAYGIjAwEAsXboUc+fOxZQpU3DmzBmuPLq6uujWrRu6devGNSJz5szBpEmTOJOefxseHh44c+YMcnJyeM6XJCak8r6lzMxMFBcXS00gAWUd8K1btyIuLo5nylkRyYD648ePleazoKAARITc3FzeBND+/fsxaNAgdO7cGb/99pvc+8ViMcLCwnDq1Cns3r0b/v7+UjJqamqoU6cOrl27JnXt/7V3p8FVlfcfwH8iSUhIQAIIDUhksdJQIBVtdEDQgogSXGHUuoDWrSojtToufeG462i1M9oMKpYyEhe6uM5YFIcR7VgBceo4BsWO4oIsilPFDcTn/4LJNZebYGLtwf79fGZ4wcm555577znPeb73PPf3PPfcczFo0KB2h9yW1q1bl/syp6XmQohfN8y++ScWLX/+EbFtaOjEiRPj888/jyeffDLXGWqvzz77rM33vrGxsc0Kp80FStp6TV/3elrTqVOnqK2tjWXLlsXmzZvzhievWbMmIgpff
0TbVVN3JKUUb7zxRu6LUtqWRfvQvI3ly5fH4Ycfnltv+fLl8eWXX7b6HM1VtA866KAOdR5ba3Pae36++eabsXr16hg4cGDBukcccUR07949r//RnvOzI+fSunXrokePHl+7n9/G+bn9+1RVVRV9+/aNd955p2DdNWvWRJcuXXJtY21tbcyZMyeampqipqYmt96Ojpl33303Fi9eHDNmzGh1yOw3aUP32muv3NzHL7/8crz77ru5nwl+X40cOTKeeOKJvGV9+/aN2traePrpp+PLL7/MG5L93HPPRVlZ2Q6/vOjXr1/07t271evm0qVLCz7v9lyL29Ja271u3bpIKRUcH+29vkZsO97b0w9p7Vrc0WNz1113zXtPFi1aFBGR128eNWpUrFq1Kt555528vvr218KOtEn/yWfcHo2NjVFeXl4Qir/OzuyHtLcP+p3tM3zbt9Y7orKyMs2YMSNvWfOQ1JbDQtsamtE89Hvx4sUppW1DUloWbkvpqyF3LecfXLVqVYqItPfee6fa2trc8htuuCHtueeeqbi4uGBI7IwZM1JxcXGrQ25aFjdr3qeW8/i1V0cKs02aNCnvtTY0NKSIwsJsrRUiax6O3nLe2GYHHXRQqqysTGvWrNnh62xPYbampqYUEemWW24pWKetfZs+fXqqrq7O/b95OFLLeSqbffLJJ2nTpk0ppVQwv2NKXw3RaZ6XdPuieiltG2rVqVOndg8Bbum7Mgz1H//4R8Hn+dlnn6UhQ4akurq63LLVq1enpqam3P+bmprSAw88UPAvItLhhx+eHnjggdxxsGHDhoJzK6Wv5uxtWaio5VyZzT744IO0xx57pD322CNv+VNPPZW6dOmSDj744B0WfkkppXPOOafNY6Gl66+/vqC9WLlyZdp1113TxRdf3ObjdjQc/cc//nGqrKzMO4a++OKLNGrUqFRRUZEbRv3+++8XDMXevHlzGj16dCouLs4bQrtp06b005/+NFVUVKTly5e3uV+bNm1KH3/8ccHy5vk5ty+G0/ycPXv2TAceeGCr21y/fn3q1KlTOuigg/I+17feeiuVl5enSZMmtfq4r5snvLlA4h133JFb9umnn6ZBgwalmpqaVh8zYsSINGDAgFaPr+Z93V7zTwu+yXzk37bvSjvQlizah08++SRVVlYWDCs86aSTUllZWavt81//+tcUUTjnbrOOtDntPT8XLlxY8HpmzpyZGxLfcg7r9p6fy5cvL+izpLSt39SpU6d09tln55bV19en4uLi9Morr+Ste9RRR6VOnTqld955p8PbbK293bx5c9pnn31SaWlp3hDb888/P0VEevzxx3PLNmzYkLp165YOP/zw3LK33nqrzXnC+/Xr1+o84TfffHPB59JS8+fW3F9rNmvWrFZ/btjS1q1b0+TJk1NZWVmbP2n5LtiZbcF9991X0PfcsGFD2m233fKKDqeU0muvvZZee+21vGVnn312Ki0tzStytWjRohQRBQXK2nstbm/bfdNNN7X608Xf/e53KSLSfffdl1vW2vH++uuvp4qKirzrXUeuxe1tP9p6jQMGDEgjRozI67c3t5WXXXZZbtnWrVvTmDFjUmVlZa6/05E2qSOfcUvtGY6+fv361Llz53TyySe3+ved3Q9p7VhKaVvxvV122SWtWrVqh+tm2Wf4nynMVl9fH3fffXd07949ampq4tlnn41FixZFz549v9H25s2bFw0NDXH00UfH4MGD46OPPoo777wzunXrlvft/JAhQ6Jv377xyiuvxMyZM3PLx44dmyug0FzArdn1118fixcvjrq6ujjjjDOipqYmNm7cGCtWrIhFixZ9K0MchgwZEr/5zW/iqquuigMPPDCOOeaYKCkpiWXLlkVVVVVcd9110bt377j00kvjiiuuiEmTJsURRxwRr7zySjQ0NMR+++2XV+jsm/j9738fY8aMieHDh8cZZ5wRgwYNinXr1sWzzz4bb7/9dvzzn/+MiIiLLroo/vznP8e0adNyQ/43btwYDz/8cMyePTtGjhwZgwcPjt122y1mz54dFRUV0bVr16irq2v1G7+2nHzyybFgwYI4++yzY/HixTF69OjYunVrrFy5MhYsWBALFy6MfffdN6688spYsmRJTJ48Oaqrq2P9+vXR0NAQ/fv3jzFjxkRExMSJE6Nv374xevTo6NOnTzQ1NcVtt90WkydP/kZ3R78r6urqYtq0aXHppZfG+vXrY8iQITFv3rx444034q677sqtd8opp8RTTz2VG2I4dOjQ3LQw2xs4cGDeHfD58+fH7Nmz46ijjopBgwbFRx99FAsXLownnngipkyZkleo6LDDDov+/ftHXV1d7L777vHmm2/G3LlzY82aNXH//ffn1lu9enUcccQRscsuu8TUqVMLiq+MGDEiRowYERHbpu9oaGiIAw44IMrKymL+/Pl56x599NG56X3OOeecuPPOO2Py5Mlx4YUXRlFRUdx8883Rp0+f+PWvf533uEceeSR3TG/ZsiVefPHFuPrqqyNi27fQzc9/ySWXxEknnRR1dXVx5plnRmlpadx7773x/PPPx9VXX50bRv3www/H1VdfHVOnTo2BAwfGxo0b45577omXXnopV3yp2YknnhhLly6N0047LZqamvLmWC0vL8+9/6tWrYoJEybEcccdF0OHDo1OnTrF8uXLY/78+bHnnnu2Oo3HwoUL4/3332/z7nLv3r3jtNNOizlz5sT48ePjmGOOiY8++igaGhri008/LZjW6e67747Vq1fnhh8vWbIk9z6dfPLJuRE6Z511VsyZMyfOPffcePXVV2PAgAG5xz7yyCMF+/HSSy/Fiy++GJdcckmbI12qq6vjuOOOi+HDh0eXLl3imWeeifvuuy9qa2vbLPTGV7JoH0pLS+Oqq66Kc889N6ZNmxaHHnpoPP300zF//vy45pprorKysmAbjY2NUVJSEscee2yrz9GRNqe95+fEiRMLnqf5LtO4cePyhkq39/wcNWpUHHLIITFv3rz48MMPY+LEifHuu+/GrbfeGqWlpTFr1qzc4y666KJ47LHH4sADD4zzzjsvevbsGY8++mg89thjcfrpp+dGBHRkm2eddVZ8+OGHMXbs2OjXr1+sXbs2GhsbY+XKlfHb3/42b4jtpZdeGgsWLIhjjz02LrjggujevXvMnj07tmzZEtdee21uvf79+8esWbPixhtvjC1btsR+++0XDz74YDz99NPR2NjY6k/JGhsbo6qqqs2pis4777yYO3duTJkyJWbOnBnV1dXx1FNPxb333huHHHJIrlBcxLbCSp999lnU1tbGli1b4p577omlS5fGvHnz8uax5itTp06N/fffP0499dR4+eWXo1evXtHQ0BBbt26NK664Im/d5qm8Wg6zvuyyy+JPf/pTHHzwwXH++efHpk2b4sYbb4zhw4fnFfftyLW4vW33jBkz4qabboqzzjorXnjhhRg2bFisWLEi5syZE8OGDcsrADx8+PAYP3581NbWRo8ePWLVqlVx1113xZYtW+L666/PrdeRa3F724+Ibe3E
AQccEEOGDIm1a9fGHXfcEZs2bYpHH3007+70kUceGePHj4/rrrsu3nvvvRg5cmQ8+OCD8cwzz8Ttt9+eGy3SkTapI5/xkiVLckO0N2zYEB9//HHumj127NiCqcLuv//++OKLL9rsM+zsfsg111wTf//732PSpEkxYMCA2LhxY/zlL3+JZcuWxcyZM/N+Kvs/1Wf4tlN9R3zwwQfp1FNPTb169Url5eXp0EMPTStXrkzV1dXf6E74ihUr0gknnJAGDBiQSkpK0u67757q6+tb/RZ72rRpKSLySutv3rw5lZWVpeLi4lanUlq3bl0699xz0x577JGKiopyP/JvedfnP7kT3uwPf/hD+slPfpJKSkpSjx490rhx49ITTzyRt85tt92Whg4dmoqKilKfPn3SL3/5y4IpQb7JnfCUthXJOOWUU1Lfvn1TUVFR6tevX6qvry8omvH++++n8847L/Xr1y8VFxen/v37p+nTp+d9m/jQQw+lmpqa1Llz57xvOtt7JzylbZ/LDTfckIYNG5Z7T0aNGpWuuOKK3HQDTz75ZDryyCNTVVVVKi4uTlVVVemEE05Ir776am47t99+exo7dmzq2bNnKikpSYMHD04XXXTRN56y4Lt0B+zTTz9NF154Yerbt28qKSlJ++23X0GRjuZp4b5OtFJ4admyZWnatGm5c6tr165pn332STfffHNBUbLbbrstjRkzJvXq1St17tw59e7dO02ZMiUtWbIkb73mc6Wtfy3vtk6fPn2H677++ut5237rrbfS1KlTU7du3VJ5eXmqr6/P+6a0Pdvd/lv5v/3tb2ncuHGpV69eqbi4OA0fPrygqNXy5cvTlClTcudEeXl5GjNmTFqwYEHBc1dXV7f53C3PgQ0bNqQzzzwzDR06NHXt2jUVFxenvfbaK82aNavNKfmOP/74VFRU1OodyGZbtmxJt956a6qtrU3l5eWpvLw8HXzwwXkFYpo1Hzut/dv+zta6devS9OnTU2VlZSopKUl1dXVtFoxpntqlrWnuUkrp9NNPTzU1NamioiIVFRWlIUOGpIsvvvgbjV75b/gutQNt+W+3D83uuOOOtPfee6fi4uI0ePDgdMstt7R6N/vf//536tKlSzrmmGPafJ6OtDkpte/8bE1b/Yv2np8pbRsJcOWVV6aamppUWlqaunfvnurr69MLL7xQ8HzPPfdcOuyww3LX1x/+8IfpmmuuKXhN7d3mvffemyZMmJD69OmTOnfunHr06JEmTJiQHnrooVZf77/+9a909NFHp27duqXS0tL0s5/9LC1durRgva1bt6Zrr702VVdXp+Li4jRs2LA0f/78Vre5cuXKFBHpggsuaPXvLdebOnVqrg9VXV2dLrzwwoI7bHPnzk0jR45MXbt2TRUVFWn8+PGttkvfNTu7Ldi4cWP6xS9+kXr27JnKysrSuHHjWi04WF1dXXAMp7RtKqmJEyemsrKytNtuu6UTTzwxrV27Nm+djlyLO9J2v/322+m0005LAwcOTMXFxekHP/hBOuOMMwqucZdffnnad999U48ePVLnzp1TVVVVOv744wuuIR25FqfU/vbjV7/6VRo0aFAqKSlJvXv3Tj//+c9bLZSc0rZCZ+eff37q27dvbpttnUMt7ahYZHs/48svv7xdfatm+++/f9p9991bHeWS0s7vhzz++OOpvr4+VVVVpaKiolRRUZFGjx6d5s6dW3CN2dl9ho5k5l1S2q7yRitWrFgRo0aNiueffz722Wefr1sdvhcaGxvjpJNOcl7A95h2AIjQFgAdy8w7tTo6AAAAfJ/s1N+Ef5+sXbt2h38vLS2N7t27Z7Q3AAAA7AxCeEa+buqh6dOnxx//+MdsdgYAAICdQgjPyPbzOm6vI/OkAgAA8L9JCM/IhAkTdvYuAAAAsJMpzAYAAAAZEcIBAAAgI0I4AAAAZEQIBwAAgIx0qDBbU1PTf2s/4H/O66+/HhHOC/g+0w4AEdoCoGPn/y4ppfR1K7355pvxox/9KD755JP/aMfg/5tdd901tm7durN3A9iJtANAhLYAiCgrK4umpqYYMGDADtdrVwiP2BbE33vvvW9l5+D/i88//zxKSkp29m4AO5F2AIjQFgARvXr1+toAHtGBEA4AAAD8ZxRmAwAAgIwI4QAAAJARIRwAAAAyIoQDAABARoRwAAAAyIgQDgAAABkRwgEAACAjQjgAAABkRAgHAACAjAjhAAAAkBEhHAAAADIihAMAAEBGhHAAAADIiBAOAAAAGRHCAQAAICNCOAAAAGRECAcAAICMCOEAAACQESEcAAAAMiKEAwAAQEaEcAAAAMiIEA4AAAAZEcIBAAAgI0I4AAAAZEQIBwAAgIwI4QAAAJARIRwAAAAyIoQDAABARoRwAAAAyIgQDgAAABkRwgEAACAjQjgAAABkRAgHAACAjAjhAAAAkBEhHAAAADIihAMAAEBGhHAAAADIiBAOAAAAGRHCAQAAICNCOAAAAGRECAcAAICMCOEAAACQESEcAAAAMiKEAwAAQEaEcAAAAMiIEA4AAAAZEcIBAAAgI0I4AAAAZEQIBwAAgIwI4QAAAJARIRwAAAAyIoQDAABARoRwAAAAyIgQDgAAABkRwgEAACAjQjgAAABkRAgHAACAjAjhAAAAkBEhHAAAADIihAMAAEBGhHAAAADIiBAOAAAAGRHCAQAAICNCOAAAAGRECAcAAICMCOEAAACQESEcAAAAMiKEAwAAQEaEcAAAAMiIEA4AAAAZEcIBAAAgI0I4AAAAZEQIBwAAgIwI4QAAAJARIRwAAAAyIoQDAABARoRwAAAAyIgQDgAAABkRwgEAACAjQjgAAABkRAgHAACAjAjhAAAAkBEhHAAAADIihAMAAEBGhHAAAADIiBAOAAAAGRHCAQAAICNCOAAAAGRECAcAAICMCOEAAACQESEcAAAAMiKEAwAAQEaEcAAAAMiIEA4AAAAZEcIBAAAgI0I4AAAAZEQIBwAAgIwI4QAAAJARIRwAAAAyIoQDAABARoRwAAAAyIgQDgAAABkRwgEAACAjQjgAAABkRAgHAACAjAjhAAAAkBEhHAAAADIihAMAAEBGhHAAAADIiBAOAAAAGRHCAQAAICNCOAAAAGRECAcAAICMCOEAAACQESEcAAAAMiKEAwAAQEaEcAAAAMiIEA4AAAAZEcIBAAAgI0I4AAAAZEQIBwAAgIwI4QAAAJARIRwAAAAyIoQDAABARoRwAAAAyIgQDgAAABkRwgEAACAjQjgAAABkRAgHAACAjAjhAAAAkBEhHAAAADIihAMAAEBGhHAAAADIiBAOAAAAGRHCAQAAICNCOAAAAGRECAcAAICMCOEAAACQkf8D+fIw6fVPsSIAAAAASUVORK5CYII=\n"},"metadata":{}}]},{"cell_type":"markdown","metadata":{"id":"qZCUBAPvL7ak"},"source":["# RAGAS Evaluation for chunking 
strategy"]},{"cell_type":"code","execution_count":24,"metadata":{"id":"3UOpKSFRL7ak","executionInfo":{"status":"ok","timestamp":1727205417584,"user_tz":-60,"elapsed":183202,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"}}},"outputs":[],"source":["from langchain.text_splitter import RecursiveCharacterTextSplitter\n","from langchain_experimental.text_splitter import SemanticChunker\n","\n","# Using fine_tuned_embedding_model from before\n","\n","# First Semantic chunker\n","\n","semantic_chunker = SemanticChunker(\n"," embedding_model,\n"," buffer_size=3,\n"," breakpoint_threshold_type=\"percentile\",\n"," breakpoint_threshold_amount=90,\n"," )\n","\n","# second Splitter RecursiveCharacterTextSplitter\n","recursive_text_splitter = RecursiveCharacterTextSplitter(\n"," chunk_size = 1024,\n"," chunk_overlap = 100,\n"," length_function = len,\n",")\n","recursive_chunked_docs = recursive_text_splitter.split_documents(documents)\n","\n","#### Create two rag chains with different chunking strategy\n","# Semantic chunker\n","rag_chain_semantic,retriever_semantic = await create_rag_chain(\n"," fine_tuned_embedding_model,\n"," semantic_chunker,\n"," \"snowflake-arctic-embed-l-ft\"\n",")\n","\n","# Recursive splitter\n","rag_chain_recursive, retriever_recursive = await create_rag_chain(\n"," fine_tuned_embedding_model,\n"," recursive_text_splitter,\n"," \"snowflake-arctic-embed-l-ft\"\n",")\n","\n"]},{"cell_type":"code","source":["# Run evaluation for the semantic chunker based rag chain\n","semantic_df = ragas_evaluate(rag_chain_semantic)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":138,"referenced_widgets":["72584a70d8124b18bc44fe171ac66cb7","af0e6592a8ee461eae997c727ae0696d","3b1cbb8a7f84466fbbb0cf9cada079c6","23ab547479c744768ab08064c13460cf","ac1518d71ead440b974f2bf229355c34","720aeba021664c8aa0ecf11190887e3d","1717ad2782e74fad8a31be5202b255cc","85c4801f23ba4d89a2cfd8326ca2d20b","8d07456aedcd432ab08c24c87a7fbb5f","65faaa51a55d429eb73557af3d7300a1","1de250ea2d4d41e5bcef954fdc324c67"]},"id":"AR9BjnvamCt9","executionInfo":{"status":"ok","timestamp":1727206043209,"user_tz":-60,"elapsed":225207,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"}},"outputId":"f9a40abc-f3db-4867-96a5-af1b1865d249"},"execution_count":27,"outputs":[{"output_type":"display_data","data":{"text/plain":["Evaluating: 0%| | 0/245 
[00:00"],"image/png":"iVBORw0KGgoAAAANSUhEUgAAA90AAAJOCAYAAACqS2TfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB7PUlEQVR4nOzdd3QU5dvG8WsT0iuhJJRIICBFqaEjhCoogijSlSagaAREULHQFCJIU0RRpIkgoGADQYpEqoJ0MIQWeqjSwQSS5/2DN/tjSQIJZIiB7+ecPSc788zMPbOz5crMPGMzxhgBAAAAAIBM55TVBQAAAAAAcK8idAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0A8B/gM1m08CBA7O6jDs2bdo0lShRQi4uLvL398/qcv6zpkyZIpvNpn379mV1Kfe9ffv2yWazacqUKVldyh0JCQlRx44d7c+joqJks9kUFRWVZTXdTPJ74K+//srqUjKkdu3aql27dlaXASCbIXQD+E/Ys2ePXnjhBRUpUkTu7u7y9fVVjRo19NFHH+ny5ctZXR7SYceOHerYsaNCQ0M1YcIEffHFFzdtv3LlSj322GMqUKCA3N3d9cADD6hJkyaaMWPGXarYekOHDtUPP/yQ1WXckYEDB8pms9kfLi4uCgkJUY8ePXTmzJmsLg8ZMGPGDI0ZMybF8CNHjmjgwIHatGnTXa8JAO4HObK6AACYP3++WrRoITc3N7Vv314PP/ywEhIStHLlSvXt21fbt2+/ZYDL7i5fvqwcObL3R3JUVJSSkpL00UcfqWjRojdt++2336pVq1YqV66cevbsqZw5cyo2NlbLly/XhAkT1LZt27tUtbWGDh2qZ555Rs2aNXMY/txzz6l169Zyc3PLmsJuw2effSZvb29dvHhRS5cu1dixY7VhwwatXLkyq0u7I4UKFdLly5fl4uKS1aVkqlq1auny5ctydXW1D5sxY4a2bdumXr16ObQ9cuSIBg0apJCQEJUrV+7uFgoA94Hs/QsPQLYXGxur1q1bq1ChQvrtt9+UL18++7iXX35Zu3fv1vz587OwQuskJSUpISFB7u7ucnd3z+py7tjx48clKV2nlQ8cOFClSpXSH3/84RAKrp/PvczZ2VnOzs5ZXUaGPPPMM8qdO7ck6YUXXlDr1q01a9YsrV27VpUrV87i6hwZY/Tvv//Kw8Pjlm1tNts98f67kZOTU5av18WLF+Xl5ZWlNfxXXP95D+D+w+nlALLU8OHDdeHCBU2cONEhcCcrWrSoevbsaX9+9epVvffeewoNDZWbm5tCQkL01ltvKT4+3mG6kJAQPfHEE4qKilLFihXl4eGh0qVL269vnDt3rkqXLi13d3eFhYVp48aNDtN37NhR3t7e2rt3rxo2bCgvLy/lz59fgwcPljHGoe2IESNUvXp15cqVSx4eHgoLC9N3332XYl1sNpsiIiI0ffp0PfTQQ3Jzc9PChQvt466/pvv8+fPq1auXQkJC5Obmprx586pBgwbasGGDwzy//fZbhYWFycPDQ7lz59azzz6rw4cPp7ouhw8fVrNmzeTt7a08efKoT58+SkxMTOOVcfTpp5/aa86fP79efvllh1OLQ0JCNGDAAElSnjx5bnmN+p49e1SpUqUUgVuS8ubN6/A8KSlJY8aM0UMPPSR3d3cFBgbqhRde0OnTpx3a3elrvmXLFnXs2NF+iUNQUJA6d+6sU6dOObRLPt169+7d6tixo/z9/eXn56dOnTrp0qVL9nY2m00XL17U1KlT7admJ19zm9Y13QsWLFB4eLh8fHzk6+urSpUq3fR0+++++042m02///57inGff/65bDabtm3bJkk6evSoOnXqpIIFC8rNzU358uXTk08+edvXldesWVPStdfyen/++acaNWokPz8/eXp6Kjw8XKtWrUox/eHDh/X8888rf/78cnNzU+HChdW9e3clJCRI+t92vlFq2y75tf/111/tr/3nn38uSVq8eLEeeeQR+fv7y9vbW8WLF9dbb71ln/bGa7pHjBghm82m/fv3p1h2v3795Orq6rDvpXd9UzN27Fg99NBD8vT0VM6cOVWxYkWH1zt5G+zYsUMtW7aUr6+vcuXKpZ49e+rff/+96bxvvKa7du3amj9/vvbv32/fH0NCQhQVFaVKlSpJkjp16mQfd/017ulZx+Ra//77b7Vt21Y5c+bUI488csttcOnSJb3wwgvKlSuXfH191b59+xTv7R9//FGNGze27yuhoaF67733Unx+7dq1S82bN1dQUJDc3d1VsGBBtW7dWmfPnnVo9/XXX9s/NwMCAtS6dWsdPHgwRW1ffPGFQkND5eHhocqVK2vFihW3XJ9kN/u8T+93xuXLl9WjRw/lzp1bPj4+atq0qQ4fPnzP9AEC3E840g0gS/38888qUqSIqlevnq72Xbp00dSpU/XMM8/otdde059//qnIyEhFR0fr+++/d2i7e/dutW3bVi+88IKeffZZjRgxQk2aNNH48eP11ltv6aWXXpIkRUZGqmXLloqJiZGT0//+F5mYmKhGjRqpatWqGj58uBYuXKgBAwbo6tWrGjx4sL3dRx99pKZNm6pdu3ZKSEjQzJkz1aJFC82bN0+NGzd2qOm3337T7NmzFRERody5cyskJCTV9XzxxRf13XffKSIiQqVKldKpU6e0cuVKRUdHq0KFCpKuhY9OnTqpUqVKioyM1LFjx/TRRx9p1apV2rhxo8MR58TERDVs2FBVqlTRiBEjtGTJEo0cOVKhoaHq3r37Tbf5wIEDNWjQINWvX1/du3dXTEyMPvvsM61bt06rVq2Si4uLxowZo6+++krff/+9/TTkMmXKpDnPQoUKaenSpTp06JAKFix40+W/8MIL9nXt0aOHYmNj9cknn2jjxo325Se7k9d88eLF2rt3rzp16qSgoCD7ZQ3bt2/XH3/8kSIAtmzZUoULF1ZkZKQ2bNigL7/8Unnz5tWwYcMkXetUrkuXLqpcubK6desmSQoNDU1zPadMmaLOnTvroYceUr9+/eTv76+NGzdq4cKFaZ5u37hxY3l7e2v27NkKDw93GDdr1iw99NBDevjhhyVJzZs31/bt2/XKK68oJCREx48f1+LFi3XgwIE098ObSQ69OXPmtA/77bff9NhjjyksLEwDBgyQk5OTJk+erLp162rFihX2I+JHjhxR5cqVdebMGXXr1k0lSpTQ4cOH9d133+nSpUup/jPmVmJiYtSmTRu98MIL6tq1q4oXL67t27friSeeUJkyZTR48GC5ublp9+7dNw3FLVu21Ouvv67Zs2erb9++DuNmz56tRx991L7O6V3f1EyYMEE9evTQM888Yw/RW7Zs0Z9//pni9W7ZsqVCQkIUGRmpP/74Qx9//LFOnz6tr776Kt3b5+2339bZs2d16NAhjR49WpLk7e2tkiVLavDgwerfv7+6detm/2dK8mdyRtexR
YsWKlasmIYOHZriH5SpiYiIkL+/vwYOHGj/bNm/f7/9nwbStfeGt7e3evfuLW9vb/3222/q37+/zp07pw8//FCSlJCQoIYNGyo+Pl6vvPKKgoKCdPjwYc2bN09nzpyRn5+fJGnIkCF699131bJlS3Xp0kUnTpzQ2LFjVatWLYfPzYkTJ+qFF15Q9erV1atXL+3du1dNmzZVQECAgoOD07XN0/q8T+93RseOHTV79mw999xzqlq1qn7//fcU3ykAsgkDAFnk7NmzRpJ58skn09V+06ZNRpLp0qWLw/A+ffoYSea3336zDytUqJCRZFavXm0f9uuvvxpJxsPDw+zfv98+/PPPPzeSzLJly+zDOnToYCSZV155xT4sKSnJNG7c2Li6upoTJ07Yh1+6dMmhnoSEBPPwww+bunXrOgyXZJycnMz27dtTrJskM2DAAPtzPz8/8/LLL6e5LRISEkzevHnNww8/bC5fvmwfPm/ePCPJ9O/fP8W6DB482GEe5cuXN2FhYWkuwxhjjh8/blxdXc2jjz5qEhMT7cM/+eQTI8lMmjTJPmzAgAFGksO2ScvEiRONJOPq6mrq1Klj3n33XbNixQqHZRhjzIoVK4wkM336dIfhCxcuTDH8Tl/zG19HY4z55ptvjCSzfPnyFOvZuXNnh7ZPPfWUyZUrl8MwLy8v06FDhxTznTx5spFkYmNjjTHGnDlzxvj4+JgqVao4vJ7GXNvvbqZNmzYmb9685urVq/ZhcXFxxsnJyf6anz592kgyH3744U3nlZrk9Y2JiTEnTpww+/btM5MmTTIeHh4mT5485uLFi/Y6ixUrZho2bOhQ86VLl0zhwoVNgwYN7MPat29vnJyczLp161IsL3na5OXe6MZtZ8z/XvuFCxc6tB09evQt98nY2FgjyUyePNk+rFq1aineG2vXrjWSzFdffZXh9U3Nk08+aR566KGbtkneBk2bNnUY/tJLLxlJZvPmzfZhhQoVctjXli1blmIfb9y4sSlUqFCK5axbty7FNsjoOibX2qZNm5uuU7Lk1zEsLMwkJCTYhw8fPtxIMj/++KPD8m70wgsvGE9PT/Pvv/8aY4zZuHGjkWS+/fbbNJe5b98+4+zsbIYMGeIwfOvWrSZHjhz24cmfr+XKlTPx8fH2dl988YWRZMLDw2+5fjf7vE/Pd8b69euNJNOrVy+Hth07dkzxfQHgv4/TywFkmXPnzkmSfHx80tX+l19+kST17t3bYfhrr70mSSmu/S5VqpSqVatmf16lShVJUt26dfXAAw+kGL53794Uy4yIiLD/nXy6YEJCgpYsWWIffv11o6dPn9bZs2dVs2bNFKeCS1J4eLhKlSp1izW9dl30n3/+qSNHjqQ6/q+//tLx48f10ksvOVwj2LhxY5UoUSLV6+BffPFFh+c1a9ZMdZ2vt2TJEiUkJKhXr14OZwF07dpVvr6+t329fefOnbVw4ULVrl1bK1eu1HvvvaeaNWuqWLFiWr16tb3dt99+Kz8/PzVo0EAnT560P8LCwuTt7a1ly5Y5zPdOXvPrX8d///1XJ0+eVNWqVSUp1dcyte156tQp+36dEYsXL9b58+f15ptvprjmM7VTrK/XqlUrHT9+3OHWUN99952SkpLUqlUrSdfWzdXVVVFRUSlO3U2v4sWLK0+ePAoJCVHnzp1VtGhRLViwQJ6enpKkTZs2adeuXWrbtq1OnTplf60uXryoevXqafny5UpKSlJSUpJ++OEHNWnSRBUrVkyxnFutb1oKFy6shg0bOgxLPmr5448/KikpKd3zatWqldavX+9w6vysWbPk5uamJ598UlL61zct/v7+OnTokNatW3fLel5++WWH56+88oqk/30mWuV21vHG98WtdOvWzeFsle7duytHjhwO63b9e/P8+fM6efKkatasqUuXLmnHjh2SZD+S/euvvzpc5nG9uXPnKikpSS1btnT4PAkKClKxYsXsnyfJn68vvviiw1kXHTt2tC8nPdL6vE/Pd0byqejJZ+ckS37tAWQvhG4AWcbX11fStR9R6bF//345OTml6Bk7KChI/v7+Ka7BvD5kSf/7UXbjqYHJw28MI05OTipSpIjDsAcffFCSHK4nnTdvnqpWrSp3d3cFBAQoT548+uyzz1JcRyhdCwbpMXz4cG3btk3BwcGqXLmyBg4c6BAQk9e1ePHiKaYtUaJEim3h7u6uPHnyOAzLmTPnLQNYWstxdXVVkSJFUr3uNb0aNmyoX3/9VWfOnNHy5cv18ssva//+/XriiSfsnant2rVLZ8+eVd68eZUnTx6Hx4ULF1J0unYnr/k///yjnj17KjAwUB4eHsqTJ4/99UrttbxxWcmnHN9OqE0Od8mngmdE8rW2s2bNsg+bNWuWypUrZ99f3dzcNGzYMC1YsECBgYGqVauWhg8frqNHj6Z7OXPmzNHixYs1Y8YMVa1aVcePH3cID7t27ZIkdejQIcVr9eWXXyo+Pl5nz57ViRMndO7cudta15tJ7b3VqlUr1ahRQ126dFFgYKBat26t2bNn3zKAt2jRQk5OTvZtaozRt99+q8cee8z+uZXe9U3LG2+8IW9vb1WuXFnFihXTyy+/nOZp78WKFXN4HhoaKicnJ8vv834765jez7hkN66bt7e38uXL57Bu27dv11NPPSU/Pz/5+voqT548evbZZyX9771ZuHBh9e7dW19++aVy586thg0baty4cQ717dq1S8YYFStWLMX6REdH2z9Pkj/XbqzNxcUlxXfCzaS1LdLznZH8fXfjPG51ZwgA/01c0w0gy/j6+ip//vz2jp7SK71HwtLqHTqt4SYd1x/eaMWKFWratKlq1aqlTz/9VPny5ZOLi4smT56cagdY6elNWbp2DWfNmjX1/fffa9GiRfrwww81bNgwzZ07V4899liG6/wv95Tt6empmjVrqmbNmsqdO7cGDRqkBQsWqEOHDkpKSlLevHk1ffr0VKe98R8Jd/Kat2zZUqtXr1bfvn1Vrlw5eXt7KykpSY0aNUo1pGXmfnQn3Nzc1KxZM33//ff69NNPdezYMa1atUpDhw51aNerVy81adJEP/zwg3799Ve9++67ioyM1G+//aby5cvfcjm1atWy917epEkTlS5dWu3atdP69evl5ORk30Yffvhhmred8vb21j///JOu9UrrfZ5W53+pvbc8PDy0fPlyLVu2TPPnz9fChQs1a9Ys1a1bV4sWLUrzNcyfP79q1qyp2bNn66233tIff/yhAwcO2K/Xl5Tu9U1LyZIlFRMTo3nz5mnhwoWaM2eOPv30U/Xv31+DBg1Kczrp9s8GyKjbWcf0fsal15kzZxQeHi5fX18NHjxYoaGhcnd314YNG/TGG284vDdHjhypjh076scff9SiRYvUo0cP+3XwBQsWVFJSkmw2mxYsWJDqa3+z1+t2pLYtMvqdAeDeQOgGkKWeeOIJffHFF1qzZo3DacGpKVSokJKSkrRr1y6VLFnSPvzYsWM6c+aMChUqlKm1JSUlae/evfajhZK0c+dOSbJ3iDNnzhy5u7vr119/dbjn
8uTJk+94+fny5dNLL72kl156ScePH1eFChU0ZMgQPfbYY/Z1jYmJUd26dR2mi4mJybRtcf1yrj/Ck5CQoNjYWNWvXz9TlpMs+XTjuLg4SdeO6C1ZskQ1atTI9B/z1zt9+rSWLl2qQYMGqX///vbhyUf6bld6w1FyB2vbtm27rSNZrVq10tSpU7V06VJFR0fLGGM/tfzG5bz22mt67bXXtGvXLpUrV04jR47U119/naHleXt7a8CAAerUqZNmz56t1q1b29fB19f3pvtFnjx55Ovre8t/tiWfOXDmzBmHTgEzenaFk5OT6tWrp3r16mnUqFEaOnSo3n77bS1btuymdbZq1UovvfSSYmJiNGvWLHl6eqpJkyb28eld35vx8vJSq1at1KpVKyUkJOjpp5/WkCFD1K9fP4fLDHbt2uVwxHP37t1KSkrKcAd4ae2PaQ3PjHW8lV27dqlOnTr25xcuXFBcXJwef/xxSdd6YT916pTmzp2rWrVq2dvFxsamOr/SpUurdOnSeuedd7R69WrVqFFD48eP1/vvv6/Q0FAZY1S4cGGHz/UbJX/u7dq1y+Hz9cqVK4qNjVXZsmVve33T+52R/H0XGxvrcMR99+7dt71sAFmH08sBZKnXX39dXl5e6tKli44dO5Zi/J49e/TRRx9Jkv1H2JgxYxzajBo1SpIs6dX1k08+sf9tjNEnn3wiFxcX1atXT9K1o502m83h6Nu+ffv0ww8/3PYyExMTU5yymTdvXuXPn99+a7SKFSsqb968Gj9+vMPt0hYsWKDo6OhM2xb169eXq6urPv74Y4cjuBMnTtTZs2dvezlLly5NdXjydZzJp7O3bNlSiYmJeu+991K0vXr1qsNty+5E8lGvG49S37ivZZSXl1e6anz00Ufl4+OjyMjIFLeCSs+R8/r16ysgIECzZs3SrFmzVLlyZYeQdunSpRTzDQ0NlY+PT4rb7aVXu3btVLBgQfvR37CwMIWGhmrEiBG6cOFCivYnTpyQdC0EN2vWTD///LP++uuvFO2S1zc58C1fvtw+LvkWbOmV2lH15CO2t1rv5s2by9nZWd98842+/fZbPfHEEw73nE7v+qblxlvRubq6qlSpUjLG6MqVKw7jxo0b5/B87NixkpThs168vLxSPeU9eb1u3FfvdB3T44svvnBY388++0xXr161r1tq782EhAR9+umnDvM5d+6crl696jCsdOnScnJysr/WTz/9tJydnTVo0KAU7ytjjP01qVixovLkyaPx48fbb2EnXetF/U4/c9L7nZHcP8GN65n82gPIXjjSDSBLhYaGasaMGWrVqpVKliyp9u3b6+GHH1ZCQoJWr16tb7/91n5v47Jly6pDhw764osv7Kccrl27VlOnTlWzZs0cjpZkBnd3dy1cuFAdOnRQlSpVtGDBAs2fP19vvfWW/bTmxo0ba9SoUWrUqJHatm2r48ePa9y4cSpatKi2bNlyW8s9f/68ChYsqGeeeUZly5aVt7e3lixZonXr1mnkyJGSrl1bOGzYMHXq1Enh4eFq06aN/ZZhISEhevXVVzNlG+TJk0f9+vXToEGD1KhRIzVt2lQxMTH69NNPValSJft1lRn15JNPqnDhwmrSpIlCQ0N18eJFLVmyRD///LMqVapkP6IYHh6uF154QZGRkdq0aZMeffRRubi4aNeuXfr222/10Ucf6Zlnnrnj9fT19bVf53zlyhUVKFBAixYtSvNoWnqFhYVpyZIlGjVqlPLnz6/ChQvbO3G7cfmjR49Wly5dVKlSJft9jjdv3qxLly7dMmi6uLjo6aef1syZM3Xx4kWNGDHCYfzOnTtVr149tWzZUqVKlVKOHDn0/fff69ixY2rduvVtrZuLi4t69uypvn37auHChWrUqJG+/PJLPfbYY3rooYfUqVMnFShQQIcPH9ayZcvk6+urn3/+WZI0dOhQLVq0SOHh4erWrZtKliypuLg4ffvtt1q5cqX8/f316KOP6oEHHtDzzz+vvn37ytnZWZMmTVKePHl04MCBdNU4ePBgLV++XI0bN1ahQoV0/PhxffrppypYsOAt7yGdN29e1alTR6NGjdL58+dTnDng5OSU7vVNzaOPPqqgoCDVqFFDgYGBio6O1ieffKLGjRun6FwyNjZWTZs2VaNGjbRmzRp9/fXXatu2bYaPuIaFhWnWrFnq3bu3KlWqJG9vb/t70N/fX+PHj5ePj4+8vLxUpUoVFS5c+I7WMT0SEhLs+2byZ8sjjzyipk2bSrp267KcOXOqQ4cO6tGjh2w2m6ZNm5YiNP/222+KiIhQixYt9OCDD+rq1auaNm2anJ2d1bx5c0nXvm/ef/999evXT/v27VOzZs3k4+Oj2NhYff/99+rWrZv69OkjFxcXvf/++3rhhRdUt25dtWrVSrGxsZo8eXKGrulOTXq/M8LCwtS8eXONGTNGp06dst8yLPlsq7t1iQGATHLX+0sHgFTs3LnTdO3a1YSEhBhXV1fj4+NjatSoYcaOHWu/JYwxxly5csUMGjTIFC5c2Li4uJjg4GDTr18/hzbGXLt9TuPGjVMsR1KKW3El3zLo+tspdejQwXh5eZk9e/aYRx991Hh6eprAwEAzYMCAFLe1mjhxoilWrJhxc3MzJUqUMJMnT071dkepLfv6ccm3gImPjzd9+/Y1ZcuWNT4+PsbLy8uULVvWfPrppymmmzVrlilfvrxxc3MzAQEBpl27dubQoUMObZLX5UZp3ZIpNZ988okpUaKEcXFxMYGBgaZ79+7m9OnTqc4vPbcM++abb0zr1q1NaGio8fDwMO7u7qZUqVLm7bffNufOnUvR/osvvjBhYWHGw8PD+Pj4mNKlS5vXX3/dHDlyxN7mTl/zQ4cOmaeeesr4+/sbPz8/06JFC3PkyJEUt+dJaz1Tu5XVjh07TK1atYyHh4eRZL+lU2ptjTHmp59+MtWrVzceHh7G19fXVK5c2XzzzTe32pzGGGMWL15sJBmbzWYOHjzoMO7kyZPm5ZdfNiVKlDBeXl7Gz8/PVKlSxcyePfuW873Z63r27Fnj5+fncAuljRs3mqefftrkypXLuLm5mUKFCpmWLVuapUuXOky7f/9+0759e5MnTx7j5uZmihQpYl5++WWHWzStX7/eVKlSxbi6upoHHnjAjBo1Ks1bhqX22i9dutQ8+eSTJn/+/MbV1dXkz5/ftGnTxuzcudPeJrVbhiWbMGGCkWR8fHxS3Moto+t7o88//9zUqlXLPl1oaKjp27evOXv2rL1N8rb/+++/zTPPPGN8fHxMzpw5TURERIp60nPLsAsXLpi2bdsaf39/I8nh9mE//vijKVWqlMmRI0eK7ZGedczI+9+Y/70Hfv/9d9OtWzeTM2dO4+3tbdq1a2dOnTrl0HbVqlWmatWqxsPDw+TPn9+8/vrr9tsBJq/f3r17TefOnU1oaKhxd3c3AQEBpk6dOmbJkiUplj1nzhzzyCOPGC8vL+Pl5WVKlChhXn75ZRMTE+PQ7tNPPzWFCxc2bm5upmLFimb58uUmPDw83bcMS+vzPr3fGRcvXjQ
vv/yyCQgIMN7e3qZZs2YmJibGSDIffPDBLWsA8N9hM+Yu9/gCANlAx44d9d1336V6SiUA3A0DBw7UoEGDdOLECXsndri/bdq0SeXLl9fXX3+tdu3aZXU5ANKJa7oBAACA/5jLly+nGDZmzBg5OTk5dCoH4L+Pa7oBAACA/5jhw4dr/fr1qlOnjnLkyKEFCxZowYIF6tatm4KDg7O6PAAZQOgGAAAA/mOqV6+uxYsX67333tOFCxf0wAMPaODAgXr77bezujQAGcQ13QAAAAAAWIRrugEAAAAAsAihGwAAAAAAi9x313QnJSXpyJEj8vHxkc1my+pyAAAAAADZkDFG58+fV/78+eXklPbx7PsudB85coQeHwEAAAAAmeLgwYMqWLBgmuPvu9Dt4+Mj6dqG8fX1zeJqAAAAAADZ0blz5xQcHGzPmGm570J38inlvr6+hG4AAAAAwB251WXLdKQGAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEXuu2u6AQAAANxdSUlJSkhIyOoygAxxcXGRs7PzHc+H0A0AAADAMgkJCYqNjVVSUlJWlwJkmL+/v4KCgm7ZWdrNELoBAAAAWMIYo7i4ODk7Oys4OFhOTlzdiuzBGKNLly7p+PHjkqR8+fLd9rwI3QAAAAAscfXqVV26dEn58+eXp6dnVpcDZIiHh4ck6fjx48qbN+9tn2rOv5oAAAAAWCIxMVGS5OrqmsWVALcn+Z9FV65cue15ELoBAAAAWOpOrocFslJm7LuEbgAAAAAALELoBgAAAIB73L59+2Sz2bRp0yZLlzNw4ECVK1fO0mVIUlRUlGw2m86cOWP5su4UHakBAAAAuKvmxsTd1eU9XTxjPU+fOHFC/fv31/z583Xs2DHlzJlTZcuWVf/+/VWjRg2Lqsw8HTt21JkzZ/TDDz/YhwUHBysuLk65c+e+o3nPmTNHY8eO1caNG5WYmKgiRYromWeeUUREhAICAu6w8nsTR7oBAAAA4DrNmzfXxo0bNXXqVO3cuVM//fSTateurVOnTmV1abfN2dlZQUFBypHj9o+7vv3222rVqpUqVaqkBQsWaNu2bRo5cqQ2b96sadOmZWK1WeNOOku7GUI3AAAAAPy/M2fOaMWKFRo2bJjq1KmjQoUKqXLlyurXr5+aNm3q0K5Lly7KkyePfH19VbduXW3evNk+Pvk060mTJumBBx6Qt7e3XnrpJSUmJmr48OEKCgpS3rx5NWTIEIfljxo1SqVLl5aXl5eCg4P10ksv6cKFC/bxU6ZMkb+/v3799VeVLFlS3t7eatSokeLi4uzLnTp1qn788UfZbDbZbDZFRUWlenr59u3b9cQTT8jX11c+Pj6qWbOm9uzZk+p2Wbt2rYYOHaqRI0fqww8/VPXq1RUSEqIGDRpozpw56tChg0P7adOmKSQkRH5+fmrdurXOnz9vHxcSEqIxY8Y4tC9XrpwGDhxof26z2fTll1/qqaeekqenp4oVK6affvopzdft0qVLeuyxx1SjRg37KedffvmlSpYsKXd3d5UoUUKffvqpvX3y9pg1a5bCw8Pl7u6u6dOnpzn/O0HoBgAAAID/5+3tLW9vb/3www+Kj49Ps12LFi10/PhxLViwQOvXr1eFChVUr149/fPPP/Y2e/bs0YIFC7Rw4UJ98803mjhxoho3bqxDhw7p999/17Bhw/TOO+/ozz//tE/j5OSkjz/+WNu3b9fUqVP122+/6fXXX3dY9qVLlzRixAhNmzZNy5cv14EDB9SnTx9JUp8+fdSyZUt7EI+Li1P16tVT1H/48GHVqlVLbm5u+u2337R+/Xp17txZV69eTXV9p0+fbv/HQWr8/f0d1vuHH37QvHnzNG/ePP3+++/64IMP0tyWaRk0aJBatmypLVu26PHHH1e7du0ctm+yM2fOqEGDBkpKStLixYvl7++v6dOnq3///hoyZIiio6M1dOhQvfvuu5o6darDtG+++aZ69uyp6OhoNWzYMMM1pgfXdAMAAADA/8uRI4emTJmirl27avz48apQoYLCw8PVunVrlSlTRpK0cuVKrV27VsePH5ebm5skacSIEfrhhx/03XffqVu3bpKkpKQkTZo0ST4+PipVqpTq1KmjmJgY/fLLL3JyclLx4sU1bNgwLVu2TFWqVJEk9erVy15LSEiI3n//fb344osOR2mvXLmi8ePHKzQ0VJIUERGhwYMHS7r2TwMPDw/Fx8crKCgozfUcN26c/Pz8NHPmTLm4uEiSHnzwwTTb79q1S0WKFLG3vZmkpCRNmTJFPj4+kqTnnntOS5cuTXFU/1Y6duyoNm3aSJKGDh2qjz/+WGvXrlWjRo3sbY4ePapWrVqpWLFimjFjhv2e8AMGDNDIkSP19NNPS5IKFy6sv//+W59//rnDUflevXrZ21iFI90AAAAAcJ3mzZvryJEj+umnn9SoUSNFRUWpQoUKmjJliiRp8+bNunDhgnLlymU/Mu7t7a3Y2FiH07NDQkLswVOSAgMDVapUKTk5OTkMO378uP35kiVLVK9ePRUoUEA+Pj567rnndOrUKV26dMnextPT0x64JSlfvnwO80iPTZs2qWbNmukK0ZJkjEn3vG9c79upT5L9nxyS5OXlJV9f3xTzadCggYoWLapZs2bZA/fFixe1Z88ePf/88w6vz/vvv5/i9PmKFStmuK6M4kg3AAAAANzA3d1dDRo0UIMGDfTuu++qS5cuGjBggDp27KgLFy4oX758ioqKSjHd9adZ3xhobTZbqsOSkpIkXbvO+IknnlD37t01ZMgQBQQEaOXKlXr++eeVkJAgT0/PNOebkVAsSR4eHhlq/+CDD2rlypW6cuXKLYP6zdZRunYK/Y31ptaJ2a3mI0mNGzfWnDlz9Pfff6t06dKSZL8GfsKECfYzCJI5Ozs7PPfy8rrpumQGjnQDAAAAwC2UKlVKFy9elCRVqFBBR48eVY4cOVS0aFGHx53ckmv9+vVKSkrSyJEjVbVqVT344IM6cuRIhufj6uqqxMTEm7YpU6aMVqxYke4eu9u2basLFy44nOZ+vYzcLztPnjz2jt8k6dy5c4qNjU339Nf74IMP1KFDB9WrV09///23pGtnD+TPn1979+5N8foULlz4tpZzJwjdAAAAAPD/Tp06pbp16+rrr7/Wli1bFBsbq2+//VbDhw/Xk08+KUmqX7++qlWrpmbNmmnRokXat2+fVq9erbffflt//fXXbS+7aNGiunLlisaOHau9e/dq2rRpGj9+fIbnExISoi1btigmJkYnT55MNVhHRETo3Llzat26tf766y/t2rVL06ZNU0xMTKrzrFKlil5//XW99tprev3117VmzRrt379fS5cuVYsWLVJ0UHYzdevW1bRp07RixQpt3bpVHTp0SHEEOiNGjBihdu3aqW7dutqxY4eka52wRUZG6uOPP9bOnTu1detWTZ48WaNGjbrt5dwuTi8HAAAAgP/n7e2tKlWqaPTo0dqzZ4+uXLmi4OBgde3aVW+99Zaka6c5//LLL3r77bfVqVMnnThxQkFBQapVq5YCAwNve9
lly5bVqFGjNGzYMPXr10+1atVSZGSk2rdvn6H5dO3aVVFRUapYsaIuXLigZcuWKSQkxKFNrly59Ntvv6lv374KDw+Xs7OzypUrpxo1aqQ532HDhiksLEzjxo3T+PHjlZSUpNDQUD3zzDMpbhl2M/369VNsbKyeeOIJ+fn56b333rvtI93JRo8ercTERNWtW1dRUVHq0qWLPD099eGHH6pv377y8vJS6dKlHTqqu1tsJqMn/2dz586dk5+fn86ePStfX9+sLgcAgP+sj05/lNUlZImeOXtmdQnAPePff/9VbGysChcuLHd396wuB8iwm+3D6c2WnF4OAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAABwj7DZbPrhhx+yuoxbmjJlivz9/e3PBw4cqHLlymVZPVbKkdUFAAAAALi/fHT6o7u6vJ45e2aofceOHTV16lRJUo4cOVSwYEG1aNFCgwcPlru7uxUlZpq4uDjlzJnT0mVs3rxZ7777rv744w+dO3dOQUFBqlKlisaOHau8efPe1jz79OmjV155xf68Y8eOOnPmjMM/EPbt26fChQtr48aN2SqgE7oBAAAA4AaNGjXS5MmTdeXKFa1fv14dOnSQzWbTsGHDsqymhIQEubq63rRNUFCQpTWcOHFC9erV0xNPPKFff/1V/v7+2rdvn3766SddvHjxtufr7e0tb2/vTKz05q5cuSIXF5e7sixOLwcAAACAG7i5uSkoKEjBwcFq1qyZ6tevr8WLF9vHJyUlKTIyUoULF5aHh4fKli2r7777zmEe27dv1xNPPCFfX1/5+PioZs2a2rNnjySpdu3a6tWrl0P7Zs2aqWPHjvbnISEheu+999S+fXv5+vqqW7duSkhIUEREhPLlyyd3d3cVKlRIkZGR9mmuP728evXqeuONNxyWceLECbm4uGj58uWSpPj4ePXp00cFChSQl5eXqlSpoqioqDS3y6pVq3T27Fl9+eWXKl++vAoXLqw6depo9OjRKly4sCQpKipKNptN8+fPV5kyZeTu7q6qVatq27Ztac73+tPLBw4cqKlTp+rHH3+UzWaTzWZTVFSUff7ly5eXzWZT7dq17dN/+eWXKlmypNzd3VWiRAl9+umn9nH79u2TzWbTrFmzFB4eLnd3d02fPj3NWjIboRsAAAAAbmLbtm1avXq1w1HmyMhIffXVVxo/fry2b9+uV199Vc8++6x+//13SdLhw4dVq1Ytubm56bffftP69evVuXNnXb16NUPLHjFihMqWLauNGzfq3Xff1ccff6yffvpJs2fPVkxMjKZPn66QkJBUp23Xrp1mzpwpY4x92KxZs5Q/f37VrFlTkhQREaE1a9Zo5syZ2rJli1q0aKFGjRpp165dqc4zKChIV69e1ffff+8w39T07dtXI0eO1Lp165QnTx41adJEV65cueU69+nTRy1btlSjRo0UFxenuLg4Va9eXWvXrpUkLVmyRHFxcZo7d64kafr06erfv7+GDBmi6OhoDR06VO+++679EoFkb775pnr27Kno6Gg1bNjwlnVkFk4vBwAAAIAbzJs3T97e3rp69ari4+Pl5OSkTz75RNK1o8NDhw7VkiVLVK1aNUlSkSJFtHLlSn3++ecKDw/XuHHj5Ofnp5kzZ9pPY37wwQczXEfdunX12muv2Z8fOHBAxYoV0yOPPCKbzaZChQqlOW3Lli3Vq1cvrVy50h6yZ8yYoTZt2shms+nAgQOaPHmyDhw4oPz580u6FngXLlyoyZMna+jQoSnmWbVqVb311ltq27atXnzxRVWuXFl169ZV+/btFRgY6NB2wIABatCggSRp6tSpKliwoL7//nu1bNnypuvs7e0tDw8PxcfHO5wunydPHklSrly5HIYPGDBAI0eO1NNPPy1JKly4sP7++299/vnn6tChg71dr1697G3uJkI3AAAAANygTp06+uyzz3Tx4kWNHj1aOXLkUPPmzSVJu3fv1qVLl+yBMllCQoLKly8vSdq0aZNq1qx5x9cNV6xY0eF5x44d1aBBAxUvXlyNGjXSE088oUcffTTVafPkyaNHH31U06dPV82aNRUbG6s1a9bo888/lyRt3bpViYmJKf4ZEB8fr1y5cqVZ05AhQ9S7d2/99ttv+vPPPzV+/HgNHTpUy5cvV+nSpe3tkv8hIUkBAQEqXry4oqOjM7wNbubixYvas2ePnn/+eXXt2tU+/OrVq/Lz83Noe+O2vFsI3QAAAABwAy8vLxUtWlSSNGnSJJUtW1YTJ07U888/rwsXLkiS5s+frwIFCjhM5+bmJkny8PC46fydnJxSnJ6d2qnXXl5eDs8rVKig2NhYLViwQEuWLFHLli1Vv379FNeTJ2vXrp169OihsWPHasaMGSpdurQ9GF+4cEHOzs5av369nJ2dHaa7VadmuXLlUosWLdSiRQsNHTpU5cuX14gRI1Kc0m215NdiwoQJqlKlisO4G9fpxm15txC6AQAAAOAmnJyc9NZbb6l3795q27atSpUqJTc3Nx04cEDh4eGpTlOmTBlNnTo1zV6y8+TJo7i4OPvzxMREbdu2TXXq1LllPb6+vmrVqpVatWqlZ555Ro0aNdI///yjgICAFG2ffPJJdevWTQsXLtSMGTPUvn17+7jy5csrMTFRx48ft59+fjtcXV0VGhqaovfyP/74Qw888IAk6fTp09q5c6dKliyZ7nkmJiamGCbJYXhgYKDy58+vvXv3ql27dre9DlaiIzUAAAAAuIUWLVrI2dlZ48aNk4+Pj/r06aNXX31VU6dO1Z49e7RhwwaNHTvWfqQ3IiJC586dU+vWrfXXX39p165dmjZtmmJiYiRdu1Z7/vz5mj9/vnbs2KHu3bvrzJkzt6xj1KhR+uabb7Rjxw7t3LlT3377rYKCguTv759qey8vLzVr1kzvvvuuoqOj1aZNG/u4Bx98UO3atVP79u01d+5cxcbGau3atYqMjNT8+fNTnd+8efP07LPPat68edq5c6diYmI0YsQI/fLLL3ryyScd2g4ePFhLly7Vtm3b1LFjR+XOnVvNmjW79cbWtZ7bt2zZopiYGJ08eVJXrlxR3rx55eHhoYULF+rYsWM6e/asJGnQoEGKjIzUxx9/rJ07d2rr1q2aPHmyRo0ala5lWY3QDQAAAAC3kCNHDkVERGj48OG6ePGi3nvvPb377ruKjIxUyZIl1ahRI82fP99+W6tcuXLpt99+04ULFxQeHq6wsDBNmDDBftS7c+fO6tChg9q3b6/w8HAVKVIkXUe5fXx8NHz4cFWsWFGVKlXSvn379Msvv8jJKe1o165dO23evFk1a9a0H3lONnnyZLVv316vvfaaihcvrmbNmmndunUp2iUrVaqUPD099dprr6lcuXKqWrWqZs+erS+//FLPPfecQ9sPPvhAPXv2VFhYmI4ePaqff/75lvcZT9a1a1cVL15cFStWVJ48ebRq1SrlyJFDH3/8sT7//HPlz
5/fHvK7dOmiL7/8UpMnT1bp0qUVHh6uKVOm2F+LrGYzt+rn/R5z7tw5+fn56ezZs/L19c3qcgAA+M/66PRHWV1CluiZs2dWlwDcM/7991/FxsaqcOHCcnd3z+pycJdERUWpTp06On36dJpH4LOLm+3D6c2WHOkGAAAAAMAihG4AAAAAACxC7+UAAAAAgExTu3btFLdDu59xpBsAAAAAAIsQugEAAAAAsAihGwAAAIClONUY2VVSUtIdz4NrugEAAABYwsXFRTabTSdOnFCePHlks9myuiQgXYwxSkhI0IkTJ+Tk5JTu+4unhtANAAAAwBLOzs4qWLCgDh06pH379mV1OUCGeXp66oEHHpCT0+2fJE7oBgAAAGAZb29vFStWTFeuXMnqUoAMcXZ2Vo4cOe74DA1CNwAAAABLOTs7y9nZOavLALIEHakBAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgkf9E6B43bpxCQkLk7u6uKlWqaO3atWm2nTJlimw2m8PD3d39LlYLAAAAAED6ZHnonjVrlnr37q0BAwZow4YNKlu2rBo2bKjjx4+nOY2vr6/i4uLsj/3799/FigEAAAAASJ8sD92jRo1S165d1alTJ5UqVUrjx4+Xp6enJk2alOY0NptNQUFB9kdgYOBdrBgAAAAAgPTJ0tCdkJCg9evXq379+vZhTk5Oql+/vtasWZPmdBcuXFChQoUUHBysJ598Utu3b78b5QIAAAAAkCFZGrpPnjypxMTEFEeqAwMDdfTo0VSnKV68uCZNmqQff/xRX3/9tZKSklS9enUdOnQo1fbx8fE6d+6cwwMAAAAAgLshy08vz6hq1aqpffv2KleunMLDwzV37lzlyZNHn3/+eartIyMj5efnZ38EBwff5YoBAAAAAPerLA3duXPnlrOzs44dO+Yw/NixYwoKCkrXPFxcXFS+fHnt3r071fH9+vXT2bNn7Y+DBw/ecd0AAAAAAKRHloZuV1dXhYWFaenSpfZhSUlJWrp0qapVq5aueSQmJmrr1q3Kly9fquPd3Nzk6+vr8AAAAAAA4G7IkdUF9O7dWx06dFDFihVVuXJljRkzRhcvXlSnTp0kSe3bt1eBAgUUGRkpSRo8eLCqVq2qokWL6syZM/rwww+1f/9+denSJStXAwAAAACAFLI8dLdq1UonTpxQ//79dfToUZUrV04LFy60d6524MABOTn974D86dOn1bVrVx09elQ5c+ZUWFiYVq9erVKlSmXVKgAAAAAAkCqbMcZkdRF307lz5+Tn56ezZ89yqjkAADfx0emPsrqELNEzZ8+sLgEAkA2kN1tmu97LAQAAAADILgjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEVyZHUBAAAAgBU+Ov1RVpeQZXrm7JnVJQD4fxzpBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAAAAALELoBgAAAADAIoRuAAAAAAAsQugGAAAAAMAihG4AAAAAACySI6sLAIB70UenP8rqErJEz5w9s7oEAACA/xSOdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgkf9E6B43bpxCQkLk7u6uKlWqaO3atemabubMmbLZbGrWrJm1BQIAAAAAcBtyZHUBs2bNUu/evTV+/HhVqVJFY8aMUcOGDRUTE6O8efOmOd2+ffvUp08f1axZ8y5Wi7vho9MfZXUJWaJnzp5ZXQIAAACATJblR7pHjRqlrl27qlOnTipVqpTGjx8vT09PTZo0Kc1pEhMT1a5dOw0aNEhFihS5i9UCAAAAAJB+WXqkOyEhQevXr1e/fv3sw5ycnFS/fn2tWbMmzekGDx6svHnz6vnnn9eKFSvuRqkAAAAAcNdxFmj2l6Wh++TJk0pMTFRgYKDD8MDAQO3YsSPVaVauXKmJEydq06ZN6VpGfHy84uPj7c/PnTt32/UCAAAAAJARWX56eUacP39ezz33nCZMmKDcuXOna5rIyEj5+fnZH8HBwRZXCQAAAADANVl6pDt37txydnbWsWPHHIYfO3ZMQUFBKdrv2bNH+/btU5MmTezDkpKSJEk5cuRQTEyMQkNDHabp16+fevfubX9+7tw5gjcAAAAA4K7I0tDt6uqqsLAwLV261H7br6SkJC1dulQREREp2pcoUUJbt251GPbOO+/o/Pnz+uijj1IN025ubnJzc7OkfgAAAAAAbibLbxnWu3dvdejQQRUrVlTlypU1ZswYXbx4UZ06dZIktW/fXgUKFFBkZKTc3d318MMPO0zv7+8vSSmGAwAAAACQ1bI8dLdq1UonTpxQ//79dfToUZUrV04LFy60d6524MABOTllq0vPAQAAAACQ9B8I3ZIUERGR6unkkhQVFXXTaadMmZL5BQEAAAAAkAk4hAwAAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYJEdWFwAAAABrzY2Jy+oSskberC4AADjSDQAAAACAZQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjd
AAAAAABYJMOhe+rUqZo/f779+euvvy5/f39Vr15d+/fvz9TiAAAAAADIzjIcuocOHSoPDw9J0po1azRu3DgNHz5cuXPn1quvvprpBQIAAAAAkF3lyOgEBw8eVNGiRSVJP/zwg5o3b65u3bqpRo0aql27dmbXBwAAAABAtpXhI93e3t46deqUJGnRokVq0KCBJMnd3V2XL1/O3OoAAAAAAMjGMnyku0GDBurSpYvKly+vnTt36vHHH5ckbd++XSEhIZldHwAAAAAA2VaGj3SPGzdO1apV04kTJzRnzhzlypVLkrR+/Xq1adMm0wsEAAAAACC7yvCRbn9/f33yyScphg8aNChTCgIAAAAA4F5xW/fpXrFihZ599llVr15dhw8fliRNmzZNK1euzNTiAAAAAADIzjIcuufMmaOGDRvKw8NDGzZsUHx8vCTp7NmzGjp0aKYXCAAAAABAdpXh0P3+++9r/PjxmjBhglxcXOzDa9SooQ0bNmRqcQAAAAAAZGcZDt0xMTGqVatWiuF+fn46c+ZMZtQEAAAAAMA9IcOhOygoSLt3704xfOXKlSpSpEimFAUAAAAAwL0gw6G7a9eu6tmzp/7880/ZbDYdOXJE06dPV58+fdS9e3cragQAAAAAIFvK8C3D3nzzTSUlJalevXq6dOmSatWqJTc3N/Xp00evvPKKFTUCAAAAAJAtZSh0JyYmatWqVXr55ZfVt29f7d69WxcuXFCpUqXk7e1tVY0AAAAAAGRLGQrdzs7OevTRRxUdHS1/f3+VKlXKqroAAAAAAMj2MnxN98MPP6y9e/daUQsAAAAAAPeU27pPd58+fTRv3jzFxcXp3LlzDg8AAAAAAHBNhjtSe/zxxyVJTZs2lc1msw83xshmsykxMTHzqgMAAAAAIBvLcOhetmyZFXUAAAAAAHDPyXDoDg8Pt6IOAAAAAADuORkO3ZJ05swZTZw4UdHR0ZKkhx56SJ07d5afn1+mFgcAAAAAQHaW4Y7U/vrrL4WGhmr06NH6559/9M8//2jUqFEKDQ3Vhg0brKgRAAAAAIBsKcNHul999VU1bdpUEyZMUI4c1ya/evWqunTpol69emn58uWZXiQAAAAAANlRhkP3X3/95RC4JSlHjhx6/fXXVbFixUwtDgAAAACA7CzDp5f7+vrqwIEDKYYfPHhQPj4+mVIUAAAAAAD3ggyH7latWun555/XrFmzdPDgQR08eFAzZ85Uly5d1KZNGytqBAAAAAAgW8rw6eUjRoyQzWZT+/btdfXqVUmSi4uLunfvrg8++CDTCwQAAAAAILvKcOh2dXXVRx99pMjISO3Zs0eSFBoaKk9Pz0wvDgAAAACA7CzDofvs2bNKTExUQECASpcubR/+zz//KEeOHPL19c3UAgEAAAAAyK4yfE1369atNXPmzBTDZ8+erdatW2dKUQAAAAAA3AsyHLr//PNP1alTJ8Xw2rVr688//8yUogAAAAAAuBdkOHTHx8fbO1C73pUrV3T58uVMKQoAAAAAgHtBhkN35cqV9cUXX6QYPn78eIWFhWVKUQAAAAAA3Asy3JHa+++/r/r162vz5s2qV6+eJGnp0qVat26dFi1alOkFAgAAAACQXWX4SHeNGjW0Zs0aBQcHa/bs2fr5559VtGhRbdmyRTVr1rSiRgAAAAAAsqUMH+mWpHLlymn69OmZXQsAAAAAAPeUdIfuq1evKjExUW5ubvZhx44d0/jx43Xx4kU1bdpUjzzyiCVFAgAAAACQHaU7dHft2lWurq76/PPPJUnnz59XpUqV9O+//ypfvnwaPXq0fvzxRz3++OOWFQsAAAAAQHaS7mu6V61apebNm9uff/XVV0pMTNSuXbu0efNm9e7dWx9++KElRQIAAAAAkB2lO3QfPnxYxYoVsz9funSpmjdvLj8/P0lShw4dtH379syvEAAAAACAbCrdodvd3V2XL1+2P//jjz9UpUoVh/EXLlzI3OoAAAAAAMjG0h26y5Urp2nTpkmSVqxYoWPHjqlu3br28Xv27FH+/Pkzv0IAAAAAALKpdHek1r9/fz322GOaPXu24uLi1LFjR+XLl88+/vvvv1eNGjUsKRIAAAAAgOwo3Ue6w8PDtX79evXo0UOTJ0/WhAkTHMaXK1dOr7766m0VMW7cOIWEhMjd3V1VqlTR2rVr02w7d+5cVaxYUf7+/vLy8nI4Ag8AAAAAwH9Juo90S1LJkiVVsmTJVMd169bttgqYNWuWevfurfHjx6tKlSoaM2aMGjZsqJiYGOXNmzdF+4CAAL399tsqUaKEXF1dNW/ePHXq1El58+ZVw4YNb6sGAAAAAACskO4j3VYZNWqUunbtqk6dOqlUqVIaP368PD09NWnSpFTb165dW0899ZRKliyp0NBQ9ezZU2XKlNHKlSvvcuUAAAAAANxclobuhIQErV+/XvXr17cPc3JyUv369bVmzZpbTm+M0dKlSxUTE6NatWql2iY+Pl7nzp1zeAAAAAAAcDdkaeg+efKkEhMTFRgY6DA8MDBQR48eTXO6s2fPytvbW66urmrcuLHGjh2rBg0apNo2MjJSfn5+9kdwcHCmrgMAAAAAAGnJ8tPLb4ePj482bdqkdevWaciQIerdu7eioqJSbduvXz+dPXvW/jh48ODdLRYAAAAAcN/KUEdqyc6cOaPvvvtOe/bsUd++fRUQEKANGzYoMDBQBQoUSPd8cufOLWdnZx07dsxh+LFjxxQUFJTmdE5OTipatKika72mR0dHKzIyUrVr107R1s3NTW5ubumuCQAAAACAzJLhI91btmzRgw8+qGHDhmnEiBE6c+aMpGu38urXr1+G5uXq6qqwsDAtXbrUPiwpKUlLly5VtWrV0j2fpKQkxcfHZ2jZAAAAAABYLcOhu3fv3urYsaN27dold3d3+/DHH39cy5cvz3ABvXv31oQJEzR16lRFR0ere/fuunjxojp16iRJat++vUOYj4yM1OLFi7V3715FR0dr5MiRmjZtmp599tkMLxsAAAAAACtl+PTydevW6fPPP08xvECBAjft/CwtrVq10okTJ9S/f38dPXpU5cqV08KFC+2dqx04cEBOTv/738DFixf10ksv6dChQ/Lw8FCJEiX09ddfq1WrVhleNgAAAAAAVspw6HZzc0v1tls7d+5Unjx5bquIiIgIRUREpDruxg7S3n//fb3//vu3tRwAAAAA2dPcmLisLiFr5M3qAnCnMnx6edOmTTV48GBduXJFkmSz2XTgwAG98cYbat68eaYXCAAAAABAdpXhI90jR47UM888o7x58+ry5csKDw/X0aNHVa1aNQ0ZMsSKGgEAyDL37ZEViaMrAABkggyHbj8/Py1evFgrV67Uli1bdOHCBVWoUEH169e3oj4AAAAAALKt27pPtyQ98sgjeuSRRzKzFgAAAAAA7ikZDt0ff/xxqsNtNpvc3d1VtGhR1apVS87OzndcHAAAAAAA2VmGQ/fo0aN14sQJXbp0STlz5pQknT59Wp6envL29tbx48dVpEgRLVu2TMHBwZleMAAAAAAA2UWGey8
fOnSoKlWqpF27dunUqVM6deqUdu7cqSpVquijjz7SgQMHFBQUpFdffdWKegEAAAAAyDYyfKT7nXfe0Zw5cxQaGmofVrRoUY0YMULNmzfX3r17NXz4cG4fBgAAAAC472X4SHdcXJyuXr2aYvjVq1d19OhRSVL+/Pl1/vz5O68OAAAAAIBsLMOhu06dOnrhhRe0ceNG+7CNGzeqe/fuqlu3riRp69atKly4cOZVCQAAAABANpTh0D1x4kQFBAQoLCxMbm5ucnNzU8WKFRUQEKCJEydKkry9vTVy5MhMLxYAAAAAgOwkw9d0BwUFafHixdqxY4d27twpSSpevLiKFy9ub1OnTp3MqxAAAAAAgGwqw6E7WYkSJVSiRInMrAUAAAAAgHvKbYXuQ4cO6aefftKBAweUkJDgMG7UqFGZUhgAAAAAANldhkP30qVL1bRpUxUpUkQ7duzQww8/rH379skYowoVKlhRIwAAAAAA2VKGO1Lr16+f+vTpo61bt8rd3V1z5szRwYMHFR4erhYtWlhRIwAAAAAA2VKGQ3d0dLTat28vScqRI4cuX74sb29vDR48WMOGDcv0AgEAAAAAyK4yHLq9vLzs13Hny5dPe/bssY87efJk5lUGAAAAAEA2l+FruqtWraqVK1eqZMmSevzxx/Xaa69p69atmjt3rqpWrWpFjQAAAAAAZEsZDt2jRo3ShQsXJEmDBg3ShQsXNGvWLBUrVoyeywEAAAAAuE6GQndiYqIOHTqkMmXKSLp2qvn48eMtKQwAAAAAgOwuQ9d0Ozs769FHH9Xp06etqgcAAAAAgHtGhjtSe/jhh7V3714ragEAAAAA4J6S4dD9/vvvq0+fPpo3b57i4uJ07tw5hwcAAAAAALgmwx2pPf7445Kkpk2bymaz2YcbY2Sz2ZSYmJh51QEAAAAAkI1lOHQvW7bMijoAAAAAALjnZDh0h4eHW1EHAAAAAAD3nAxf0y1JK1as0LPPPqvq1avr8OHDkqRp06Zp5cqVmVocAAAAAADZWYZD95w5c9SwYUN5eHhow4YNio+PlySdPXtWQ4cOzfQCAQAAAADIrm6r9/Lx48drwoQJcnFxsQ+vUaOGNmzYkKnFAQAAAACQnWU4dMfExKhWrVophvv5+enMmTOZURMAAAAAAPeEDIfuoKAg7d69O8XwlStXqkiRIplSFAAAAAAA94IMh+6uXbuqZ8+e+vPPP2Wz2XTkyBFNnz5dffr0Uffu3a2oEQAAAACAbCnDtwx78803lZSUpHr16unSpUuqVauW3Nzc1KdPH73yyitW1AgAAAAAQLaU4dBts9n09ttvq2/fvtq9e7cuXLigUqVKydvb24r6AAAAAADItjJ8evnXX3+tS5cuydXVVaVKlVLlypUJ3AAAAAAApCLDofvVV19V3rx51bZtW/3yyy9KTEy0oi4AAAAAALK9DIfuuLg4zZw5UzabTS1btlS+fPn08ssva/Xq1VbUBwAAAABAtpXh0J0jRw498cQTmj59uo4fP67Ro0dr3759qlOnjkJDQ62oEQAAAACAbCnDHaldz9PTUw0bNtTp06e1f/9+RUdHZ1ZdAAAAAABkexk+0i1Jly5d0vTp0/X444+rQIECGjNmjJ566ilt3749s+sDAAAAACDbyvCR7tatW2vevHny9PRUy5Yt9e6776patWpW1AYAAAAAQLaW4dDt7Oys2bNnq2HDhnJ2dnYYt23bNj388MOZVhwAAAAAANlZhkP39OnTHZ6fP39e33zzjb788kutX7+eW4gBAAAAAPD/buuabklavny5OnTooHz58mnEiBGqW7eu/vjjj8ysDQAAAACAbC1DR7qPHj2qKVOmaOLEiTp37pxatmyp+Ph4/fDDDypVqpRVNQIAAAAAkC2l+0h3kyZNVLx4cW3ZskVjxozRkSNHNHbsWCtrAwAAAAAgW0v3ke4FCxaoR48e6t69u4oVK2ZlTQAAAAAA3BPSfaR75cqVOn/+vMLCwlSlShV98sknOnnypJW1AQAAAACQraU7dFetWlUTJkxQXFycXnjhBc2cOVP58+dXUlKSFi9erPPnz1tZJwAAAAAA2U6Gey/38vJS586dtXLlSm3dulWvvfaaPvjgA+XNm1dNmza1okYAAAAAALKl275lmCQVL15cw4cP16FDh/TNN99kVk0AAAAAANwT7ih0J3N2dlazZs30008/ZcbsAAAAAAC4J2RK6AYAAAAAACkRugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwSI6sLgDAvW1uTFxWl5A18mZ1AQAAAPgv4Eg3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYBFCNwAAAAAAFiF0AwAAAABgEUI3AAAAAAAWIXQDAAAAAGARQjcAAAAAABYhdAMAAAAAYJH/ROgeN26cQkJC5O7uripVqmjt2rVptp0wYYJq1qypnDlzKmfOnKpfv/5N2wMAAAAAkFWyPHTPmjVLvXv31oABA7RhwwaVLVtWDRs21PHjx1NtHxUVpTZt2mjZsmVas2aNgoOD9eijj+rw4cN3uXIAAAAAAG4uy0P3qFGj1LVrV3Xq1EmlSpXS+PHj5enpqUmTJqXafvr06XrppZdUrlw5lShRQl9++aWSkpK0dOnSu1w5AAAAAAA3l6WhOyEhQevXr1f9+vXtw5ycnFS/fn2tWbMmXfO4dOmSrly5ooCAAKvKBAAAAADgtuTIyoWfPHlSiYmJCgwMdBgeGBioHTt2pGseb7zxhvLnz+8Q3K8XHx+v+Ph4+/Nz587dfsEAAAAAAGRAlp9efic++OADzZw5U99//73c3d1TbRMZGSk/Pz/7Izg4+C5XCQAAAAC4X2Vp6M6dO7ecnZ117Ngxh+HHjh1TUFDQTacdMWKEPvjgAy1atEhlypRJs12/fv109uxZ++PgwYOZUjsAAAAAALeSpaHb1dVVYWFhDp2gJXeKVq1atTSnGz58uN577z0tXLhQFStWvOky3Nzc5Ovr6/AAAAAAAOBuyNJruiWpd+/e6tChgypWrKjKlStrzJgxunjxojp16iRJat++vQoUKKDIyEhJ0rBhw9S/f3/NmDFDISEhOnr0qCTJ29tb3t7eWbYeVpgbE5fVJWSNvFldAAAAAABkjiwP3a1atdKJEyfUv39/HT16VOXKldPChQvtnasdOHBATk7/OyD/2WefKSEhQc8884zDfAYMGKCBAwfezdIBAAAAALipLA/dkhQREaGIiIhUx0VFRTk837dvn/UFAQAAAACQCbJ17+UAAAAAAPyXEboBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAA
AAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAIsQugEAAAAAsAihGwAAAAAAixC6AQAAAACwCKEbAAAAAACLELoBAAAAALAIoRsAAAAAAItkeegeN26cQkJC5O7uripVqmjt2rVptt2+fbuaN2+ukJAQ2Ww2jRkz5u4VCgAAAABABmVp6J41a5Z69+6tAQMGaMOGDSpbtqwaNmyo48ePp9r+0qVLKlKkiD744AMFBQXd5WoBAAAAAMiYLA3do0aNUteuXdWpUyeVKlVK48ePl6enpyZNmpRq+0qVKunDDz9U69at5ebmdperBQAAAAAgY7IsdCckJGj9+vWqX7/+/4pxclL9+vW1Zs2aTFtOfHy8zp075/AAAAAAAOBuyLLQffLkSSUmJiowMNBheGBgoI4ePZppy4mMjJSfn5/9ERwcnGnzBgAAAADgZrK8IzWr9evXT2fPnrU/Dh48mNUlAQAAAADuEzmyasG5c+eWs7Ozjh075jD82LFjmdpJmpubG9d/AwAAAACyRJYd6XZ1dVVYWJiWLl1qH5aUlKSlS5eqWrVqWVUWAAAAAACZJsuOdEtS79691aFDB1WsWFGVK1fWmDFjdPHiRXXq1EmS1L59exUoUECRkZGSrnW+9vfff9v/Pnz4sDZt2iRvb28VLVo0y9YDAAAAAIDUZGnobtWqlU6cOKH+/fvr6NGjKleunBYuXGjvXO3AgQNycvrfwfgjR46ofPny9ucjRozQiBEjFB4erqioqLtdPgAAAAAAN5WloVuSIiIiFBERkeq4G4N0SEiIjDF3oSoAAAAAAO7cPd97OQAAAAAAWYXQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARQjdAAAAAABYhNANAAAAAIBFCN0AAAAAAFiE0A0AAAAAgEUI3QAAAAAAWITQDQAAAACARf4ToXvcuHEKCQmRu7u7qlSporVr1960/bfffqsSJUrI3d1dpUuX1i+//HKXKgUAAAAAIP2yPHTPmjVLvXv31oABA7RhwwaVLVtWDRs21PHjx1Ntv3r1arVp00bPP/+8Nm7cqGbNmqlZs2batm3bXa4cAAAAAICby/LQPWrUKHXt2lWdOnVSqVKlNH78eHl6emrSpEmptv/oo4/UqFEj9e3bVyVLltR7772nChUq6JNPPrnLlQMAAAAAcHNZGroTEhK0fv161a9f3z7MyclJ9evX15o1a1KdZs2aNQ7tJalhw4ZptgcAAAAAIKvkyMqFnzx5UomJiQoMDHQYHhgYqB07dqQ6zdGjR1Ntf/To0VTbx8fHKz4+3v787NmzkqRz587dSel3xaUL57O6hCzxr/u/WV1Cljjn/N/fJ28H+/H95V7cj+/XfVhiP76X3K/78f26D0vsx/eS+3U/zg77cHKmNMbctF2Whu67ITIyUoMGDUoxPDg4OAuqAdL2pt7M6hKAO8Z+jHsB+zHuBezHyO6y0z58/vx5+fn5pTk+S0N37ty55ezsrGPHjjkMP3bsmIKCglKdJigoKEPt+/Xrp969e9ufJyUl6Z9//lGuXLlks9nucA2Q2c6dO6fg4GAdPHhQvr6+WV0OcFvYj3EvYD9Gdsc+jHsB+/F/mzFG58+fV/78+W/aLktDt6urq8LCwrR06VI1a9ZM0rVQvHTpUkVERKQ6TbVq1bR06VL16tXLPmzx4sWqVq1aqu3d3Nzk5ubmMMzf3z8zyoeFfH19+WBBtsd+jHsB+zGyO/Zh3AvYj/+7bnaEO1mWn17eu3dvdejQQRUrVlTlypU1ZswYXbx4UZ06dZIktW/fXgUKFFBkZKQkqWfPngoPD9fIkSPVuHFjzZw5U3/99Ze++OKLrFwNAAAAAABSyPLQ3apVK504cUL9+/fX0aNHVa5cOS1cuNDeWdqBAwfk5PS/TtarV6+uGTNm6J133tFbb72lYsWK6YcfftDDDz+cVasAAAAAAECqsjx0S1JERESap5NHRUWlGNaiRQu1aNHC4qqQFdzc3DRgwIAUlwQA2Qn7Me4F7MfI7tiHcS9gP7432Myt+jcHAAAAAAC3xenWTQAAAAAAwO0gdAMAAAAAYBFC933KGKNu3bopICBANptNmzZtumn7ffv2patd7dq1HW7nlpqjR4+qQYMG8vLySvft26KiomSz2XTmzJl0tQeyg44dO9pvlwjg9tz4/cT3Bf4LQkJCNGbMmExvCyB7InTfpxYuXKgpU6Zo3rx5iouLu2Xv78HBwQ7t7uRHzejRoxUXF6dNmzZp586dt1M+AOD/WfWDnSCA28U+Ka1bt07dunXL9LZAV
pgyZUq6D5Qhdf+J3stx9+3Zs0f58uVT9erV09Xe2dlZQUFBmbbssLAwFStWLFPmB1gpMTFRNpvN4daFt5KQkCBXV1cLqwL++27nvQNktcz6/M6TJ48lbZE9ZMXnX1r77pUrV+Ti4nLX6kDq+Ca8D3Xs2FGvvPKKDhw4IJvNppCQEC1cuFCPPPKI/P39lStXLj3xxBPas2ePfZrrT9/bt2+f6tSpI0nKmTOnbDabOnbsaG+blJSk119/XQEBAQoKCtLAgQPt40JCQjRnzhx99dVX9ulSO3X9zJkzstlsqd4yTvrff9x+/fVXlSxZUt7e3mrUqJHi4uIc2n355ZcqWbKk3N3dVaJECX366af2cQkJCYqIiFC+fPnk7u6uQoUKKTIyUtK10+8HDhyoBx54QG5ubsqfP7969Ohxm1scN9u/kl//uXPnqk6dOvL09FTZsmW1Zs0a+/T79+9XkyZNlDNnTnl5eemhhx7SL7/8IkmqWLGiRowYYW/brFkzubi46MKFC5KkQ4cOyWazaffu3ZKk+Ph49enTRwUKFJCXl5eqVKnisJ8l71s//fSTSpUqJTc3Nx04cOCm65d8mviQIUOUP39+FS9eXJJ08OBBtWzZUv7+/goICNCTTz6pffv2pTmfpKQkRUZGqnDhwvLw8FDZsmX13Xff2ccVLFhQn332mcM0GzdulJOTk/bv3y9JGjVqlEqXLi0vLy8FBwfrpZdesm+L69fvVu+dSZMm6aGHHpKbm5vy5ctnv61j586d9cQTTzi0vXLlivLmzauJEyfedDvdr5KSkjR8+HAVLVpUbm5ueuCBBzRkyBBJ0tatW1W3bl15eHgoV65c6tatm8PrlbxvjRgxQvny5VOuXLn08ssv68qVK5KuXdKzf/9+vfrqq7LZbLLZbPZpV65cqZo1a8rDw0PBwcHq0aOHLl68KEn66quv5O3trV27dtnbv/TSSypRooQuXbp00/mmJa33zq3ec5K0atUq1a5dW56ensqZM6caNmyo06dPS7r55wduz/22T/7www8qVqyY3N3d1bBhQx08eNDeZuDAgSpXrpy+/PJLFS5cWO7u7pKu/Q7p0qWL8uTJI19fX9WtW1ebN292mP/PP/+sSpUqyd3dXblz59ZTTz1lH3f9Uflb/aa48Qj+gQMH9OSTT8rb21u+vr5q2bKljh07lqLmadOmKSQkRH5+fmrdurXOnz9/y22Sndzrvx2ktL9rpfTvBzfuuzabTZ999pmaNm0qLy8v+3v7xx9/VIUKFeTu7q4iRYpo0KBBunr1qn1+Z86c0QsvvKDAwEC5u7vr4Ycf1rx58xQVFaVOnTrp7Nmz9vde8m/7kJAQDR06VJ07d5aPj48eeOABffHFFw7reKvfQlFRUapcubL9stMaNWrYf9Ns3rxZderUkY+Pj3x9fRUWFqa//vrrltv1P8ngvnPmzBkzePBgU7BgQRMXF2eOHz9uvvvuOzNnzhyza9cus3HjRtOkSRNTunRpk5iYaIwxJjY21kgyGzduNFevXjVz5swxkkxMTIyJi4szZ86cMcYYEx4ebnx9fc3AgQPNzp07zdSpU43NZjOLFi0yxhhz/Phx06hRI9OyZUv7dNfPO9np06eNJLNs2TJjjDHLli0zkszp06eNMcZMnjzZuLi4mPr165t169aZ9evXm5IlS5q2bdva5/H111+bfPnymTlz5pi9e/eaOXPmmICAADNlyhRjjDEffvihCQ4ONsuXLzf79u0zK1asMDNmzDDGGPPtt98aX19f88svv5j9+/ebP//803zxxRdWviz3tJvtX8mvf4kSJcy8efNMTEyMeeaZZ0yhQoXMlStXjDHGNG7c2DRo0MBs2bLF7Nmzx/z888/m999/N8YY07t3b9O4cWNjjDFJSUkmICDA5M6d2yxYsMAYc20/KFCggL2WLl26mOrVq5vly5eb3bt3mw8//NC4ubmZnTt3GmP+t29Vr17drFq1yuzYscNcvHjxpuvXoUMH4+3tbZ577jmzbds2s23bNpOQkGBKlixpOnfubLZs2WL+/vtv07ZtW1O8eHETHx9vn+7JJ5+0z+f99983JUqUMAsXLjR79uwxkydPNm5ubiYqKsoYY0yfPn3MI4884rDs1157zWHY6NGjzW+//WZiY2PN0qVLTfHixU337t3t49Pz3vn000+Nu7u7GTNmjImJiTFr1641o0ePNsYYs2rVKuPs7GyOHDlibz937lzj5eVlzp8/f9PtdL96/fXXTc6cOc2UKVPM7t27zYoVK8yECRPMhQsXTL58+czTTz9ttm7dapYuXWoKFy5sOnToYJ+2Q4cOxtfX17z44osmOjra/Pzzz8bT09P+eXTq1ClTsGBBM3jwYBMXF2fi4uKMMcbs3r3beHl5mdGjR5udO3eaVatWmfLly5uOHTva592iRQtTqVIlc+XKFTNv3jzj4uJi/vrrr5vO92bSeu/c6j23ceNG4+bmZrp37242bdpktm3bZsaOHWtOnDhhjLn554cxJsV3yI3fF0jpftsnK1asaFavXm3++usvU7lyZVO9enV7mwEDBhgvLy/TqFEjs2HDBrN582ZjjDH169c3TZo0MevWrTM7d+40r732msmVK5c5deqUMcaYefPmGWdnZ9O/f3/z999/m02bNpmhQ4fa51uoUCH75+atflNc3zYxMdGUK1fOPPLII+avv/4yf/zxhwkLCzPh4eEONXt7e9tfp+XLl5ugoCDz1ltv3XKbZCf3+m+Hm33Xpnc/SG3flWTy5s1rJk2aZPbs2WP2799vli9fbnx9fc2UKVPMnj17zKJFi0xISIgZOHCgfXlVq1Y1Dz30kFm0aJF9e/3yyy8mPj7ejBkzxvj6+trfe8nf94UKFTIBAQFm3LhxZteuXSYyMtI4OTmZHTt2GGPMLX8LXblyxfj5+Zk+ffqY3bt3m7///ttMmTLF7N+/3xhjzEMPPWSeffZZEx0dbXbu3Glmz55tNm3adHs7VBYjdN+nRo8ebQoVKpTm+BMnThhJZuvWrcaY9P+oCQ8PTxEKKlWqZN544w378yeffNLhC/x2Q7cks3v3bvs048aNM4GBgfbnoaGh9hCd7L333jPVqlUzxhjzyiuvmLp165qkpKQU6z9y5Ejz4IMPmoSEhDS3EW7f9ftX8uv/5Zdf2sdv377dSDLR0dHGGGNKly5t/2K40U8//WT8/PzM1atXzaZNm0xQUJDp2bOnfZ/r0qWLPVDu37/fODs7m8OHDzvMo169eqZfv37GmP/tWxn5UO/QoYMJDAy0h2ljjJk2bZopXry4w/4VHx9vPDw8zK+//mqfLjl0//vvv8bT09OsXr3aYd7PP/+8adOmjTHmWjix2Wz2L6PExERToEAB89lnn6VZ27fffmty5cplf56e907+/PnN22+/neY8S5UqZYYNG2Z/3qRJE4cfzvifc+fOGTc3NzNhwoQU47744guTM2dOc+HCBfuw+fPnGycnJ3P06FFjzLV9pFChQubq1av2Ni1atDCtWrWyP7/+B3uy559/3nTr1s1h2IoVK4yTk5O5
fPmyMcaYf/75xxQsWNB0797dBAYGmiFDhji0T22+N5Paeyc977k2bdqYGjVqpHs5t/v9hGvux33yjz/+sA+Ljo42ksyff/5pjLkWXFxcXMzx48cd6vL19TX//vuvw/xCQ0PN559/bowxplq1aqZdu3ZpLvv6Wm/1m+L6tosWLTLOzs7mwIED9vHJ34lr16611+zp6WnOnTtnb9O3b19TpUqVW22SbO1e++1ws+/a9O4HN+67xlwL3b169UpR6/X/FDLm2u+UfPnyGWOM+fXXX42Tk5OJiYlJtZ7JkycbPz+/FMMLFSpknn32WfvzpKQkkzdvXvvvklv9Fjp16pSRZD+4cCMfHx/7wbLsjtPLIUnatWuX2rRpoyJFisjX11chISGSlK5TY25UpkwZh+f58uXT8ePHM6NMB56engoNDU11ORcvXtSePXv0/PPPy9vb2/54//337acmdezYUZs2bVLx4sXVo0cPLVq0yD6vFi1a6PLlyypSpIi6du2q77//3uEUHGRMevav6/ebfPnySZL99ezRo4fef/991ahRQwMGDNCWLVvsbWvWrKnz589r48aN+v333xUeHq7atWvbT/v6/fffVbt2bUnXTptMTEzUgw8+6LBf/P777w6nq7q6uqbYj2+ldOnSDtdSbd68Wbt375aPj499OQEBAfr3339TPTV29+7dunTpkho0aOBQ21dffWVvX65cOZUsWVIzZsywr9vx48fVokUL+3yWLFmievXqqUCBAvLx8dFzzz2nU6dO6dKlS/Y2N3vvHD9+XEeOHFG9evXSXNcuXbpo8uTJkqRjx45pwYIF6ty5c4a21/0iOjpa8fHxqW7P6OholS1bVl5eXvZhNWrUUFJSkmJiYuzDHnroITk7O9ufp+czdfPmzZoyZYrDvtSwYUMlJSUpNjZW0rXLgyZOnKjPPvtMoaGhevPNN+90dVO8d9Lzntu0adNN97fM/H7C/bdP5siRQ5UqVbI/L1GihPz9/RUdHW0fVqhQIYfrqjdv3qwLFy4oV65cDvXGxsame7+9XkZ+U0RHRys4OFjBwcH2YaVKlUpRc0hIiHx8fOzPrfqtlZXu5d8Ot/quTe9+cOO+m6xixYoOzzdv3qzBgwc71N+1a1fFxcXp0qVL2rRpkwoWLKgHH3wwXfVf7/p1ttlsCgoKsr8Gt/otFBAQoI4dO6phw4Zq0qSJPvroI4fL3Xr37q0uXbqofv36+uCDD7L1pUV0pAZJUpMmTVSoUCFNmDBB+fPnV1JSkh5++GElJCRkeF43dtZgs9mUlJSUZvvkTiaMMfZhydeGZXQ5yfNIviZnwoQJqlKlikO75B8KFSpUUGxsrBYsWKAlS5aoZcuWql+/vr777jsFBwcrJiZGS5Ys0eLFi/XSSy/pww8/1O+//05nFLchPfvX9ds1+Vq95P2mS5cuatiwoebPn69FixYpMjJSI0eO1CuvvCJ/f3+VLVtWUVFRWrNmjRo0aKBatWqpVatW2rlzp3bt2qXw8HBJ1/YLZ2dnrV+/3uEHoyR5e3vb//bw8EjX9YLXu/5HavKywsLCNH369BRtU/uCTN5n58+frwIFCjiMc3Nzs//drl07zZgxQ2+++aZmzJihRo0aKVeuXJKuXeP2xBNPqHv37hoyZIgCAgK0cuVKPf/880pISJCnp6ekm793PDw8brmu7du315tvvqk1a9Zo9erVKly4sGrWrHnL6e5H6dmet5LRz1Tp2v70wgsvpNoXxQMPPGD/e/ny5XJ2dlZcXJwuXrzo8CP+dtz43knPe+5W2ygzv59w/+2T6ZHa53e+fPlS7VcmuQfnjGxHK35T3M5rkN3cy78dMuN9KKXcd9MafuHCBQ0aNEhPP/10irbu7u53VM/N9sX0/BaaPHmyevTooYULF2rWrFl65513tHjxYlWtWlUDBw5U27ZtNX/+fC1YsEADBgzQzJkzHfpPyC440g2dOnVKMTExeuedd1SvXj2VLFnS3oFNWpKP6CUmJt7x8pPfdNf/Z+tW9wO/lcDAQOXPn1979+5V0aJFHR6FCxe2t/P19VWrVq00YcIEzZo1S3PmzNE///wj6doHYpMmTfTxxx/bP5S3bt16R3Xdj25n/0pNcHCwXnzxRc2dO1evvfaaJkyYYB8XHh6uZcuWafny5apdu7YCAgJUsmRJDRkyRPny5bP/57Z8+fJKTEzU8ePHU+wXmdU7f7IKFSpo165dyps3b4pl+fn5pWh/fccrN7a//j/dbdu21bZt27R+/Xp99913ateunX3c+vXrlZSUpJEjR6pq1ap68MEHdeTIkQzV7ePjo5CQEC1dujTNNrly5VKzZs00efJkTZkyRZ06dcrQMu4nxYoVk4eHR6rbs2TJktq8ebO9IynpWodiTk5O9s740sPV1TXFZ3GFChX0999/p9iXihYtav/8Xr16tYYNG6aff/5Z3t7eDh34pDXfjErPe65MmTJp7m+Z9fmB/7nf9smrV686dLwUExOjM2fOqGTJkmlOU6FCBR09elQ5cuRIUWvu3Lkl3Xy/TU16f1OULFlSBw8edOjs7e+//9aZM2dUqlSpdC8vu7vXfzvc6rs2s/eDChUqKCYmJtX3n5OTk8qUKaNDhw6leSvf2/0+SO9vofLly6tfv35avXq1Hn74YfsZfZL04IMP6tVXX9WiRYv09NNP28+0y24I3VDOnDmVK1cuffHFF9q9e7d+++039e7d+6bTFCpUSDabTfPmzdOJEyccejbNKA8PD1WtWlUffPCBoqOj9fvvv+udd9657fklGzRokCIjI/Xxxx9r586d2rp1qyZPnqxRo0ZJutbL8zfffKMdO3Zo586d+vbbbxUUFCR/f39NmTJFEydO1LZt27R37159/fXX8vDwUKFChe64rvvN7exfN+rVq5d+/fVXxcbGasOGDVq2bJnDD6batWvr119/VY4cOVSiRAn7sOnTp9v/Uy1d++Bu166d2rdvr7lz5yo2NlZr165VZGSk5s+fnzkr/P/atWun3Llz68knn9SKFSsUGxurqKgo9ejRQ4cOHUrR3sfHR3369NGrr76qqVOnas+ePdqwYYPGjh2rqVOn2tuFhISoevXqev7555WYmKimTZvaxxUtWlRXrlzR2LFjtXfvXk2bNk3jx4/PcO0DBw7UyJEj9fHHH2vXrl32Oq7XpUsXTZ06VdHR0erQoUOGl3G/cHd31xtvvKHXX3/dfqnAH3/8oYkTJ6pdu3Zyd3dXhw4dtG3bNi1btkyvvPKKnnvuOQUGBqZ7GSEhIVq+fLkOHz6skydPSpLeeOMNrV69WhEREdq0aZN27dqlH3/80R5izp8/r+eee049evTQY489punTp2vWrFn23vLTmm9Gpec9169fP61bt04vvfSStmzZoh07duizzz7TyZMnM+XzA47ut33SxcVFr7zyiv7880+tX79eHTt2VNWqVVW5cuU0p6lfv76qVaumZs2aadGiRdq3b59Wr16tt99+2x7gBwwYoG+++UYDBgxQdHS0tm7dqmHDhqU6v4z8pqhfv75Kly6tdu3aacO
GDVq7dq3at2+v8PDwFKcM38vuh98ON/uuzez9oH///vrqq680aNAgbd++XdHR0Zo5c6b993Z4eLhq1aql5s2ba/HixfYzQRcuXCjp2nvvwoULWrp0qU6ePOlwydrN3Oq3UGxsrPr166c1a9Zo//79WrRokXbt2qWSJUvq8uXLioiIUFRUlPbv369Vq1Zp3bp1N/2H2X9aFl9TjixyY0dqixcvNiVLljRubm6mTJkyJioqykgy33//vTEm9c7OBg8ebIKCgozNZrN3jBYeHm569uzpsKwbO0678bkxxvz999+mWrVqxsPDw5QrV84sWrTolh2p3dihw/fff29u3KWnT59uypUrZ1xdXU3OnDlNrVq1zNy5c40x1zqMKVeunPHy8jK+vr6mXr16ZsOGDfZ5ValSxfj6+hovLy9TtWpVs2TJknRtW6R0s/0rPR3pRUREmNDQUOPm5mby5MljnnvuOXPy5El7+1OnThmbzebQkU/y/jB+/HiHWhISEkz//v1NSEiIcXFxMfny5TNPPfWU2bJlizEm7c5CbubGXsiTxcXFmfbt25vcuXMbNzc3U6RIEdO1a1dz9uzZVKdLSkoyY8aMMcWLFzcuLi4mT548pmHDhvbeVpN9+umnRpJp3759imWOGjXK5MuXz3h4eJiGDRuar7766rbeO+PHj7fXkS9fPvPKK684jE9KSjKFChUyjz/+eDq30v0rMTHRvP/++6ZQoULGxcXFPPDAA/YObbZs2WLq1Klj3N3dTUBAgOnatatDL/Cp7Vs9e/Z06MF2zZo1pkyZMsbNzc3hdVy7dq1p0KCB8fb2Nl5eXqZMmTL2jqk6depkSpcu7dBR1MiRI01AQIA5dOjQTeeblrTeO7d6zxljTFRUlKlevbpxc3Mz/v7+pmHDhvZ9NqPfT3Skdmv32z45Z84cU6RIEePm5mbq169v74zSmGudUZUtWzbFtOfOnTOvvPKKyZ8/v3FxcTHBwcGmXbt2Dh1bzZkzx/4bI3fu3Obpp5+2j7u+c7Rb/aa4sYO4/fv3m6ZNmxovLy/j4+NjWrRoYe/ILq2ab9VBbnZ0r/92MObm37W3sx8YYxw+H6+3cOFCU716dePh4WF8fX1N5cqVHXrRP3XqlOnUqZPJlSuXcXd3Nw8//LCZN2+effyLL75ocuXKZSSZAQMGGGNS79ywbNmy9vHG3Py30NGjR02zZs1Mvnz5jKurqylUqJDp37+/SUxMNPHx8aZ169YmODjYuLq6mvz585uIiAh7x4vZjc2Y6y6kBQAgHS5cuKACBQpo8uTJqV4jBgBZbcqUKerVq5fOnDmT1aUAuM/RkRoAIN2SkpJ08uRJjRw5Uv7+/g6ntwMAACAlrukGgFu4/hYbNz5WrFiR1eXdVQcOHFBgYKBmzJihSZMmKUcO/nd7v3jsscfSfB8MHTo0q8vDfYh9Ev9l/HbA9Ti9HABuYffu3WmOK1CgQKbd+gP4Lzt8+LAuX76c6riAgAAFBATc5Ypwv2OfxH8Zvx1wPUI3AAAAAAAW4fRyAAAAAAAsQugGAAAAAMAihG4AAAAAACxC6AYAAAAAwCKEbgAAkGE2m00//PBDVpcBAMB/HqEbAIBsqmPHjrLZbHrxxRdTjHv55Zdls9nUsWPHdM0rKipKNptNZ86cSVf7uLg4PfbYYxmoFgCA+xOhGwCAbCw4OFgzZ850uF/xv//+qxkzZuiBBx7I9OUlJCRIkoKCguTm5pbp8wcA4F5D6AYAIBurUKGCgoODNXfuXPuwuXPn6oEHHlD58uXtw5KSkhQZGanChQvLw8NDZcuW1XfffSdJ2rdvn+rUqSNJypkzp8MR8tq1aysiIkK9evVS7ty51bBhQ0kpTy8/dOiQ2rRpo4CAAHl5ealixYr6888/JUmbN29WnTp15OPjI19fX4WFhemvv/6ycrMAAPCfkSOrCwAAAHemc+fOmjx5stq1aydJmjRpkjp16qSoqCh7m8jISH399dcaP368ihUrpuXLl+vZZ59Vnjx59Mgjj2jOnDlq3ry5YmJi5OvrKw8PD/u0U6dOVffu3bVq1apUl3/hwgWFh4erQIEC+umnnxQUFKQNGzYoKSlJktSuXTuVL19en332mZydnbVp0ya5uLhYt0EAAPgPIXQDAJDNPfvss+rXr5/2798vSVq1apVmzpxpD93x8fEaOnSolixZomrVqkmSihQpopUrV+rzzz9XeHi4AgICJEl58+aVv7+/w/yLFSum4cOHp7n8GTNm6MSJE1q3bp19PkWLFrWPP3DggPr27asSJUrY5wcAwP2C0A0AQDaXJ08eNW7cWFOmTJExRo0bN1bu3Lnt43fv3q1Lly6pQYMGDtMlJCQ4nIKelrCwsJuO37Rpk8qXL28P3Dfq3bu3unTpomnTpql+/fpq0aKFQkND07FmAABkf4RuAADuAZ07d1ZERIQkady4cQ7jLly4IEmaP3++ChQo4DAuPZ2heXl53XT89aeip2bgwIFq27at5s+frwULFmjAgAGaOXOmnnrqqVsuGwCA7I6O1AAAuAc0atRICQkJunLlir2zs2SlSpWSm5ubDhw4oKJFizo8goODJUmurq6SpMTExAwvu0yZMtq0aZP++eefNNs8+OCDevXVV7Vo0SI9/fTTmjx5coaXAwBAdkToBgDgHuDs7Kzo6Gj9/fffcnZ2dhjn4+OjPn366NVXX9XUqVO1Z88ebdiwQWPHjtXUqVMlSYUKFZLNZtO8efN04sQJ+9Hx9Gjzf+3aIY4iURSG0X9SCbIkTYLCkJRgH4BCkbRhBSwAg6ggYDNIDApTQbAHEhaARjJu1IgR/TKdzjn2mnfll/s+PzMYDLJYLNJ1Xe73e47HY67Xa16vV9brdS6XSx6PR7quy+12S9M0X7o/AHxXohsAfoi6rlPX9V9nu90u2+02+/0+TdNkOp3mdDplNBolSYbDYdq2zWazycfHx5+v6v+i1+vlfD6n3+9nPp9nMpnkcDikqqpUVZXn85nVapXxeJzlcpnZbJa2bb9kZwD47n693+/3/34EAAAA/EQu3QAAAFCI6AYAAIBCRDcAAAAUIroBAACgENENAAAAhYhuAAAAKER0AwAAQCGiGwAAAAoR3QAAAFCI6AYAAIBCRDcAAAAUIroBAACgkN+yn+H9utPOQQAAAABJRU5ErkJggg==\n"},"metadata":{}},{"output_type":"display_data","data":{"text/plain":["
"],"image/png":"iVBORw0KGgoAAAANSUhEUgAAA9gAAAJOCAYAAABMYq+bAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABm8UlEQVR4nO3dd3QV1eL28eekhxRCIBBKSOhF6Vy6dEykCKg0gxQptoA0vXCvSlFBFBTlIkURREBUwAZKUUAUkCpFQQSkQwgQQwiRkLLfP3gzPw4phDAIge9nrSw4M3v27Jmzz8k8mZk9DmOMEQAAAAAAuCEut7oBAAAAAADcCQjYAAAAAADYgIANAAAAAIANCNgAAAAAANiAgA0AAAAAgA0I2AAAAAAA2ICADQAAAACADQjYAAAAAADYgIANAAAAAIANCNgAAEg6deqUHnnkERUsWFAOh0OTJk261U3KtbCwMLVt2/ZWN+O6rFmzRg6HQ2vWrLnVTcmxUaNGyeFw6MyZM7dk/Q6HQ1FRUdmWOXTokBwOh2bPnv3PNAoA7nIEbAD4/2bPni2Hw6EtW7bc6qYgC4mJiRo1atRNCWGDBw/W8uXLNWLECH300UeKiIjIsqzD4XD68ff3V5MmTbR06dIsl4mLi5OXl5ccDof27NmTZbm0tDTNmTNHrVq1UqFCheTu7q7ChQvr/vvv14wZM5SUlHRD2wncTr755huNGjXqptR9M78vACArbre6AQAA5FRiYqJGjx4tSWratKmtda9atUrt27fXsGHDclS+VatW6tGjh4wxOnz4sKZOnap27drp22+/VXh4eIbyn332mRwOh4KDgzVv3jy98sorGcr8/fff6tixo5YvX64GDRpo2LBhKlKkiGJjY/XDDz/o6aef1saNGzVz5swb3l7cHUJDQ/X333/L3d39VjclU998842mTJlyU0L2zfy+AICsELAB4DZljNHFixfl7e19q5tyV4iJiVFAQECOy5cvX17du3e3Xj/88MOqXLmy3n777UwD9ty5c9W6dWuFhoZq/vz5mQbs9LPokyZN0rPPPus0b+jQodq3b59WrlyZ843CXc/hcMjLy+tWN8MWKSkpSktLk4eHx61uCgBkiUvEASAbvXr1kq+vr44cOaK2bdvK19dXxYsX15QpUyRJu3btUvPmzeXj42MFpyulX3a+du1aPfHEEypYsKD8/f3Vo0cP/fXXX05l0++bXb58uWrXri1vb29Nnz5dkvTnn3+qU6dOCgwMVL58+VSvXj2ny5FPnTolNzc362zNlfbu3SuHw6H//e9/1rS4uDgNGjRIISEh8vT0VNmyZTV+/HilpaVZZdLv3ZwwYYKmTJmi0qVLK1++fLr//vt19OhRGWP08ssvq0SJEvL29lb79u0VGxubYf3ffvut7rvvPvn4+MjPz09t2rTRb7/9lul+Pn78uDp06CBfX18FBQVp2LBhSk1NtdoTFBQkSRo9erR1efa1znxda9+lv0fGGE2ZMsWq93pVqlRJhQoV0oEDBzLMO3LkiH788Ud17dpVXbt21cGDB7V+/XqnMkePHtX777+viIiIDOE6Xbly5fT000/nuE0rVqxQ9erV5eXlpcqVK2vx4sVO82NjYzVs2DBVqVJFvr6+8vf31wMPPKAdO3ZkqGvy5Mm65557lC9fPhUoUEC1a9fO0N+PHz+uxx9/XEWKFJGnp6fuueceffDBBxnqOnbsmDp06CAfHx8VLlxYgwcPvq5L33/55Rc98MAD8vf3l6+vr1q0aKGff/7ZqUz6+7pu3ToNGTJEQUFB8vHxUceOHXX69Okcref3339X586dFRQUJG9vb1WoUEH//e9/M5SLi4tTr169FBAQoPz586t3795KTEy05md3H/TVfTj9vu79+/dnW2dWXnnlFbm4uGjy5MlZrjsnn7d0Z8+e1WOPPSZ/f38FBASoZ8+e2rFjR47u605OTtbo0aNVrlw5eXl5qWDBgmrUqJH1R6JevXpZ36VX3nJxZbsnTJigSZMmqUyZMvL09NTu3bt16dIlvfTSS6pVq5by588vHx8f3XfffVq9erXTPr/W98Xvv/+uRx55RIGBgfLy8lLt2rX11VdfZdiOnTt3qkmTJvL29laJEiX0yiuvaNasWXI4HDp06JAkqWfPnipUqJCSk5MzLH///ferQoUK2e4rAHcOzmADwDWkpqbqgQceUOPGjfX6669r3rx5ioqKko+Pj/773/8qMjJSDz30kKZNm6YePXqofv36KlWqlFMdUVFRCggI0KhRo7R3715NnTpVhw8ftgZ2Srd3715169ZNTzzxhPr166cKFSro1KlTatCggRITEzVw4EAVLFhQH374oR588EEtXLhQHTt2VJEiRdSkSRN9+umnGjlypNO6P/nkE7m6uqpTp06SLl822aRJEx0/flxPPPGESpYsqfXr12vEiBE6efJkhsG95s2bp0uXLmnAgAGKjY3V66+/rs6dO6t58+Zas2aN/v3vf2v//v2aPHmyhg0b5hSoPvroI/Xs2VPh4eEaP368EhMTNXXqVDVq1Ei//PKLwsLCnPZzeHi46tatqwkTJui7777TxIkTVaZMGT311FMKCgrS1KlT9dRTT6ljx4566KGHJElVq1bN8r3Lyb5r3LixPvroIz322GPWZd+5ce7cOf31118qU6ZMhnkff/yxfHx81LZtW3l7e6tMmTKaN2+eGjRoYJX59ttvlZqa6nRW/Ebs27dPXbp00ZNPPqmePXtq1qxZ6tSpk5YtW6ZWrVpJuvzHhy+++EKdOnVSqVKldOrUKU2fPl1NmjTR7t27VaxYMUnSe++9p4EDB+qRRx7Rs88+q4sXL2rnzp3auHGjHn30UUmX93W9evWsgbeCgoL07bffqk+fPoqPj9egQYMkXb4MvkWLFjpy5IgGDhyoYsWK6aOPPtKqVatytF2//fab7rvvPvn7++v555+Xu7u7pk+frqZNm+qHH35Q3bp1ncoPGDBABQoU0MiRI3Xo0CFNmjRJUVFR+uSTT7Jdz86dO3XffffJ3d1d/fv3V1hYmA4cOKCvv/5ar776qlPZzp07q1SpUho3bpy2bdum999/X4ULF9b48eNztE2ZyU2dL7zwgsaOHavp06erX79+2dZ/rc+bdHk8gHbt2mnTpk166qmnVLFiRX355Zfq2bNnjrZh1KhRGjdunPr27as6deooPj5eW7Zs0bZt29SqVSs98cQTOnHihFauXKmPPvoo0zpmzZqlixcvqn///vL09FRgYKDi4+P1/vvvq1u3burXr5/Onz+vmTNnKjw8XJs2bVL16tWv+X3x22+/qWHDhipevLiGDx8uHx8fffrpp+rQoYMWLVqkjh07Srr8R6NmzZrJ4XBoxIgR8vHx0fvvvy9PT0+ndj722GOaM2eOli9f7jTAYHR0tFatWpXhexnAHcwAAIwxxsyaNctIMps3b7am9ezZ00gyY8eOtab99ddfxtvb2zgcDrNgwQJr+u+//24kmZEjR2aos1atWubSpUvW9Ndff91IMl9++aU
1LTQ01Egyy5Ytc2rXoEGDjCTz448/WtPOnz9vSpUqZcLCwkxqaqoxxpjp06cbSWbXrl1Oy1euXNk0b97cev3yyy8bHx8f88cffziVGz58uHF1dTVHjhwxxhhz8OBBI8kEBQWZuLg4q9yIESOMJFOtWjWTnJxsTe/WrZvx8PAwFy9etNoYEBBg+vXr57Se6Ohokz9/fqfp6ft5zJgxTmVr1KhhatWqZb0+ffp0hn2cnZzuO2OMkWSeeeaZHNUryfTp08ecPn3axMTEmC1btpiIiAgjybzxxhsZylepUsVERkZar//zn/+YQoUKOe2/wYMHG0lm+/btTssmJSWZ06dPWz9nzpy5ZvvS+9KiRYusaefOnTNFixY1NWrUsKZdvHjRaR8Yc/l99/T0dHov2rdvb+65555s19mnTx9TtGjRDO3r2rWryZ8/v0lMTDTGGDNp0iQjyXz66adWmQsXLpiyZcsaSWb16tXZrqdDhw7Gw8PDHDhwwJp24sQJ4+fnZxo3bmxNS//stWzZ0qSlpVnTBw8ebFxdXZ36dGYaN25s/Pz8zOHDh52mX1nXyJEjjSTz+OOPO5Xp2LGjKViwoPU6/bM0a9asDOu5uj/ntM70ZdP77NChQ42Li4uZPXu2U5nM1p3Tz9uiRYuMJDNp0iRrWmpqqmnevHmW23OlatWqmTZt2mRb5plnnjGZHY6mt9vf39/ExMQ4zUtJSTFJSUlO0/766y9TpEgRp/2W3fdFixYtTJUqVazvK2Muv7cNGjQw5cqVs6YNGDDAOBwO88svv1jTzp49awIDA40kc/DgQWPM5f1SokQJ06VLF6f1vPnmm8bhcJg///wz2/0A4M7BJeIAkAN9+/a1/h8QEKAKFSrIx8dHnTt3tqZXqFBBAQEB+vPPPzMs379/f6dBhp566im5ubnpm2++cSpXqlSpDPfvfvPNN6pTp44aNWpkTfP19VX//v116NAh7d69W5L00EMPyc3NzenM3K+//qrdu3erS5cu1rTPPvtM9913nwoUKKAzZ85YPy1btlRqaqrWrl3rtP5OnTopf/781uv0M4Tdu3eXm5ub0/RLly7p+PHjkqSVK1cqLi5O3bp1c1qPq6ur6tat63Q5Z7onn3zS6fV9992X6f7MqZzuu9yYOXOmgoKCVLhwYdWuXVvff/+9nn/+eQ0ZMsSp3M6dO7Vr1y5169bNmpa+T5YvX25Ni4+Pt9p39TYEBQVZP6GhoTlqX7FixayzcJKsWxN++eUXRUdHS5I8PT3l4nL5UCA1NVVnz56Vr6+vKlSooG3btlnLBgQE6NixY9q8eXOm6zLGaNGiRWrXrp2MMU7vd3h4uM6dO2fV980336ho0aJ65JFHrOXz5cun/v37X3ObUlNTtWLFCnXo0EGlS5e2phctWlSPPvqofvrpJ2s/puvfv7/TVSL33XefUlNTdfjw4SzXc/r0aa1du1aPP/64SpYs6TQvs9sHMuu3Z8+ezdCW65HTOo0xioqK0ttvv625c+fm+OxyVuu48vO2bNkyubu7O50Nd3Fx0TPPPJOj+gMCAvTbb79p3759OW7T1R5++GHrUu90rq6u1n3YaWlpio2NVUpKimrXru3Ub7MSGxurVatWqXPnzjp//rzVV8+ePavw8HDt27fP+h5btmyZ6tevr+rVq1vLBwYGKjIy0qlOFxcXRUZG6quvvtL58+et6elXqlx9VROAOxcBGwCuwcvLK8MBXv78+VWiRIkMB9v58+fPcG+1dPne2Sv5+vqqaNGi1v176TI7CDt8+HCm9+9VqlTJmi9JhQoVUosWLfTpp59aZT755BO5ublZl0dKly8dXrZsmVNoCwoKUsuWLSVdHuzrSlcHjPSwHRISkun09O1PP6hu3rx5hnWtWLEiw3oy288FChTIdH/mVE73XW60b99eK1eu1NKlS637ZhMTE63Amm7u3Lny8fFR6dKltX//fu3fv19eXl4KCwvTvHnzrHJ+fn6SpISEBKflGzZsqJUrV2rlypW6//77c9y+smXLZuif5cuXlySr36Wlpemtt95SuXLl5OnpqUKFCikoKEg7d+7UuXPnrOX+/e9/y9fXV3Xq1FG5cuX0zDPPaN26ddb806dPKy4uTjNmzMjwXvfu3VvS//Wrw4cPZ9q2nNyjevr0aSUmJmb5nqalpeno0aNO06/uvwUKFJCkbPtVesi89957r9mm3K7DrjrnzJmjKVOmaPLkyU5/xLmWnHzeDh8+rKJFiypfvnxO5cqWLZujdYwZM0ZxcXEqX768qlSpoueee047d+7McRulzL8TJenDDz9U1apVrXu7g4KCtHTpUqd+m5X9+/fLGKMXX3wxQ39Nv5T76v56tcym9ejRQ3///bc+//xzSZdv+dm6dasee+yxHG8vgLyPe7AB4BpcXV2va7oxJtfrutERw7t27arevXtr+/btql69uj799FO1aNFChQoVssqkpaWpVatWev755zOtIz2Epcvt9qcPmPbRRx8pODg4Q7krz35nV9/tqkSJEtYfJVq3bq1ChQopKipKzZo1s/6gYYzRxx9/rAsXLqhy5coZ6oiJiVFCQoJ8fX1VsWJFSZevOqhWrZpV5so/fsydO9fWbRg7dqxefPFFPf7443r55ZcVGBgoFxcXDRo0yGnAu0qVKmnv3r1asmSJli1bpkWLFundd9/VSy+9pNGjR1tlu3fvnuUZ1Ozulb+Zbsbn9HrXkdWgeVcPKHY9daZr2LChtm/frv/973/q3LmzAgMDc9Lkf+Tz1rhxYx04cEBffvmlVqxYoffff19vvfWWpk2b5nRVUHYy+06cO3euevXqpQ4dOui5555T4cKF5erqqnHjxmU6yODV0vvrsGHDMh3xX8r5HxGuVLlyZdWqVUtz585Vjx49NHfuXHl4eDhd6QTgzkfABoB/wL59+9SsWTPrdUJCgk6ePKnWrVtfc9nQ0FDt3bs3w/Tff//dmp+uQ4cOeuKJJ6zLxP/44w+NGDHCabkyZcooISHBCm03S/pgX4ULF7ZtXdc7uvf17Lsb9cQTT+itt97SCy+8oI4dO8rhcOiHH37QsWPHNGbMGOusebq//vpL/fv31xdffKHu3bvrgQcekKurq+bNm5fh8tPcSD9Ld+U+++OPPyTJGlxu4cKFatasWYbnasfFxTn9UUaSfHx81KVLF3Xp0kWXLl3SQw89pFdffVUjRoxQUFCQ/Pz8lJqaes33OjQ0VL/++muGtmX2Pl0tKChI+fLly/I9dXFxyXBlRW6kX37+66+/3nBd0v+dfY6Li3OafiNXUKQrW7asXn/9dTVt2lQRERH6/vvvrashblRoaKhWr16txMREp7PY+/fvz3EdgYGB6t27t3r37q2EhAQ1btxYo0aNsgJ2bkbsX7hwoUqXLq3Fixc7LX/1QGJZ1Z3+/rq7u+eov2a2vVntgx49emjIkCE6efKk5s+frzZt2ljvP4C7A5eIA8A/YMaMGU6Pb5k6dapSUlL0wAMPXHPZ1q1ba9OmTdqwYYM17cKFC5oxY4bCwsKczowGBAQoPDxcn376qRYsWC
APDw916NDBqb7OnTtrw4YNTvf/pouLi1NKSkoutjCj8PBw+fv7a+zYsZk+uianj0q6UvpB/tVBJSvXs+9ulJubm4YOHao9e/boyy+/lPR/l4c/99xzeuSRR5x++vXrp3LlylmXiZcsWVKPP/64vv32W6dHql3pes66njhxwrpUVbp8j/ecOXNUvXp164oCV1fXDHV+9tln1v2n6c6ePev02sPDQ5UrV5YxRsnJyXJ1ddXDDz+sRYsWZRpKr3yvW7durRMnTmjhwoXWtMTERM2YMeOa2+Tq6qr7779fX375pdPtFadOndL8+fPVqFEj+fv7X7OeawkKClLjxo31wQcf6MiRI07zcnPm29/fX4UKFcowvsG77757Q+1MV7VqVX3zzTfas2eP2rVrp7///tuWesPDw5WcnKz33nvPmpaWlmY9Wutaru43vr6+Klu2rNMj2Xx8fCTl/DMt/d/Z9yvfi40bNzp9zqWsvy8KFy6spk2bavr06Tp58mSG+q/sr+Hh4dqwYYO2b99uTYuNjXW6veNK3bp1k8Ph0LPPPqs///zTtqcCAMg7OIMNAP+AS5cuqUWLFurcubP27t2rd999V40aNdKDDz54zWWHDx+ujz/+WA888IAGDhyowMBAffjhhzp48KAWLVqU4Z7fLl26qHv37nr33XcVHh6ugIAAp/nPPfecvvrqK7Vt21a9evVSrVq1dOHCBe3atUsLFy7UoUOHMpy9zA1/f39NnTpVjz32mGrWrKmuXbsqKChIR44c0dKlS9WwYcMsg2RWvL29VblyZX3yyScqX768AgMDde+992Z5r+z17rsb1atXL7300ksaP368HnjgAS1atEitWrWSl5dXpuUffPBBvf3224qJiVHhwoU1adIkHTx4UAMGDNCCBQvUrl07FS5cWGfOnNG6dev09ddf5/h5uuXLl1efPn20efNmFSlSRB988IFOnTqlWbNmWWXatm2rMWPGqHfv3mrQoIF27dqlefPmOQ0gJl1+jm9wcLAaNmyoIkWKaM+ePfrf//6nNm3aWGdLX3vtNa1evVp169ZVv379VLlyZcXGxmrbtm367rvvrGek9+vXT//73//Uo0cPbd26VUWLFtVHH32U4T7frLzyyitauXKlGjVqpKefflpubm6aPn26kpKS9Prrr+eojpx455131KhRI9WsWVP9+/dXqVKldOjQIS1dutQpbOVU37599dprr6lv376qXbu21q5da11RYId69erpyy+/VOvWrfXII4/oiy++cBpYMTc6dOigOnXqaOjQodq/f78qVqyor776ynovr3X2uXLlymratKlq1aqlwMBAbdmyRQsXLlRUVJRVplatWpKkgQMHKjw8XK6ururatWu29bZt21aLFy9Wx44d1aZNGx08eFDTpk1T5cqVncYwyO77YsqUKWrUqJGqVKmifv36qXTp0jp16pQ2bNigY8eOWc+Cf/755zV37ly1atVKAwYMsB7TVbJkScXGxmbYB0FBQYqIiNBnn32mgIAAtWnTJuc7HMCd4R8ftxwAblNZPabLx8cnQ9kmTZpk+tii0NBQp8fSpNf5ww8/mP79+5sCBQoYX19fExkZac6ePZvtslc6cOCAeeSRR0xAQIDx8vIyderUMUuWLMm0bHx8vPH29jaSzNy5czMtc/78eTNixAhTtmxZ4+HhYQoVKmQaNGhgJkyYYD1OLP0xOVc/dmr16tVGkvnss8+cpme2/9LLh4eHm/z58xsvLy9TpkwZ06tXL7NlyxarTFb7Of2RRVdav369qVWrlvHw8MjRI7tyuu90nY/pyqrsqFGjrEdkSTIzZ87Msp41a9YYSebtt9+2pqWkpJhZs2aZ5s2bm8DAQOPm5mYKFSpkWrRoYaZNm2b+/vvva7YvvS8tX77cVK1a1Xh6epqKFStmeM8uXrxohg4daooWLWq8vb1Nw4YNzYYNG0yTJk1MkyZNrHLTp083jRs3NgULFjSenp6mTJky5rnnnjPnzp1zqu/UqVPmmWeeMSEhIcbd3d0EBwebFi1amBkzZjiVO3z4sHnwwQdNvnz5TKFChcyzzz5rli1blqPHdBljzLZt20x4eLjx9fU1+fLlM82aNTPr1693KpNdf8zpen799VfTsWNHq+9UqFDBvPjii9b89P55+vTpTNed/ggnY4xJTEw0ffr0Mfnz5zd+fn6mc+fOJiYmJsvHdOWkzsz64Zdffmnc3NxMly5dTGpqapaP6crp5+306dPm0UcfNX5+fiZ//vymV69eZt26dUaS02MKM/PKK6+YOnXqmICAAOPt7W0qVqxoXn31VadHFqakpJgBAwaYoKAg43A4rPVn9f1jzOXHaY0dO9aEhoYaT09PU6NGDbNkyRLTs2dPExoa6lQ2u++LAwcOmB49epjg4GDj7u5uihcvbtq2bWsWLlzoVMcvv/xi7rvvPuPp6WlKlChhxo0bZ9555x0jyURHR2do36effmokmf79+2e7fwDcmRzG2DjKBwDAyezZs9W7d29t3rxZtWvXvtXNAYAb9sUXX6hjx4766aef1LBhw1vdnFti0KBBmj59uhISEjIMGPfll1+qQ4cOWrt2re67775b1EIAtwr3YAMAACBTV9/PnZqaqsmTJ8vf3181a9a8Ra36Z129D86ePauPPvpIjRo1ynQ09vfee0+lS5dWo0aN/qkmAriNcA82AAAAMjVgwAD9/fffql+/vpKSkrR48WKtX79eY8eOveHHCuYV9evXV9OmTVWpUiWdOnVKM2fOVHx8vF588UWncgsWLNDOnTu1dOlSvf3227kaIR1A3kfABgAAQKaaN2+uiRMnasmSJbp48aLKli2ryZMnOw1Udqdr3bq1Fi5cqBkzZsjhcKhmzZqaOXOmGjdu7FSuW7du8vX1VZ8+ffT000/fotYCuNXy3D3YU6ZM0RtvvKHo6GhVq1ZNkydPVp06dbIs/9lnn+nFF1/UoUOHVK5cOY0fP97pubPGGI0cOVLvvfee4uLi1LBhQ02dOlXlypX7JzYHAAAAAHCHyFP3YH/yyScaMmSIRo4cqW3btqlatWoKDw9XTExMpuXXr1+vbt26qU+fPvrll1/UoUMHdejQwekZna+//rreeecdTZs2TRs3bpSPj4/Cw8N18eLFf2qzAAAAAAB3gDx1Brtu3br617/+ZT03NS0tTSEhIRowYICGDx+eoXyXLl104cIFLVmyxJpWr149Va9eXdOmTZMxRsWKFdPQoUM1bNgwSdK5c+dUpEgRzZ49+5rPYQQAAAAAIF2euQf70qVL2rp1q0aMGGFNc3FxUcuWLbVhw4ZMl9mwYYOGDBniNC08PFxffPGFJOngwYOKjo5Wy5Ytrfn58+dX3bp1tWHDhiwDdlJSkpKSkqzXaWlpio2NVcGCBRnQAgAAAADuMMYYnT9/XsWKFZOLS9YXgueZgH3mzBmlpqaqSJEiTtOLFCmi33//PdNloqOjMy0fHR1tzU+fllWZzIwbN06jR4++7m0AAAAAAORdR48eVYkSJbKcn2cC9u1kxIgRTmfGz507p5IlS+ro0
aPy9/e/hS3L2rj84251E24LI86NuHYh5Gn0dfr53YK+Tl+/G9DPL6Ov3/no67d/P4+Pj1dISIj8/PyyLZdnAnahQoXk6uqqU6dOOU0/deqUgoODM10mODg42/Lp/546dUpFixZ1KlO9evUs2+Lp6SlPT88M0/39/W/bgO0lr1vdhNvC7fr+wD70dfr53YK+Tl+/G9DPL6Ov3/no63mnn1/rluA8M4q4h4eHatWqpe+//96alpaWpu+//17169fPdJn69es7lZeklStXWuVLlSql4OBgpzLx8fHauHFjlnUCAAAAAJCZPHMGW5KGDBminj17qnbt2qpTp44mTZqkCxcuqHfv3pKkHj16qHjx4ho37vIlFs8++6yaNGmiiRMnqk2bNlqwYIG2bNmiGTNmSLr814dBgwbplVdeUbly5VSqVCm9+OKLKlasmDp06HCrNhMAAAAAkAflqYDdpUsXnT59Wi+99JKio6NVvXp1LVu2zBqk7MiRI04jujVo0EDz58/XCy+8oP/85z8qV66cvvjiC917771Wmeeff14XLlxQ//79FRcXp0aNGmnZsmXy8uIyDQAAAABAzuWpgC1JUVFRioqKynTemjVrMkzr1KmTOnXqlGV9DodDY8aM0ZgxY+xqIgAAAADgLpRn7sEGAAAAAOB2RsAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbJBnAnZsbKwiIyPl7++vgIAA9enTRwkJCdkuc/HiRT3zzDMqWLCgfH199fDDD+vUqVNOZRwOR4afBQsW3MxNAQAAAADcgfJMwI6MjNRvv/2mlStXasmSJVq7dq369++f7TKDBw/W119/rc8++0w//PCDTpw4oYceeihDuVmzZunkyZPWT4cOHW7SVgAAAAAA7lRut7oBObFnzx4tW7ZMmzdvVu3atSVJkydPVuvWrTVhwgQVK1YswzLnzp3TzJkzNX/+fDVv3lzS5SBdqVIl/fzzz6pXr55VNiAgQMHBwf/MxgAAAAAA7kh54gz2hg0bFBAQYIVrSWrZsqVcXFy0cePGTJfZunWrkpOT1bJlS2taxYoVVbJkSW3YsMGp7DPPPKNChQqpTp06+uCDD2SMuTkbAgAAAAC4Y+WJM9jR0dEqXLiw0zQ3NzcFBgYqOjo6y2U8PDwUEBDgNL1IkSJOy4wZM0bNmzdXvnz5tGLFCj399NNKSEjQwIEDs2xPUlKSkpKSrNfx8fG52CoAAAAAwJ3klgbs4cOHa/z48dmW2bNnz01tw4svvmj9v0aNGrpw4YLeeOONbAP2uHHjNHr06JvaLgAAAABA3nJLA/bQoUPVq1evbMuULl1awcHBiomJcZqekpKi2NjYLO+dDg4O1qVLlxQXF+d0FvvUqVPZ3m9dt25dvfzyy0pKSpKnp2emZUaMGKEhQ4ZYr+Pj4xUSEpLtdgAAAAAA7my3NGAHBQUpKCjomuXq16+vuLg4bd26VbVq1ZIkrVq1Smlpaapbt26my9SqVUvu7u76/vvv9fDDD0uS9u7dqyNHjqh+/fpZrmv79u0qUKBAluFakjw9PbOdDwAAAAC4++SJe7ArVaqkiIgI9evXT9OmTVNycrKioqLUtWtXawTx48ePq0WLFpozZ47q1Kmj/Pnzq0+fPhoyZIgCAwPl7++vAQMGqH79+tYI4l9//bVOnTqlevXqycvLSytXrtTYsWM1bNiwW7m5AAAAAIA8KE8EbEmaN2+eoqKi1KJFC7m4uOjhhx/WO++8Y81PTk7W3r17lZiYaE176623rLJJSUkKDw/Xu+++a813d3fXlClTNHjwYBljVLZsWb355pvq16/fP7ptAAAAAIC8L88E7MDAQM2fPz/L+WFhYRker+Xl5aUpU6ZoypQpmS4TERGhiIgIW9sJAAAAALg75YnnYAMAAAAAcLsjYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2IGADAAAAAGADAjYAAAAAADYgYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2IGADAAAAAGADAjYAAAAAADYgYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2IGADAAAAAGADAjYAAAAAADYgYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2IGADAAAAAGADAjYAAAAAADYgYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2cLvVDQAAAAAyM9KMvNVNAIDrwhlsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAA
AADABgRsAAAAAABsQMAGAAAAAMAGBGwAAAAAAGxAwAYAAAAAwAYEbAAAAAAAbEDABgAAAADABnkmYMfGxioyMlL+/v4KCAhQnz59lJCQkO0yM2bMUNOmTeXv7y+Hw6G4uDhb6gUAAAAA4Gp5JmBHRkbqt99+08qVK7VkyRKtXbtW/fv3z3aZxMRERURE6D//+Y+t9QIAAAAAcDW3W92AnNizZ4+WLVumzZs3q3bt2pKkyZMnq3Xr1powYYKKFSuW6XKDBg2SJK1Zs8bWegEAAAAAuFqeOIO9YcMGBQQEWCFYklq2bCkXFxdt3LjxH683KSlJ8fHxTj8AAAAAgLtbngjY0dHRKly4sNM0Nzc3BQYGKjo6+h+vd9y4ccqfP7/1ExISkus2AAAAAADuDLc0YA8fPlwOhyPbn99///1WNjFTI0aM0Llz56yfo0eP3uomAQAAAABusVt6D/bQoUPVq1evbMuULl1awcHBiomJcZqekpKi2NhYBQcH53r9ua3X09NTnp6euV4vAAAAAODOc0sDdlBQkIKCgq5Zrn79+oqLi9PWrVtVq1YtSdKqVauUlpamunXr5nr9N6teAAAAAMDdJ0/cg12pUiVFRESoX79+2rRpk9atW6eoqCh17drVGun7+PHjqlixojZt2mQtFx0dre3bt2v//v2SpF27dmn79u2KjY3Ncb0AAAAAAOREngjYkjRv3jxVrFhRLVq0UOvWrdWoUSPNmDHDmp+cnKy9e/cqMTHRmjZt2jTVqFFD/fr1kyQ1btxYNWrU0FdffZXjegEAAAAAyIk88RxsSQoMDNT8+fOznB8WFiZjjNO0UaNGadSoUTdULwAAAAAAOZFnzmADAAAAAHA7I2ADAAAAAGADAjYAAAAAADYgYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2IGADAAAAAGADAjYAAAAAADZwu9UNAAAAAIC72Ugz8lY3ATbhDDYAAAAAADYgYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2IGADAAAAAGADAjYAAAAAADYgYAMAAAAAYAMCNgAAAAAANiBgAwAAAABgAwI2AAAAAAA2IGADAAAAAGADAjYAAAAAADYgYAMAAAAAYAMCNgAAAAAANshVwHZ1dVVMTEyG6WfPnpWrq+sNNwoAAAAAgLwmVwHbGJPp9KSkJHl4eNxQgwAAAAAAyIvcrqfwO++8I0lyOBx6//335evra81LTU3V2rVrVbFiRXtbCAAAAABAHnBdAfutt96SdPkM9rRp05wuB/fw8FBYWJimTZtmbwsBAAAAAMgDritgHzx4UJLUrFkzLV68WAUKFLgpjQIAAAAAIK+5roCdbvXq1Xa3AwAAAACAPC1XATs1NVWzZ8/W999/r5iYGKWlpTnNX7VqlS2NAwAAAAAgr8hVwH722Wc1e/ZstWnTRvfee68cDofd7QIAAAAAIE/JVcBesGCBPv30U7Vu3dru9gAAAAAAkCfl6jnYHh4eKlu2rN1tAQAAAAAgz8pVwB46dKjefvttGWPsbg8AAAAAAHlSri4R/+mnn7R69Wp9++23uueee+Tu7u40f/HixbY0DgAAAACAvCJXATsgIEAdO3a0uy0AAAAAAORZuQrYs2bNsrsdAAAAAADkabm6B1uSUlJS9N1332n69Ok6f/68JOnEiRNKSEiwrXEAAAAAAOQVuTqDffjwYUVEROjIkSNKSkpSq1at5Ofnp/HjxyspKUnTpk2zu50AAAAAANzWcnUG+9lnn1Xt2rX1119/ydvb25resWNHff/997Y1DgAAAACAvCJXZ7B//PFHrV+/Xh4eHk7Tw8LCdPz4cVsaBgAAAABAXpKrM9hpaWlKTU3NMP3YsWPy8/O74UYBAAAAAJDX5Cpg33///Zo0aZL12uFwKCEhQSNHjlTr1q3tahsAAAAAAHlGri4RnzhxosLDw1W5cmVdvHhRjz76qPbt26dChQrp448/truNAAAAAADc9nIVsEuUKKEdO3ZowYIF2rlzpxISEtSnTx9FRkY6DXoGAAAAAMDdIlcBW5Lc3NzUvXt3O9sCAAAAAECeleuAfeLECf3000+KiYlRWlqa07yBAwfecMMAAAAAAMhLchWwZ8+erSeeeEIeHh4qWLCgHA6HNc/hcBCwAQAAAAB3nVwF7BdffFEvvfSSRowYIReXXA1EDgAAAADAHSVX6TgxMVFdu3YlXAMAAAAA8P/lKiH36dNHn332md1tAQAAAAAgz8rVJeLjxo1T27ZttWzZMlWpUkXu7u5O8998801bGgcAAAAAQF6R64C9fPlyVahQQZIyDHIGAAAAAMDdJlcBe+LEifrggw/Uq1cvm5sDAAAAAEDelKt7sD09PdWwYUO72wIAAAAAQJ6Vq4D97LPPavLkyXa3BQAAAACAPCtXl4hv2rRJq1at0pIlS3TPPfdkGORs8eLFtjQOAAAAAIC8IlcBOyAgQA899JDdbQEAAAAAIM/KVcCeNWuW3e0AAAAAACBPy9U92JKUkpKi7777TtOnT9f58+clSSdOnFBCQoJtjQMAAAAAIK/I1Rnsw4cPKyIiQkeOHFFSUpJatWolPz8/jR8/XklJSZo2bZrd7QQAAAAA4LaW61HEa9eurb/++kve3t7W9I4dO+r777+3rXEAAAAAAOQVuTqD/eOPP2r9+vXy8PBwmh4WFqbjx4/b0jAAAAAAAPKSXJ3BTktLU2pqaobpx44dk5+f3w03CgAAAACAvCZXAfv+++/XpEmTrNcOh0MJCQkaOXKkWrdubVfbAAAAAADIM3J1ifjEiRMVHh6uypUr6+LFi3r00Ue1b98+FSpUSB9//LHdbQQAAAAA4LaXq4BdokQJ7dixQwsWLNDOnTuVkJCgPn36KDIy0mnQMwAAAAAA7ha5CtgXL16Ul5eXunfvbnd7AAAAAADIk3J1D3bhwoXVs2dPrVy5UmlpaXa3CQAAAACAPCdXAfvDDz9UYmKi2rdvr+LFi2vQoEHasmWL3W0DAAAAACDPyFXA7tixoz777DOdOnVKY8eO1e7du1WvXj2VL19eY8aMsbuNAAAAAADc9nIVsNP5+fmpd+/eWrFihXbu3CkfHx+NHj3arrYBAAAAAJBn3FDAvnjxoj799FN16NBBNWvWVGxsrJ577jm72gYAAAAAQJ6Rq1HEly9frvnz5+uLL76Qm5ubHnnkEa1YsUKNGze2u30AAAAAAOQJuQrYHTt2VNu2bTVnzhy1bt1a7u7udrcLAAAAAIA8JVeXiJ86dUqffvqp2rdv/4+F69jYWEVGRsrf318BAQHq06ePEhISsl1mxowZatq0qfz9/eVwOBQXF5ehTFhYmBwOh9PPa6+9dpO2AgAAAABwp8rVGWw/Pz+lpqbqiy++0J49eyRJlStXVvv27eXq6mprA9NFRkbq5MmTWrlypZKTk9W7d2/1799f8+fPz3KZxMRERUREKCIiQiNGjMiy3JgxY9SvXz/rtZ+fn61tBwAAAADc+XIVsPfv36/WrVvr+PHjqlChgiRp3LhxCgk
[Jupyter notebook cell outputs: consecutive "display_data" blocks containing base64-encoded matplotlib (v3.7.1) PNG figures; binary image payloads omitted.]
VnZ5OTkxO5ubkpvL4yd1957Vnmelhyb9GTJ09SREQE7ycL6LRw4UI6cuQIp1tyT2YZqampVLFiRapYsaLCfBIVL13x9PQka2trSk9PV6q7c+dO7voyMjMzuWB2JZHtE65KQJe/m2/FDsijvLYhJiZGUD9kgU/btGlDERERCoPeqOKOfuvWLbWf35s3b8je3p5q1qypMMhiWfuEt2rVSlAXf/vtNwJAx48fl3vNz2FvUlJS5N5Pd3d3sre3p4iICPrrr7+IqDiGQ2m9mTNncmsiIyIiuHWusvvYvXt33vW6detGWlpavKU1qpa9Xbt2pKOjQ48ePeKl2aFDB9LQ0OClef78eZJIJBQQECBY9leaIUOGEAAuEKQiytsnfQt8a7ZAZlNLvo/K4veUDERMRPT06VNB3Y2OjiYzMzNyd3dXui93VlYWmZqaCtySe/bsSVKplN69e8fJZMsPS74XPnz4kDQ1NWnChAnlSlNeH/X8+XMyMDCgxo0bc7LU1FS5a8Rl+4SXjDMiry+9c+cOaWtr8/a/TktLE9T9oqIi6tq1q8K68LnejdSxK+rkU979zMvLI09PT9LV1eXWX79584Y0NDTI39+f967+4sUL0tfXp1atWvHS+DtsQK1atcjQ0JC3Tv7kyZMEgObPn6/03L+Lf4w7ert27bB161YYGRnBzc0NV65cwenTp3l7x6rDli1bEBYWho4dO8LZ2RkZGRlYv349DA0NeV/SKleuDCsrKzx69AjDhw/n5L6+vpgwYQIACPblnDt3Ls6dO4f69etjwIABcHNzw/v373Hr1i2cPn1aJVeOT+FTrr9q1So0atQINWrUwIABA+Dk5ITk5GRcuXIFiYmJuHv3Lk+/a9eu+OWXX7j9ZEtvb/G5n5uhoSGWLFmC77//HvXq1UP37t1hYmKCu3fvIisri7d3uba2NkJCQrBy5UpoamqiW7du5brmv4n69esjODgYEydOxJs3b1C5cmVs2bIFcXFx2LBhA6fXu3dvnD9/nrcMoFWrVoiNjcX48ePx559/8rbvs7S0RPPmzbm/L1y4gAsXLgAodov8+PEjfv31VwDFbcfX15eXr127dqGgoEChG2edOnXQvHlzbNmyBenp6WjRogWSkpKwYsUK6OrqYtSoUYJzykpTRmFhIXbt2gVvb2+F+1+PGTMGhw4dQvv27fH+/Xts27aNd7zkMof4+Hhs3boVALiv+LKyOzg4oFevXgAAsViMBQsWoE+fPvD19UWvXr2QkJCAZcuWoXHjxrz9WgcNGoTffvsNQ4cOxePHj2Fvb4+tW7ciPj4ehw8f5uXl8OHDXDvNz8/HX3/9xV0/MDCQW47i4+OD2rVro27dujAyMsKtW7ewceNGVKxYkXP1BoAWLVoI7ods5tvPz4+3LUzr1q1hZ2eH+vXro0KFCkhISMCmTZvw6tUrwZaBXbp0gY2NDdzc3JCeno6NGzciNjYWR48e5WY4gOK6NGPGDLRo0QJmZma4evUqNm3ahFatWvG2NXny5AmaNWuGrl27olq1atDQ0MCNGzewbds2VKpUSaUtUP7LlNc2VKtWTeEsgqOjo2AGXNX6KSM8PByAYld0oLgeNmjQAJUrV8br16+xbt06ZGZm4siRI4I+SXatBw8eAAC2bt3K2bKS7tGzZs2Cj48P/Pz8MHDgQCQmJmLRokVo0aIFWrVqJcjD57I35ubmcr0GZDNUJY+V9AyRYWxsDACoV68eT7d27dro378/Nm7ciIKCAvj5+eGPP/7Anj17MHHiRJ7LrqplHzduHI4fP47GjRtj2LBhMDMzw5EjR3D8+HF8//33XJrx8fEIDAyESCRCUFAQ9uzZw8tzzZo1uee+dOlShIWFoUGDBpBKpQJb27FjR27rM3X6JIZygoKC4O3tjX79+iE6Ohrm5uYICwtDYWEhpk+fztOVbT0lWwKRkZGBli1bIjU1FePGjRPsy+3s7IwGDRoAKF5COXPmTAwdOhTBwcFo2bIlLl68iG3btmHWrFk8d+ghQ4Zg/fr1aNu2LcaOHQttbW0sXrwYlpaWGDNmDKenTpo1atRA06ZN4eHhARMTEzx58gQbNmxAfn4+5s6dy+n98ccfGDFiBIKCguDi4oK8vDxcvHgR+/fvR926dXl9fteuXaGrqwsfHx9UqFAB0dHRWLduHaRSKS/NW7duoVu3bujWrRsqV66M7OxsRERE4NKlSxg4cCA8PT0Fz+VzvRupY1fUyeegQYOQnp4OX19f2Nra4vXr1wgPD8fDhw+xaNEiblbawsIC/fv3x2+//YamTZuiU6dOyMjIQFhYGLKzszFx4kRenv4OG7BkyRI0b94cjRo1wqBBg5CWlobFixejSpUq+OGHH+Te32+Kzz2qV4fU1FTq168fmZubk76+PrVs2ZIePnxIDg4O5ZpRvXXrFnXr1o3s7e1JLBZThQoVqF27dnKDpAQHBxMA3jY3eXl5JJVKSUdHRzAjS1T8dWjo0KFUsWJF0tbWJisrK2ratClvJuvvmglX9fryZsKJioPg9O7dm6ysrEhbW5tsbW2pXbt2gq2AiP7vKQD8P+p6ST73c5Nx6NAh8vHxIV1dXTI0NCQvLy/asWOH4PrXrl0jANSiRQu59+lL8S199c7OzqaxY8eSlZUVicViqlevniDoip+fn2AGV/ac5f1Kz3RPnTpVoe7UqVMFefL29qYKFSooDWCSlZVFM2bMIDc3N9LV1SUjIyNq164dFyG1PGkSEZ04cYIA0PLlyxXqyO6Hol9JZHVWlftEVBzcqFatWiQWi8nS0pKGDRsmdyY4OTmZ+vTpQ6ampiQWi6l+/fpyg+X06dNH4fVLtvVJkyaRh4cHGRkZkba2Ntnb29MPP/wgiGgqD0XtdeXKldSoUSMyNzcnLS0tsrCwoPbt28sN8jRv3jyqVq0aSSQSMjExocDAQLnP8unTp9SiRQsyNzcnsVhM1apVozlz5vCCyxAVf+kfOHAgVatWjfT09EhHR4dcXFxo1KhRSqPnf0m+JTsgj/LaBnko6p9UrZ9ExTsA2Nrakqenp9JrjR49mpycnEgsFpOFhQV1796dnj17pjBfqrRjouKZZh8fH5JIJGRhYUFDhw5V6KXxOe2NPFQNoKTsvSIvL4+mTZtGDg4OpK2tTZUrV1YY9V7VskdFRVHr1q2594UqVarQrFmzeIGRlNnE0n2CsvoBgDfbpU6f9K3xLdqC9+/fU2hoKJmZmZFUKiU/Pz+5wTcdHBzIwcGB+1v2LqnoJ8/bZd26dVS1alXS0dEhZ2dnWrJkidyda168eEFBQUFkaGhI+vr61K5dO4WBeVVJc+rUqVS3bl0yMTEhLS0tsrGxoZCQEG4WWMbTp0+pd+/e5OTkRLq6uiSRSMjd3Z2mTp1KmZmZPN1ly5aRl5cXmZqakpaWFllbW1PPnj0F+YyNjaXg4GCqVKkSSSQSkkqlVKdOHVqzZo3CXXv+jnejksizK+rkc8eOHdSsWTOytLQkLS0tMjExoWbNmtHBgwcF18rPz6cVK1aQh4cH6evrk76+PgUEBPCCvhL9vTYgM
jKSvL29SSKRkKmpKfXq1YuSkpLKvE9/F+qMmUVEZUfHunXrFurUqYObN2/K/arDYHxJ7t69Cw8PD/z+++/cDOTXIDw8HD179mTtgsH4D8PsAIPBAJgtYDAY6o2ZNZQeZTC+QdavXw99fX2eay+DwWAwGAwGg8Fg/BNgez58IWRbjChCV1cXRkZGXyg3/0wOHz7MrcsZNmwYt36EwWAwGAwGg8FgMP4psEH4F0K2T6ci+vTpg82bN3+ZzPxDGT58OJKTk9GmTRtBUBEGg8FgMBgMBoPB+CfABuFfiNJ7OJamZBRThnxkUTsZDAaDwWAwGAwG458KG4R/IZo1a/a1s8BgMBgMBoPBYDAYjK8MC8zGYDAYDAaDwWAwGAzGF4INwhkMBoPBYDAYDAaDwfhCsEE4g8FgMBgMBoPBYDAYXwg2CGcwGAwGg8FgMBgMBuMLoVZgtmPHjiEmJubvyguD8Y/i0qVLAFi7YDD+yzA7wGAwAGYLGAwG8Pz5c5V1RUREZSlduXIFjRs3RmFh4SdljMH4t6GhoYGioqKvnQ0Gg/EVYXaAwWAAzBYwGAxAU1MTFy9eRIMGDZTqqTQTLhaLUVhYiG3btsHV1fWzZJDB+Kdz7NgxTJkyhbULBuM/DLMDDAYDYLaAwWAAMTEx6NmzJ8RicZm6armju7q6wtPTs9wZYzD+TcjczVi7YDD+uzA7wGAwAGYLGAyGerDAbAwGg8FgMBgMBoPBYHwh2CCcwWAwGAwGg8FgMBiMLwQbhDMYDAaDwWAwGAwGg/GFYINwBoPBYDAYDAaDwWAwvhBsEM5gMBgMBoPBYDAYDMYXgg3CGSqxefNmiEQixMXFcTJ/f3/4+/t/tTwxGAwGg8FgMBgMxj+N/+QgfPv27Vi6dOnffp3o6GhMmzaNN3BlMP4OcnNzMWHCBNjY2EBXVxf169dHZGRkmeft378fXbt2hZOTE6RSKapWrYoxY8bgw4cPAt3MzEyMGjUKdnZ2EIvFcHV1xerVq+Wm++HDBwwcOBAWFhbQ09NDQEAAbt26JdDLycnBnDlz4ObmBqlUCltbWwQHB+PBgwc8vaSkJPz0008ICAiAgYEBRCIR/vjjD4XlysvLw+zZs1GtWjVIJBJYWlqibdu2SExM5HQePHiA4OBgruzm5ubw9fXF4cOHeWkVFRVh8+bNCAwMRMWKFaGnp4fq1avj119/RU5OjtzrJycnY9CgQbC1tYVEIkGlSpUQGhrK05k2bRpEIpHgJ5FIeHrZ2dkIDQ1F9erVYWRkBH19fdSqVQvLli1Dfn5+ue89AGRkZGD8+PFwdHSEWCyGra0tgoKCkJWVpfa9z8rKwqpVq9CiRQtYW1vDwMAAtWvXxurVq1FYWMjTffjwIcaPHw8PDw8YGBjA2toabdu2xY0bNwTpRkREoGXLlrCxsYFYLIadnR2CgoJw//59uWVi8CmvbVC1fspQpc6ram/evXuHBQsWwNfXFxYWFjA2Noa3tzd27dpVZr5nzZoFkUiE6tWrK9X78OEDKlSoAJFIhL179wqOP3nyBCEhIbCzs4NUKkW1atUwY8YMXtsAgPz8fEyfPh1OTk4Qi8VwcnLCr7/+ioKCAp5e37595d5P2e/ly5dqp/nHH38oTO/q1avlymdmZiamTp2KVq1awdTUFCKRCJs3b1Z4H3fv3g1vb28YGxvDzMwMfn5+OHr0KE/n1atX6NmzJ6pWrQoDAwMYGxvDy8sLW7ZsARHJTXfXrl1o0KAB9PT0YGxsDB8fH5w9e1ZhPhiqo04fURJ1+kLZxI2iX3h4OKf76NEjjB49Gj4+PpBIJIIJH0U8e/aM0y/dd5w5cwb9+/dHlSpVIJVK4eTkhO+//x5JSUlyy7VmzRp4eHhAX18flpaWaN26NS5fvszTU7W9xcXFKS37gAEDOF1125uM/Px8uLm5QSQSYeHChbxj6vSvAHD69GkEBATA3Nyca5tbt24V6Ckqz9y5c3l6lSpVUqjr4uLC0129ejWCg4Nhb28PkUiEvn37lll2ABgwYABEIhHatWunkv63ilr7hP9b2L59O+7fv49Ro0b9rdeJjo7G9OnT4e/vj0qVKv2t12L8t+nbty/27t2LUaNGwcXFBZs3b0abNm1w7tw5NGrUSOF5AwcOhI2NDXr27Al7e3vcu3cPK1euxLFjx3Dr1i3o6uoCAAoLC9GyZUvcuHEDQ4cOhYuLC06ePIkhQ4YgNTUVP//8M5dmUVER2rZti7t372LcuHEwNzdHWFgY/P39cfPmTZ4R7tGjBw4dOoQBAwbA09MTr169wqpVq9CgQQPcu3cPDg4OAIo76Xnz5sHFxQU1atTAlStXFJYpPz8fbdu2xeXLlzFgwADUrFkTqampiIqKQlpaGuzs7AAA8fHxyMjIQJ8+fWBjY4OsrCzs27cPgYGBWLt2LQYOHAigeHDZr18/eHt7Y/DgwahQoQKuXLmCqVOn4syZMzh79ixEIhF3/RcvXqBhw4YAgMGDB8PW1havXr3CtWvX5OZ39erV0NfX5/7W1NTkHc/OzsaDBw/Qpk0bVKpUCRoaGrh8+TJGjx6NqKgobN++vVz3Pi0tDX5+fkhMTMTAgQNRuXJlpKSk4OLFi8jNzYVUKlXr3sfGxmL48OFo2rQpfvzxRxgaGnJ15OrVq9iyZQun+9tvv2HDhg3o3LkzhgwZgrS0NKxduxbe3t44ceIEmjVrxuneu3cPJiYmGDlyJMzNzfH69Wts3LgRXl5euHLlCmrVqqWwLjDKbxtklFU/AdXrvKr25sqVK5g0aRLatGmDyZMnQ0tLC/v27UNISAjXr8ojMTERs2fPhp6eXpnl+uWXXwQD6pLl8fLygpGREYYNGwZTU1Ouzd+8eRMHDx7kdHv27Ik9e/agf//+qFu3Lq5evYopU6YgISEB69at4/QGDRrEq9cAQEQYPHgwKlWqBFtbW7XTlDFixAjUq1ePJ6tcuTLvb1XTfPv2LWbMmAF7e3vUqlVL6cfOFStWYMSIEWjbti3mzp2LnJwcbN68Ge3atcO+ffvQqVMnLs3ExEQEBQXB3t4e+fn5iIyMRN++ffHo0SPMnj2bl+60adMwY8YMBAUFoW/fvsjPz8f9+/d5HyoY5UOdPqI06vSFvr6+cgdyS5Yswd27d9G0aVNOduXKFSxfvhxubm5wdXXFnTt3VCrL6NGjoaWlhdzcXMGxCRMm4P379wgODoaLiwtiY2OxcuVKHDlyBHfu3IGVlRWnO27cOCxevBg9e/bEkCFD8OHDB6xduxZ+fn64dOkSvLy8eGmX1d4sLCzklv3EiRMIDw9HixYtOJk67a0kK1asQEJCgtxj6vSvhw4dQocOHdCgQQPuw+vu3bvRu3dvvH37FqNHj+al3bx5c/Tu3Zsnq127Nu/vpUuXIjMzkyeLj4/H5MmTeWUHgHnz5iEjIwNeXl5yP5DI48aNG9i8ebPCD8L/KEgFbt68SQDo5s2bqqh/87Rt25YcHBz+9uvs2bOHANC5c+c+S3qZmZmfJZ3ysGnTJgJAz58/52R+fn7k5+f31fL0tdm2bds30S6i
oqIIAC1YsICTZWdnk7OzMzVo0EDpufLq5pYtWwgArV+/npPt3r2bANCGDRt4up07dyaJRELJycmcbNeuXQSA9uzZw8nevHlDxsbG1K1bN06WmJhIAGjs2LG8NM+ePUsAaPHixZwsPT2d3r17R0Rlt6t58+aRtrY2RUVFKS27PAoKCqhWrVpUtWpVTpabm0uXLl0S6E6fPp0AUGRkJE/eunVrcnR0pLdv3yq91tSpUwkApaSkqJ1PIqJhw4YRAEpKSuJkqt57IqIffviBjI2NKTY2Vul1VL33KSkpdP/+fYG8X79+BICePHnCyW7cuEEZGRk8vbdv35KFhQU1bNhQaX6IiF6/fk1aWlo0aNCgMnX/br4VOyCPT7EN6tRPVeu8qvYmNjaW4uLieHpFRUXUpEkTEovFCvvCrl27UpMmTcjPz4/c3d0V5uPevXukpaVFM2bMELQXIqJZs2YRAEF97t27NwGg9+/fExHRtWvXCABNmTKFpzdmzBgSiUR09+5dhXkgIrp48SIBoFmzZnEyddI8d+6c3PyXRp00c3JyOJty/fp1AkCbNm2Sm66LiwvVq1ePioqKOFlaWhrp6+tTYGCg0jwREbVr14709PSooKCAk125coVEIhHP/v9T+JZtgQx1+ojSqNsXliYrK4sMDAyoefPmPPm7d+8oPT2diIgWLFggeNeUx4kTJ0hHR4cmT55MAOj69eu84+fPn6fCwkKBDABNmjSJk+Xn55Ouri4FBQXxdGNjYwkAjRgxgpOp2t4U0bRpUzI0NKTs7GxOpk57k5GcnExGRkac/Spp34nU61+bN29ONjY2lJOTw8ny8/PJ2dmZatasydMFQEOHDlW5vCWZOXMmARDUn7i4OM5+6OnpUZ8+fZSmU1RURA0aNKD+/fuTg4MDtW3btlz5+TtRZ8z8Tbijv3z5EqGhoZy7oaOjI3744Qfk5eUBKJ5hCQ4OhqmpKaRSKby9vQXuTjI3kd27d2PWrFmws7ODRCJB06ZN8fTpU07P398fR48eRXx8POceUXKWOjc3F1OnTkXlypUhFotRsWJFjB8/nvelrU+fPpBIJIiJieHloWXLljAxMcGrV6+wefNmBAcHAwACAgK4a6n6lUv2RSo6Ohrdu3eHiYkJb9Zi27ZtqFOnDnR1dWFqaoqQkBC8ePFCkE5UVBTatGkDExMT6OnpoWbNmli2bBl3/K+//kLfvn3h5OQEiUQCKysr9O/fH+/evVMpn4yvz969e6GpqcnN3AKARCJBaGgorly5IrdeyJC3pr9jx44AwKvfFy9eBACEhITwdENCQpCTk8ObGdq7dy8sLS25WRCg+Mtwly5dcPDgQa4tZWRkAAAsLS15aVpbWwMANysGAAYGBjA1NVVYDhlFRUVYtmwZOnbsCC8vLxQUFCic7ZKHpqYmKlasyHOP1dHRgY+Pj0BX3n16+PAhjh8/jnHjxsHMzAw5OTlyXcZLQkRIT09X6JapCJndKplXVe/9hw8fsGnTJgwcOBCOjo7Iy8uTO5sAqH7vzc3N4e7uLpDLu0916tThza4CgJmZGRo3biywq/KoUKECpFKp3GUTjP/zKbZBRln1U506r6q9cXR05LxgZIhEInTo0AG5ubmIjY0VpHPhwgXs3btXpaVmI0eORMeOHdG4cWO5x9PT0wHIt00aGhrQ0dEBoNwuElGZ7vPbt2+HSCRC9+7dOVl508zIyBC4lpcnTbFYzJslVEZ6ejrn0i/D0NAQ+vr6PPutiEqVKiErK4t71wOKZ9GsrKwwcuRIEJFgRo3xaajaR8hDnb5QHocPH0ZGRgZ69OjBk5uamsLAwEDlMuTn52PkyJEYOXIknJ2d5er4+vpCQ0NDIDM1NeXlMz8/H9nZ2YK2XqFCBWhoaCisx8ramzySkpJw7tw5dOrUiTeDq057k/HTTz+hatWq6Nmzp9zj6vSv6enpMDExgVgs5mRaWlowNzdXWPbs7GyFS/EUsX37djg6Ogrqj4ODA89+lMXWrVtx//59zJo1S63rf6t89UH4q1ev4OXlhZ07d6Jr165Yvnw5evXqhfPnzyMrKwvJycnw8fHh3BpnzZqFnJwcBAYGIiIiQpDe3LlzERERgbFjx2LixIm4evUqr8FPmjQJHh4eMDc3x9atW7F161au0y4qKkJgYCAWLlyI9u3bY8WKFejQoQOWLFmCrl27cmksW7YMFhYW6NOnD7fWce3atTh16hRWrFgBGxsb+Pr6YsSIEQCAn3/+mbuWq6urWvcnODgYWVlZmD17NreOZNasWejduzdcXFywePFijBo1CmfOnIGvry/vpTQyMhK+vr6Ijo7GyJEjsWjRIgQEBODIkSM8ndjYWPTr1w8rVqxASEgIdu7ciTZt2qg9KGB8HW7fvo0qVarA0NCQJ5e5UKnq2iXj9evXAIoHVTJyc3OhqanJvXzKkLks37x5k5cfT09PQQfo5eWFrKwsPH78GADg7OwMOzs7LFq0CIcPH0ZiYiKuXbuGwYMHw9HRUfDCqArR0dF49eoVatasiYEDB0JPT4/7+HTu3Dm553z8+BFv377Fs2fPsGTJEhw/fpznKqcIeffp9OnTAIpf3ps2bQpdXV3o6uqidevWCte4OTk5wcjICAYGBujZsyeSk5Pl6uXl5eHt27d48eIFIiIisHDhQjg4OPDc4FS993/++SdycnJQuXJlBAUFQSqVQldXFw0bNlS7vpSFvPukTFeR3ocPH5CSkoJ79+7h+++/R3p6ukrP6b/M57ANZdXP8tT5kqhbP+TpFhYWYvjw4fj+++9Ro0YNpWns2bMHly9fxvz58xXqyD4WhIaG4s6dO3jx4gV27dqF1atXY8SIEZy7u2zAUvplVZ5dLE1+fj52794NHx8fwUSAumn269cPhoaGkEgkCAgIEKz9/JR8KsPf3x8nTpzAihUrEBcXh4cPH2Lo0KFIS0vDyJEjBfrZ2dl4+/Yt4uLisGXLFmzatAkNGjTg5evMmTOoV68eli9fDgsLC25N68qVK8uVRwYfVfsIdVC1DYeHh0NXV5f3AaA8LF26FKmpqZg8ebJa52VmZiIzM5OXT1mcjM2bNyM8PBwJCQnc5JSJiQnvA6aMstqbPHbu3ImioiLBBwh1uXbtGrZs2YKlS5eqNXgF5Pev/v7+ePDgAaZMmYKnT5/i2bNnmDlzJm7cuIHx48cL0ti8eTP09PSgq6sLNzc33nI4Rdy+fRsxMTG8j43lISMjAxMmTMDPP/+s9oeLb5bPPbWuLr179yYNDQ2BKwlRsdvBqFGjCABdvHiRk2dkZJCjoyNVqlSJczeRuYm4urpSbm4up7ts2TICQPfu3eNkitzRt27dShoaGrxrERGtWbNG4EZx8uRJAkC//vorxcbGkr6+PnXo0IF33qe4o8tcAUu7B8XFxZGmpibPfY3o/+51MnlBQQE5OjqSg4MDpaam8nRLuo5lZWUJrr1jxw4CQBcuXOBkzB1dyLfieubu7k5NmjQRyB88eEAAaM2aNWqlFxoaSpqamvT48WNOtmjRIkE7JCL66aefCAC1a9eOk+np6VH
//v0F6R49epQA0IkTJzhZVFQUOTs7EwDuV6dOHZ6LdWmUtav9+/cTADIzMyMXFxfatGkTbdq0iVxcXEhHR0eue+igQYO4a2toaFBQUBDnbqqMZs2akaGhIa99jRgxgrt+q1ataNeuXbRgwQLS19cnZ2dn+vjxI6e7dOlSGjZsGIWHh9PevXtp5MiRpKWlRS4uLpSWlia4nqxdyn5169alv/76i6ej6r1fvHgxl08vLy8KDw+nsLAwsrS0JBMTE3r16pXcMqtr03Jzc8nNzY0cHR0pPz9fqe6FCxdIJBIJXGZlVK1alSu7vr4+TZ48WeBu+DX4VuyAPD7FNqhaP9Wp8/KQZ2/k8e7dO6pQoQI1btxYcGzlypVkZGREb968ISJS6I6elZVF9vb2NHHiRCJS7l46c+ZM0tXV5bW5km6sRET79u0jALR161aeXPbOUL16dYXlOXz4MAGgsLCwcqd56dIl6ty5M23YsIEOHjxIc+bMITMzM5JIJHTr1q1PzmdZ7rHJycnUtGlT3j0yNzeny5cvy9WfM2cOT7dp06aUkJDAHX///j1Xl/T19WnBggW0a9cuatWqVbn6si/Nt2wLZKjTP6uKvL6wNO/evSMdHR3q0qWL0rTKckdPSkoiAwMDWrt2LRH9/91U3hiiNDKX6DNnzvDkT548IU9PT17ddHJyoocPH/L0VG1v8qhTpw5ZW1sr7bPKam9FRUXk5eXFjQueP38u1x1dHor618zMTOrSpQuJRCKu7FKplA4cOCBIw8fHh5YuXUoHDx6k1atXU/Xq1eXasNKMGTOGAFB0dLRSvbLc0ceOHUuOjo6c6/y/wR39qw7CCwsLydDQkL777juFOlWqVCEvLy+BXGbMZYNrWWc6f/58nt6tW7cIAB08eJCTKRqEBwYGkru7O6WkpPB+jx8/5gbcJRk0aBDp6OiQh4cHmZub89bFEn2eQfj58+d58sWLF5NIJKInT54I8unq6krNmjUjov835iVLlqh8zezsbEpJSeEa9tKlS7ljbBAu5FvpcJ2cnKh169YC+bNnz9SuA+Hh4QSAxo8fz5MnJSWRkZERubi40KlTp+j58+e0du1aMjQ05F6mZGhoaNAPP/wgSPvMmTMEgCIiIjjZ48ePqXPnzvTTTz/RgQMHaOHChWRmZkaNGjXirZsqibJ29fvvvxMA0tHR4b3cxcfHk7a2NvXo0UNwTkxMDEVGRtKWLVuobdu21LFjR3r9+rXS+yRbM1q68+nfvz8BIHd3d15nKxtAl1z3Kg/Z/Z8zZ47g2OvXrykyMpL27NlDgwcPpgYNGtCVK1d4Oqree9laMnNzc97asStXrsgdbMhQ16YNGDCAANDRo0eV6iUnJ5OdnR05OTkJ1rLJuHz5Mp04cYLCwsKoXr16NGbMGMrLy1MpH38n34odkMfntA1E8uvnp9R5RfamNIWFhdSqVSvS0dGhO3fu8I69ffuWTE1NaeHChZxM0SD8l19+IWtra66OKRuEb926lVq2bEnr1q2jffv2Uf/+/UkkEtGKFSs4nezsbHJwcCBLS0vat28fxcXF0a5du8jMzIy0tLTI2dlZYZm6detG2tragnX0n5ImUfGAQldXl1q2bPnJaZY1KMjIyKAhQ4ZQnz59aM+ePbRx40aqUaMGWVlZ8WJAyIiLi6PIyEjavn07de/enZo2bUqPHj3ijickJHADgZ07d3LywsJCcnNzIzs7O6Vl/9p8y7ZAhjr9syoo6gtLs3btWsG7uDzKGoT37t2batWqxdkaVQfh58+fJy0tLbkfAV6/fk29evWioUOH0v79+yksLIzs7e2pWrVqZcbEkNfeSvPo0SMCQKNHj1aaVlntbePGjaSrq8u926g6CFfWv+bn59PkyZMpODiYduzYQdu2bSNfX1/S19cXvF+UJjc3l6pXr07GxsZyJ/SIituura0t1a5dW2laRMoH4Y8ePSJtbW3au3cvJ2OD8E/k9evXSl/4iIjEYjH16tVLID9w4AABoCNHjhDR/zvTkoab6P+VdPPmzZxM0SDc1dWV9yWs9K9kgAai4g7IysqKAND27dsF6X2OQXjJgQRRcTAlZXmUBVLYuXMnAWUHynj37h2NGDGCKlSoIEhr+vTpnB4bhAv5VjrczzUTfuHCBZJIJNSyZUu5s5bnz58ne3t7rn4YGhpyQZVKfkhT9Uv7hw8fyNLSkvfyTET0xx9/KO3UlbUr2bGAgADBsYCAAHJ0dFR2C4ioOFBJ6WBDJdm5cyeJRCIKDQ0VHBs6dKig7RAVe6ZoaWlRv379yry+lZUV76OGImbNmkX6+vo8rwFV773sRUdefhwdHeXePyL1bNr8+fMJAM2cOVOpXmZmJtWrV4+MjIx4HkvKeP/+PVlaWtKYMWNU0v87+VbsgDw+t5cMkbB+lrfOl2VvSjJkyBACQL///rvg2ODBg6ly5co8Dzh5g/Dnz5+Trq4ubdy4kZMpGoTv2LGDdHV16cWLFzx53759SSqV8gbO9+/fJzc3N84uisViWrZsGVWoUIFq1aoltzwZGRkklUp5HkQlKU+aJQkJCSEdHR1ewLPypFnWoKBVq1aCMrx7945MTU3LnPEkKv5IV7FiRe4FPiUlhQCQtrY2L+9E/w/+FR8fX2a6X4tvyRbk5uZSUlIS71dQUPBZZ8KV9YWl8fX1JVNT0zI/nCobhMuC9p09e5aTqTIIj4mJIVNTU/Lw8OACwMnIz8+n6tWr07Bhw3jyx48fk7a2dpkfCInkt7eS/PLLLwSAbty4oTQdZe0tLS2NLC0t6ZdffuFkqgzCy+pfBw0axPuoQUSUl5dHLi4ucidASyPzpintJSlDFmi39HuePJQNwlu1aiUYb/wbBuH/ui3K5G2fAkCl9c1FRUWoUaMGFi9eLPd4xYoVeX/fvn0bb968AVC8jU63bt3UzG3ZlF7DVVRUBJFIhOPHj8sta+lgDGXRpUsXXL58GePGjeP2SCwqKkKrVq1QVFT0SXlnfBmsra3lbt0i2+7BxsamzDTu3r2LwMBAVK9eHXv37oWWltA0+Pr6IjY2Fvfu3cPHjx9Rq1YtvHr1CgBQpUoVXn7kbTVROj/79u1DcnIyAgMDeXp+fn4wNDTEpUuX8MMPP5SZ95LI0i4dZAUoDrRy+/btMtMICgrCoEGD8PjxY1StWpV3LDIyEr1790bbtm2xZs0ala+vqakJMzMzpKamlnn9ihUr4v379yrlc9KkSTh48CAGDRoEQPV7X9Z9UiWfyti8eTMmTJiAwYMHK123l5eXh06dOuGvv/7CyZMny9zbWYaJiQmaNGmC8PBwwR6pjP/zOWxDaUrXz/LUeVXsjYzp06cjLCwMc+fORa9evXjHnjx5gnXr1mHp0qWcLQLABYeLi4uDoaEhTE1N8csvv8DW1hb+/v7cWnXZWtaUlBTExcXB3t4eGhoaCAsLQ+3atbntDGUEBgZi8+bNuH37NrfNj7u7O+7fv4/o6GikpqbCzc0Nurq6GD16NPz8/OSW6cCBA8jKylK4PrQ8aZakYsWKyMvLw8ePH7l4AJ+aZmliY2Nx4s
QJwZZppqamaNSoES5dulRmGkFBQVi/fj0uXLiAli1bwtTUFBKJBMbGxoL3mwoVKgAAUlNTYW9vr3Z+/2tcvnwZAQEBPNnz589V7iPKoqy+sCQJCQm4ePEiBg4cCG1tbRVLIGT8+PFo3LgxHB0duTb89u1bLv8JCQmCuvHixQu0aNECRkZGOHbsmCAA3IULF3D//n3Be7+LiwtcXV1Vqsfy2ltJtm/fjqpVq6JOnTrqFJfHwoULkZeXh65du3JlT0xMBFDcJuLi4mBjY8OL21NW/5qXl4cNGzZg/PjxvBgB2traaN26NVauXIm8vDxBLKDSZQeg8J0lPDwcGhoanzQ+Onv2LE6cOIH9+/fz4owUFBQgOzsbcXFxMDU1lXvvv3W+6iDcwsIChoaGuH//vkIdBwcHPHr0SCB/+PAhd1xdFAUzcHZ25vYvLCvgwcePH9GvXz+4ubnBx8cH8+fPR8eOHXl7B6obNEEVnJ2dQURwdHTkDXzk6QHA/fv3BXuTykhNTcWZM2cwffp0/PLLL5z8yZMnnzfTjL8VDw8PnDt3Dunp6TwjFBUVxR1XxrNnz9CqVStUqFABx44dU/ohR1NTk5eeLChTyTrm4eGBixcvoqioiGfYo6KiIJVKuXorC/AkC24og4hQWFioVuRRGTVq1IC2trbcgcerV69gYWFRZhrZ2dkAivfRLklUVBQ6duyIunXrYvfu3XIHDrJOtvT1ZUHVyro+ESEuLk6w76aq+VT13ivKJ1B8n6pVq1bm9RVx8OBBfP/99+jUqRNWrVqlUK+oqAi9e/fGmTNnsHv3brUHAtnZ2YJnxODzqbahNPLqp7p1Xh17s2rVKkybNg2jRo3ChAkTBMdfvnyJoqIijBgxgguEWhJHR0eMHDkSS5cuRUJCAp4+fQonJyeB3pAhQwAU94nGxsZITk6GiYmJQE8W9b20bRKJRLydAY4dO4aioiKFfW94eDj09fUFHyA/Jc2SxMbGQiKRCO7tp6RZGkX2Gyi+T6rY79I2TENDAx4eHrh+/brg5V/2kUUVG84AatWqhcjISJ7MyspK5T5CGar0hSXZsWMHiOiTg5IlJCQgPj4ejo6OgmOBgYEwMjLiBSd+9+4dWrRogdzcXJw5c4bbeaUkn6MeK2pvQPG9evr0KWbMmFFmOspISEhAamqq3B1IZs+ejdmzZ+P27ducTVelf3337h0KCgoUlr2oqEjusZLIdqqQ1y5zc3Oxb98++Pv7l+uDrwzZfujyAvq9fPkSjo6OWLJkCUaNGlXua3w1PvfUurqoGpitZKCPzMxMcnJykhuYrbRbmcxdo6R7R9euXcnY2Fhwvc2bNxMALuBDSbKysnh7kw4dOpS0tbXp5s2blJmZSc7OzuTq6srba+/48ePlWmNDpHiP1qdPn5KmpiZ1795d4C5bVFTEuckVFhaWGZgtLS2NANC0adN4x2Wuf1OnTuVkzB1dyLfienb16lWBS1JOTg5VrlyZ6tevz8ni4+MpJiaGd25SUhI5OTmRjY1NmftylubNmzdkb29PNWvW5LkyyZZClGyLKSkpZGxsTF27duVke/fuFdQzov8vNZk7d67c65blEv3dd9+RpqYmr6zR0dGkqalJQ4YM4WSlYzgQFbtheXp6kq6uLm/tVHR0NJmZmZG7u7vSoG05OTlUoUIFcnJy4q1pl62H2717NyeTBZEqyapVqwjg75GekpIi1zVetk94ySAzqt57IqJatWqRoaEhz8bIAk6Wjq0ho6x7f/78eZJIJBQQEMCzhfKQ2Rl59rYk8p7T8+fPycDAQG6Qri/Nt2IH5PEptkHV+qlOnVfH3uzcuZM0NDSoR48eCpeGpKSkUEREhODn7u5O9vb2FBERwQUvvHjxokBPFqhp/PjxFBERwbnKtmvXjnR0dHjrlYmIOnToQBoaGvTy5UuF+c7KyiJPT0+ytrYWuL4SFd9XLS0tucvs1E1T3jO6c+cOaWtrl7lPd1n5JFLuHvvmzRvS0NAgf39/3vN58eIF6evrU6tWrZTmk4ioffv2XIwbGUuWLCEAtG7dOk6WnZ1NTk5O5ObmprRMX5tv2RbIUKePePr0KT19+pQnU7UvLEnNmjXJ3t5eYTsuiTJ39JMnTwra8PDhwzl3Z9nyVKLicYKXlxcZGBgodQO/ceMGARC4Qd+8eZM0NDRo8ODBnKw87U0WuLL0fZSHsvZ28+ZNQdllNrZv374UERFBHz584PRV6V8LCgrI2NiYqlSpwlvOk5GRQXZ2dlStWjWlZU9PTydnZ2cyNzfnnS9DFih3w4YNZZadSLE7enx8vFw7b2FhQXXr1qWIiAiV7u+X4h/ljj579mycOnUKfn5+GDhwIFxdXZGUlIQ9e/bgzz//xE8//YQdO3agdevWGDFiBExNTbFlyxY8f/4c+/btE2yzoAp16tTBrl278OOPP6JevXrQ19dH+/bt0atXL+zevRuDBw/GuXPn0LBhQxQWFuLhw4fYvXs3Tp48ibp16+Ls2bMICwvD1KlT4enpCQDYtGkT/P39MWXKFG7rEw8PD2hqamLevHlIS0uDWCxGkyZNOLeq8uDs7Ixff/0VEydORFxcHDp06AADAwM8f/4cERERGDhwIMaOHQsNDQ2sXr0a7du3h4eHB/r16wdra2s8fPgQDx48wMmTJ2FoaAhfX1/Mnz8f+fn5sLW1xalTp/D8+fNy54/x5alfvz6Cg4MxceJEvHnzBpUrV8aWLVsQFxeHDRs2cHq9e/fG+fPneUszWrVqhdjYWIwfPx5//vkn/vzzT+6YpaUlmjdvzv3t5+eHBg0aoHLlynj9+jXWrVuHzMxMHDlyhNcOg4KC4O3tjX79+iE6Ohrm5uYICwtDYWEhpk+fzum1b98e7u7umDFjBuLj4+Ht7Y2nT59i5cqVsLa2RmhoKK+cv/76KwDgwYMHAIr3i5Tlt6TL8+zZs3HmzBk0adKEmx1bvnw5TE1N8fPPP3N6gwYNQnp6Onx9fWFra4vXr18jPDwcDx8+xKJFi7iv2hkZGWjZsiVSU1Mxbtw4HD16lJcvZ2dnNGjQAEDxnp8LFixAnz594Ovri169eiEhIQHLli1D48aNeV9yHRwc0LVrV9SoUQMSiQR//vkndu7cCQ8PD869HAC2bduGNWvWoEOHDnByckJGRgZOnjyJyMhItG/fHk2aNFH73gPAkiVL0Lx5czRq1AiDBg1CWloaFi9ejCpVqgiWAahy7+Pj4xEYGAiRSISgoCDs2bOHl0bNmjVRs2ZNAMVbzISFhaFBgwaQSqXYtm0bT7djx47cNlA1atRA06ZN4eHhARMTEzx58gQbNmxAfn4+5s6dC4ZiPsU2qFo/1anzqtqba9euoXfv3jAzM0PTpk0RHh7OK5ePjw+cnJxgbm6ODh06CMot23a05LFGjRoJ9IyNjQEA9erV4+mOGzcOx48fR+PGjTFs2DCYmZnhyJEjOH78OL7//nverE6XLl1gY2MDNzc3pKenY+PGjYiNjcXRo0fl7n28a9cuFBQUKJ0VVDXNrl27QldXFz4+PqhQoQKio6Oxbt06S
KVSQdtQJ58rV67Ehw8fuNln2RaSADB8+HAYGRnBwsIC/fv3x2+//YamTZuiU6dOyMjIQFhYGLKzszFx4kQuvVmzZuHSpUto1aoV7O3t8f79e+zbtw/Xr1/H8OHDedssDho0CL/99huGDh2Kx48fw97eHlu3bkV8fDwOHz6s8J4xVEOdPkK2BaTMBVidvlDG/fv38ddff+Gnn35S6B2alpaGFStWAADn/r1y5UoYGxvD2NgYw4YNAwC0aNFCcK5s5tvPzw9169bl5D169MC1a9fQv39/xMTE8PbH1tfX59p7nTp10Lx5c2zZsgXp6elo0aIFkpKSsGLFCujq6vJmV9Vpb0Dx7PquXbvg7e2tcD9zWVnLam+enp7ceEOG7Lm4u7vz7Jeq/aumpibGjh2LyZMnw9vbG71790ZhYSE2bNiAxMRE3nmrVq3CgQMH0L59e9jb2yMpKQkbN25EQkICtm7dKtdlPTw8HGKxGJ07d1ZY9sOHD+Pu3bsAimff//rrL+59IzAwEDVr1oS9vb3cJSijRo2CpaWl3D7gH8PnHtWXh/j4eOrduzdZWFiQWCwmJycnGjp0KPdl5dmzZxQUFETGxsYkkUjIy8uL98WLSL2Z8MzMTOrevTsZGxsTAF6Qtry8PJo3bx65u7uTWCwmExMTqlOnDk2fPp3S0tIoPT2dHBwcyNPTUxBMZvTo0aShocGLKLh+/XpycnIiTU1NtYK0KZoJl7Fv3z5q1KgR6enpkZ6eHlWrVo2GDh0q+HL/559/UvPmzcnAwID09PSoZs2avOiuiYmJ1LFjRzI2NiYjIyMKDg6mV69esZlwFfiWvnpnZ2fT2LFjycrKisRiMdWrV08QYMXPz49KN3koCfJX+tmOHj2anJycSCwWk4WFBXXv3p2ePXsmNz/v37+n0NBQMjMzI6lUSn5+fnK9Xd6/f0+jR4+mKlWqkFgsJnNzcwoJCaHY2FiBrrK8lubmzZvUrFkz0tPTIwMDA/ruu+8EWyDt2LGDmjVrRpaWlqSlpUUmJibUrFkzQfRWmQ1R9JP35XbHjh1Uq1YtEovFZGlpScOGDRPMNn3//ffk5uZGBgYGpK2tTZUrV6YJEyYI9K5fv07BwcFkb29PYrGY9PT0yNPTkxYvXiw3oJWq956IKDIykry9vUkikZCpqSn16tVL7vZwqtx7mQ1W9CtpT/r06aNUt6SdmTp1KtWtW5dMTExIS0uLbGxsKCQkRLA929fiW7ID8iivbVC1fspQpc6ram9k/Y2in6JAYSXLIy86emmURUePioqi1q1bk5WVFWlra1OVKlVo1qxZgjY3b948qlatGkkkEjIxMaHAwEC6ffu2wmt6e3tThQoVFAZxUifNZcuWkZeXF5mampKWlhZZW1tTz5495UYmVyefDg4OKrXN/Px8WrFiBXl4eJC+vj7p6+tTQEAAL3AWEdGpU6eoXbt2ZGNjQ9ra2mRgYEANGzakTZs2yZ0dTU5Opj59+pCpqSmJxWKqX79+ubbO+tJ867ZAhqp9hIODA+/9uDx9oWwbU2X2Wlm68oIol0RRYDZldbh0mllZWTRjxgxyc3MjXV1dMjIyonbt2gnahzrtjYjoxIkTBICWL1+utAyqtrfSKArMpk7/SlS8S4WXlxcZGxuTrq4u1a9fnxeFnKi4DTdv3pyzh8bGxtSiRQvBdm8y0tLSSCKRUKdOnZSWXVley7Lz/4bAbCKisiOW3bp1C3Xq1MHNmzcFX2IYjP8q4eHh6NmzJ2sXDMZ/GGYHGAwGwGwBg8FQb8ysvi83g8FgMBgMBoPBYDAYjHLx1deE/9fIzMxEZmamUh0LCwuFW60xGAwGg8FgMBgMBuOfCxuEf2EWLlwoCH5RmufPn6NSpUpfJkMMBoPBYDAYDAaDwfhisEH4F6Z3795yo7SWxMrK6gvlhsFgMBgMBoPBYDAYXxI2CP/CODk5wcnJ6Wtng8FgMBgMBoPBYDAYXwEWmI3BYDAYDAaDwWAwGIwvBBuEMxgMBoPBYDAYDAaD8YVgg3AGg8FgMBgMBoPBYDC+EGwQzmAwGAwGg8FgMBgMxhdCrcBsx44dQ0xMzN+VFwbjH8WlS5cAsHbBYPyXYXaAwWAAzBYwGIzibaZVRUREVJbSlStX0LhxYxQWFn5SxhiMfxsaGhooKir62tlgMBhfEWYHGAwGwGwBg8EANDU1cfHiRTRo0ECpnkoz4WKxGIWFhdi2bRtcXV0/SwYZjH86x44dw5QpU1i7YDD+wzA7wGAwAGYLGAwGEBMTg549e0IsFpepq5Y7uqurKzw9PcudMQbj34TM3Yy1CwbjvwuzAwwGA2C2gMFgqAcLzMZgMBgMBoPBYDAYDMYXgg3CGQwGg8FgMBgMBoPB+EKwQTiDwWAwGAwGg8FgMBhfCDYIZzAYDAaDwWAwGAwG4wvBBuEMBoPBYDAYDAaDwWB8IdggnKEyIpEI06ZNU+ucvn37olKlSn9LfhgMBoPBYDAYDAbjn8Z/chC+fft2LF269G+/TnR0NKZNm4a4uLi//VoMRmlyc3MxYcIE2NjYQFdXF/Xr10dkZGSZ50VERKBly5awsbGBWCyGnZ0dgoKCcP/+fbn6GRkZGD9+PBwdHSEWi2Fra4ugoCBkZWVxOv7+/hCJRHJ/2travPQqVaokV2/w4ME8vc2bNytM8/Xr1zzdnJwczJkzB25ubpBKpbC1tUVwcDAePHggKM/NmzfRrl07WFlZQV9fHzVr1sTy5ctRWFio8J49e/YMEokEIpEIN27cKHeamZmZGDVqFOzs7CAWi+Hq6orVq1cL0ktKSsJPP/2EgIAAGBgYQCQS4Y8//pCbt9mzZ8Pb2xsWFhaQSCRwcXHBqFGjkJKSorA8ABAeHg6RSAR9fX2levn5+XBzc4NIJMLChQt5x+Li4hQ+o507dwrSWrlyJVxdXbl69OOPP+Ljx49Kr89QTnntQGmaN28OkUiEYcOG8eTZ2dkIDQ1F9erVYWRkBH19fdSqVQvLli1Dfn4+T1dVO/Du3TssWLAAvr6+sLCwgLGxMby9vbFr1y6F+bt16xYCAwNhamoKqVSK6tWrY/ny5dzxrKwsrFq1Ci1atIC1tTUMDAxQu3ZtrF69Wm7bTkpKwsCBA+Ho6AhdXV04Ozvjxx9/xLt373h669evh5+fHywtLSEWi+Ho6Ih+/fop7PeTk5MxaNAg2NraQiKRoFKlSggNDeXpKLKBIpEILi4ugjQ3bNgAV1dXrn2vWLFC7rV37twJT09PSCQSWFhYIDQ0FG/fvpWbx379+qFChQrQ1dWFp6cn9uzZIzfNly9fokuXLjA2NoahoSG+++47xMbGytVVJZ+PHj3C6NGj4ePjw9lU9g71+fnw4QMGDhwICwsL6OnpISAgALdu3VL5/JiYGLRq1Qr6+vowNTVFr169FPYpz549Q/fu3bn65OLigkmTJgn0VLH/06ZNU9g2RCIRLl26pHY+X716hZ49e6Jq1aowMDCAsbExvLy8sGXLFhCRIJ+qtqO0tDSMHz8eLi4u0NXVhYODA0JDQ5GQkMDTU7W9K3vnEYlE
CA8PL1c+AdXsUmkU9QkAFOZx7ty55UrzxYsXmD59Ory8vGBiYgJzc3P4+/vj9OnTStP71lFrn/B/C9u3b8f9+/cxatSov/U60dHRmD59Ovz9/f8Vs8HZ2dnQ0lKvyqxfvx5FRUV/U44Yyujbty/27t2LUaNGwcXFBZs3b0abNm1w7tw5NGrUSOF59+7dg4mJCUaOHAlzc3O8fv0aGzduhJeXF65cuYJatWpxumlpafDz80NiYiIGDhyIypUrIyUlBRcvXkRubi6kUikAYNKkSfj+++951/n48SMGDx6MFi1aCPLg4eGBMWPG8GRVqlSRm98ZM2bA0dGRJzM2Nub93aNHDxw6dAgDBgyAp6cnXr16hVWrVqFBgwa4d+8eHBwcABQPln18fODi4oIJEyZAKpXi+PHjGDlyJJ49e4Zly5bJzcPo0aOhpaWF3NxcwTFV0ywsLETLli1x48YNDB06FC4uLjh58iSGDBmC1NRU/Pzzz1yajx49wrx58+Di4oIaNWrgypUrcvMlu76HhwdCQkJgYGCAmJgYrF+/HkePHsWdO3egp6cnOCczMxPjx4+Xe6w0K1asELxUlKZbt25o06YNT9agQQPe3xMmTMD8+fMRFBSEkSNHIjo6GitWrMCDBw9w8uTJMvPBkE957UBJ9u/fr7COZWdn48GDB2jTpg0qVaoEDQ0NXL58GaNHj0ZUVBS2b9/O6apqB65cuYJJkyahTZs2mDx5MrS0tLBv3z6EhIRw/WpJTp06hfbt26N27dqYMmUK9PX18ezZMyQmJnI6sbGxGD58OJo2bYoff/wRhoaGXPu6evUqtmzZwulmZmaiQYMG+PjxI4YMGYKKFSvi7t27WLlyJc6dO4ebN29CQ6N4DuP27dtwdHREYGAgTExM8Pz5c6xfvx5HjhzB3bt3YWNjw6X74sULNGzYEAAwePBg2Nra4tWrV7h27RqvPEuXLkVmZiZPFh8fj8mTJwvs5dq1azF48GB07twZP/74Iy5evIgRI0YgKysLEyZM4PRWr16NIUOGoGnTpli8eDESExOxbNky3LhxA1FRUZBIJACA9PR0NGrUCMnJyRg5ciSsrKywe/dudOnSBeHh4ejevTvvPgUEBCAtLQ0///wztLW1sWTJEvj5+eHOnTswMzNTO59XrlzB8uXL4ebmBldXV9y5cweMz0tRURHatm2Lu3fvYty4cTA3N0dYWBj8/f1x8+ZNuR96SpKYmAhfX18YGRlh9uzZyMzMxMKFC3Hv3j1cu3YNOjo6nO6dO3fg7+8PW1tbjBkzBmZmZkhISMCLFy94aapq/zt16oTKlSsL8vTzzz8jMzMT9erVUzufb9++RWJiIoKCgmBvb4/8/HxERkaib9++ePToEWbPns2lqWo7KioqQvPmzREdHY0hQ4agSpUqePr0KcLCwnDy5EnExMTAwMAAgOrt3dfXF1u3bhWUfcmSJbh79y6aNm2qdj4B1e1SSZT1CTKaN2+O3r1782S1a9cuV5oHDx7EvHnz0KFDB/Tp0wcFBQX4/fff0bx5c2zcuBH9+vVTmpdvFlKBmzdvEgC6efOmKurfPG3btiUHB4e//Tp79uwhAHTu3Lm//VolKSwspOzs7C96zf8i27Zt+2bbRVRUFAGgBQsWcLLs7GxydnamBg0aqJ3e69evSUtLiwYNGsST//DDD2RsbEyxsbFqp7l161YCQOHh4Ty5g4MDtW3btszzN23aRADo+vXrSvUSExMJAI0dO5YnP3v2LAGgxYsXc7IBAwaQjo4OvXv3jqfr6+tLhoaGctM/ceIE6ejo0OTJk+XmR9U0d+/eTQBow4YNPL3OnTuTRCKh5ORkTpaens6lVx47s3fvXgJAO3bskHt8woQJVLVqVerRowfp6ekpTCc5OZmMjIxoxowZgvpGRPT8+XO58tK8evWKtLS0qFevXjz5ihUrCAAdOnRIxZJ9ef7tdiA7O5sqVarEPeOhQ4eqdN6wYcMIACUlJSnVk2cHYmNjKS4ujqdXVFRETZo0IbFYTJmZmZw8LS2NLC0tqWPHjlRYWKjwOikpKXT//n2BvF+/fgSAnjx5wsnCw8MJAB05coSn+8svvxAAunXrltIy3bhxgwDQnDlzePLWrVuTo6MjvX37Vun58pg5cyYBoEuXLnGyrKwsMjMzE9hLWbt9//49ERHl5uaSsbEx+fr6UlFREad3+PBhAkDLly/nZPPnzycAdObMGU5WWFhI9erVIysrK8rNzeXk8+bNIwB07do1ThYTE0Oampo0ceJEtfNJRPTu3TtKT08nIqIFCxYQAHr+/Lla9+pr8S3bgpLs2rWLANCePXs42Zs3b8jY2Ji6detW5vk//PAD6erqUnx8PCeLjIwkALR27VpOVlhYSNWrV6f69etTVlaWwvQ+1f4nJCSQSCSiAQMGlCufimjXrh3p6elRQUEBEanXji5dukQAaOXKlbw0N27cSABo//79Sq8tr73LIysriwwMDKh58+acTJ18Eqlvl1TpE9TpK1RJ8/79+5SSksKT5eTkULVq1cjOzk7l63wJ1BkzfxPu6C9fvkRoaCjn/uro6IgffvgBeXl5AIq/YAcHB3NuZt7e3jh69CgvjT/++AMikQi7d+/GrFmzYGdnB4lEgqZNm+Lp06ecnr+/P44ePYr4+HjOPaLkLHVubi6mTp2KypUrQywWo2LFihg/fjxvhqtPnz6QSCSIiYnh5aFly5YwMTHBq1evsHnzZgQHBwMAAgICuGspchktjczl5uHDh+jSpQsMDQ1hZmaGkSNHIicnh6crc90IDw+Hu7s7xGIxTpw4wd3b/v37c65y7u7u2Lhxo+B6OTk5mDZtGqpUqQKJRAJra2t06tQJz549412n5JrwjIwMjBo1CpUqVYJYLEaFChXQvHlznkuTvDXhHz9+xJgxY1CxYkWIxWJUrVoVCxcuFLj9yMp14MABVK9encu/rGwMxezduxeampoYOHAgJ5NIJAgNDcWVK1cEX6HLokKFCpBKpfjw4QMn+/DhAzZt2sS5bObl5cmdCVbE9u3boaenh++++07u8by8PJVdkTMyMhS6i2dkZAAALC0teXJra2sAgK6uLidLT0+HRCIRzKRbW1vz9GTk5+dj5MiRGDlyJJydneVeX9U0L168CAAICQnh6YWEhCAnJwcHDx7kZAYGBjA1NZV7PVWQtcmSz1PGkydPsGTJEixevLhMz5effvoJVatWRc+ePcu85sePHzmbXporV66goKBAbtkByHVdZ5TN57AD8+fPR1FREcaOHavWtZXVsZLIswOOjo6cd4oMkUiEDh06IDc3l+fuvH37diQnJ2PWrFnQ0NDAx48f5XpfmZubw93dXSDv2LEjAPD68/T0dACq2Qx5yCv7w4cPcfz4cYwbNw5mZmbIyckRuOsrY/v27XB0dISPjw8nO3fuHN69e4chQ4bwdIcOHYqPHz9y70n379/Hhw8f0LVrV4hEIk6vXbt20NfX57WvixcvwsLCAk2aNOFkGhoa6NKlC16/fo3z589z8r1796JevXq82cdq1aqhadOm2L1
7t9r5BABTU1NuhpDx97B3715YWlqiU6dOnMzCwgJdunTBwYMHy+zH9+3bh3bt2sHe3p6TNWvWDFWqVOE991OnTuH+/fuYOnUqdHV1kZWVJbef/lT7v2PHDhARevToUa58KqJSpUrIysri+i112tGn2hB57V0ehw8fRkZGBq/s6uSzPHZJnT4hOztbMGYpT5ru7u4wNzfnycRiMdq0aYPExETuPe+fxlcfhL969QpeXl7YuXMnunbtiuXLl6NXr144f/48srKykJycDB8fH85tbNasWcjJyUFgYCAiIiIE6c2dOxcREREYO3YsJk6ciKtXr/Iq56RJk+Dh4QFzc3Ns3boVW7du5daHFxUVITAwEAsXLkT79u2xYsUKdOjQAUuWLEHXrl25NJYtWwYLCwv06dOHMyhr167FqVOnsGLFCtjY2MDX1xcjRowAUOwmI7uWq6urWvenS5cu3HrWNm3aYPny5bwXKhlnz57F6NGj0bVrVyxbtgyVKlVCcnIyvL29cfr0aQwbNgzLli1D5cqVERoaylsTX1hYiHbt2mH69OmoU6cOFi1ahJEjRyItLU3hOmCg2G1l9erV6Ny5M8LCwjB27Fjo6uoKPk6UhIgQGBiIJUuWoFWrVli8eDGqVq2KcePG4ccffxTo//nnnxgyZAhCQkIwf/585OTkoHPnzoJ1eQw+t2/fRpUqVWBoaMiTe3l5AYBKLn4fPnxASkoK7t27h++//x7p6ek8d6c///wTOTk5qFy5MoKCgiCVSqGrq4uGDRuWmX5KSgoiIyPRoUMHuS7PZ8+ehVQqhb6+PipVqqTQDRwo/shlaGgIqVSKwMBAPHnyhHfc2dkZdnZ2WLRoEQ4fPozExERcu3YNgwcPhqOjI6/j9/f3R3p6OgYNGoSYmBjEx8djzZo12L9/PyZOnCi49tKlS5GamorJkycrzJ+qaebm5kJTU5PnygeAc+m/efOmwmuUBRHh7du3eP36NecGqqmpCX9/f4HuqFGjEBAQIHAfL821a9ewZcsWLF26lNfRy2P69OnQ19eHRCJBvXr1cOrUKd5x2Utf6ReTz1H2/zKfagcSEhIwd+5czJs3r8yXxry8PLx9+xYvXrxAREQEFi5cCAcHB7muozLKsgOlkcV6KPkydvr0aRgaGuLly5eoWrUq9PX1YWhoiB9++EGllz95afr6+kJDQwMjR47E1atXkZiYiGPHjmHWrFno0KEDqlWrJkjn3bt3ePPmDW7cuMG5Rpa0l7K1i5aWlmjatCl0dXWhq6uL1q1bl7nm+fbt24iJieG5gsvkAFC3bl2evE6dOtDQ0OCOK2pfMtnt27e5Dxe5ubly9Uq3xaKiIvz111+CawPF9evZs2fci7Gq+WR8GW7fvg1PT09uSYUMLy8vZGVl4fHjxwrPffnyJd68eaPwuZd8lrI6LxaLUbduXejp6UEqlSIkJATv37/n9D7V/oeHh6NixYrw9fUtVz5lZGdn4+3bt4iLi8OWLVuwadMmNGjQgMuXOu1IVt4pU6bg7NmzePnyJc6fP4/x48ejXr16aNasmcLyKGrvisquq6vL+6CiTj7VtUvq9AmbN2+Gnp4edHV14ebmxluaVN40S/P69WtIpVKurvzj+NxT6+rSu3dv0tDQkOtSWlRURKNGjSIAdPHiRU6ekZFBjo6OVKlSJc797Ny5cwSAXF1dee5Sy5YtIwB07949TqbIHX3r1q2koaHBuxYR0Zo1awRuISdPniQA9Ouvv1JsbCzp6+tThw4deOd9ijv61KlTCQAFBgby5EOGDCEAdPfuXU4GgDQ0NOjBgwc83dDQULK2tha4mISEhJCRkRHnHiRzjynpliujpCsLAJo6dSr3t5GRUZnuJn369OHd6wMHDnD3rSRBQUEkEono6dOnvOvp6OjwZHfv3iUAtGLFCqXX/RJ8y65n7u7u1KRJE4H8wYMHBIDWrFlTZhpVq1YlAASA9PX1afLkyTx3z8WLFxMAMjMzIy8vLwoPD6ewsDCytLQkExMTevXqlcK0ZW5mx44dExxr3749zZs3jw4cOEAbNmygxo0bEwAaP348T2/Xrl3Ut29f2rJlC0VERNDkyZNJKpWSubk5JSQk8HSjoqLI2dmZKw8AqlOnjsBVtqCggIYNG0ba2tqcnqamJq1evVqQz6SkJDIwMODc2hS5x6ua5qJFiwS2jojop59+IgDUrl07ufdSFTuTlJTEK7udnR3t2rVLoHfkyBHS0tLibEmfPn3kuqMXFRWRl5cX57qoyO08Pj6eWrRoQatXr6ZDhw7R0qVLyd7enjQ0NHiuvrI+ZubMmbzzT5w4wdW/b5V/sx0ICgoiHx8f7m8ocTHcsWMHr47VrVuX/vrrL6XpK7MDpXn37h1VqFCBGjduzJPXrFmTpFIpSaVSGj58OO3bt4+GDx9OACgkJERpmrm5ueTm5kaOjo6Un5/PO/bbb7+RsbExr0x9+vQR6MkQi8WcnpmZmcDlc8SIEdyxVq1a0a5du2jBggWkr69Pzs7O9PHjR4X5HDNmDAGg6Ohonnzo0KGkqakp9xwLCwuu/CkpKSQSiSg0NJSn8/DhQy7PsveE4cOHk4aGhmA5QEhICAGgYcOGcWkCoBkzZgiuvWrVKgJADx8+VCufpWHu6H8Penp61L9/f4H86NGjBIBOnDih8Nzr168TAPr9998Fx8aNG0cAKCcnh4iIAgMDuTrfo0cP2rt3L02ZMoW0tLTIx8eHe7/8FPt///59ue8H6uRTxpw5c3jtvWnTprx3CXXaEVFxf2ptbc1Ls2XLlpSRkaGwPESK23tp3r17Rzo6OtSlSxeeXJ18qmuXVO0TfHx8aOnSpXTw4EFavXo1Va9enQBQWFiYQFedfqYkT548IYlEIljG8LVRZ8z8VQOzFRUV4cCBA2jfvr3cr1UikQjHjh2Dl5cXL4CMvr4+Bg4ciIkTJyI6OhrVq1fnjvXr1483k9S4cWMAxS7tJfXksWfPHri6uqJatWq8CIIyt6xz585xriEtWrTAoEGDMGPGDOzduxcSiQRr164tx11QztChQ3l/Dx8+HGFhYTh27Bhq1qzJyf38/ODm5sb9TUTYt28funTpws2CyWjZsiV27tyJW7duoWHDhti3bx/Mzc0xfPhwwfWVzXAZGxsjKioKr1694gWfUcaxY8egqanJeQnIGDNmDPbu3Yvjx4/zoiI2a9aM5+Zbs2ZNGBoaKoy+yigmOzsbYrFYIJcF48jOzi4zjU2bNiE9PR2xsbHYtGkTsrOzUVhYyH09lwUSEYlEOHPmDBdFu3bt2mjQoAFWrVqFX3/9VW7a27dvh4WFBZo3by44dujQId7f/fr1Q+vWrbF48WIMHz4cdnZ2AIq9RLp06cLpdejQAS1btoSvry9mzZqFNWvWcMdMTEzg4eGB4OBgeHt74+nTp5gzZw6Cg4MRGRnJ3RdNTU04OzujZcuWCA4OhkQiwY4dOzB8+HBYWVmhQ4cOXJoTJkyAk5OTINBUaVRNs3v37pgxYwb69++PVatWwcXFBadOnUJYWBgA1Z6ZIkxNTREZGYmcnB
zcvn0b+/fvFwSCycvLw+jRozF48GCeLZHH5s2bce/ePezdu1epnr29vSCoWq9eveDm5oYxY8agbdu2AABPT0/Ur18f8+bNg62tLQICAhATE4MffvgB2tran1T2/zKfYgfOnTuHffv2ISoqSqVrBQQEIDIyEh8+fMCZM2dw9+7dMpeTKLMDJSkqKkKPHj3w4cMHQUTtzMxMZGVlYfDgwVw09E6dOiEvLw9r167FjBkzFAaaGjZsGKKjo3H06FHB0gtbW1t4eXmhTZs2cHBwwMWLF7F8+XKYm5sLdgEAgOPHjyMnJwcxMTHYtm2boOyy9mZlZYWjR49ydtTOzg7dunXD9u3b5dqSoqIi7Ny5E7Vr1xZ40mVnZws8Z2RIJBLu+Zqbm6NLly7YsmULXF1d0bFjR7x8+RLDhw+HtrY28vPzOd3vv/8ea9asQZcuXbBkyRJYWlpi9+7dnOehTE/2ryr1S9V8Mr4Mn2IXVH3uYrGYq/P16tXDtm3bAACdO3eGVCrFxIkTcebMGTRr1uyT7L8sInhpV3R18imjW7duqFu3LlJSUnDkyBEkJyfzrq1OOwKKXfxr166NYcOGwd3dHXfu3MH8+fPRr18/hbsNKGvvpdm7dy/y8vIEZVcnn+rYJXX6hNJR6vv37486derg559/Rt++fbkZb3X7GRlZWVkIDg6Grq5umRHXv2k+96heHV6/fk0AaNKkSQp1xGKx3K8cshlV2WyKbCZ8586dPD3ZDM3mzZs5maKZcFdXV94Xq9K/ESNG8PQzMjLIysqKAND27dsF6X2OmfDSAa/y8vJIQ0ODFyALgOCrZnJystKyoERgiGrVqlHDhg3LzBNKzYTv2rWLJBIJaWhoUL169Wjq1Kn07Nkz3jmlZ8JbtmxJFStWFKT94cMHAvjBswDQ4MGDBboODg7Ut2/fMvP7d/Mtf/X+HDPhJXn//j1ZWlrSmDFjOJlslqJfv34CfUdHRwoICJCb1rNnz3gzKqog+yK+devWMnW9vb3J2dmZ+/vDhw9kaWlJCxcu5On98ccfgi+zc+bMISsrK8GXan9/f7KxseFmwa5cuUIikYjOnj3L6SiaCVc1TSKi8+fPk729PddGDQ0NacuWLQSAvvvuO7nlLY+dkQWNOXz4MCebO3cumZiY8ALIyZsJlwXC+uWXXziZqgHYZMhm91+8eMHJEhMTqWHDhjxvgXHjxpGXlxcZGRmpXLYvzb/RDuTn51P16tWpd+/ePDlUnKEgIpo1axbp6+srDMymjh2QeYDJm9Vyd3cnAHT+/Hme/Pz58wSAtmzZIjdNWQCy0rNvRER//vknaWpqCtrytGnTSCQSCbzOSvP06VOSSCQ8j62hQ4cSAJo+fTpPt6CggLS0tOTaUaL/B5Asbb9kaao6w/zhwwduZlL269mzJ3Xq1IkAUGpqKqe7Z88eMjMz4/SsrKxo9erVBIBGjhxJRGwmXB7fmi3Izc2lpKQk3q+goOCLzYS3bdtWbhuMj48XtIXy2P+ioiJycHCg6tWrf1I+FTFgwACqWLEiL6icqu3o2bNnJJVKae/evbw0N2/erNT7R1l7L42vry+ZmppSXl6e4Jiq+VTVLn2OPkHmVSzz9itvmgUFBdS+fXvS0dHhBZD8VvjHBWb7nGhqasqVk5y9/kpTVFSEGjVqIDIyUu6vdFCR27dv482bNwCKt3X6EiiamS69jkK23qNnz54KyyPbkqC8dOnSBbGxsdw6+AULFsDd3R3Hjx//pHRL8inP87+MtbU1kpKSBHKZTFXPBRkmJiZo0qQJbx9KWRqlA48AxYHcUlNT5aYlWxdU+uutMipWrAgAvHVkynRL6u3btw/JyckIDAzk6fn5+cHQ0JD3xTYsLAxNmjQR7I0dGBiIV69ecWukxo8fj8aNG8PR0RFxcXGIi4vjvE2SkpJ4W3apmiZQvBY1NjYWt2/fxp9//omXL1/C29sbgOIt2sqDj48PrK2tueeZlpaGX3/9FQMGDEB6ejpXpszMTBAR4uLiOFu3cOFC5OXloWvXrpyebCuo1NRUxMXFKQzAJkPe87S1tcWff/6Jx48f48KFC0hMTMT8+fPx4sWLz1r2/xLltQO///47Hj16hEGDBnHPWFZPMzIyEBcXh6ysLKXXDgoKQmZmJi+gYElUtQPTp09HWFgY5s6di169egmOK7JDFSpUAAC5dmjz5s2YMGECBg8eLDeew9q1a2FpaSnw0AsMDAQR4fLly0rz7OzsjNq1a6tkLzU1NWFmZqbQXoaHh0NDQwPdunUTHLO2tkZhYSHXNmXk5eXh3bt3vOdrZGSEgwcPIj4+HufPn0dcXBy2bt2KpKQkbi92GUFBQdwWRVeuXEF8fDycnJwA/N8OmZqaQiwWq1S/1Mkn4/Nx+fJlWFtb834vXrz4pPcDWWAxRefL6kXJdFRpm+Wx/5cuXUJ8fLxcG6JOPhURFBSEFy9e4MKFC5xM1Xa0efNm5OTkoF27drw0Ze8hpWeKZShr7yVJSEjAxYsXERwcDG1tbcFxVfOpql36HH1C6X6/vGkOGDAAR44cwebNm3kBJP+JfFV3dAsLCxgaGioN/uXg4IBHjx4J5A8fPuSOq4uigayzszO3115ZgYY+fvyIfv36wc3NDT4+Ppg/fz46duzIixJaVhqq8OTJE94eyE+fPkVRUVGZ+45bWFjAwMAAhYWFSgNAAMXljoqKQn5+vtzGrAxra2sMGTIEQ4YMwZs3b+Dp6YlZs2ahdevWcvUdHBxw+vRpZGRk8CKgfsrzZAjx8PDAuXPnkJ6ezgvKJHP58fDwUDvN7OxspKWlcX/XqVMHQHEAlNK8evVKbvAioPjl29nZmRtcqoJs+YGFhYVKuiX1kpOTAUAQlZWIUFhYiIKCAp6uvOitsmihMt2EhATEx8cL9icHijtZIyMjLjKyqmnK0NTU5D0fWeCUstqxuuTk5HDPMzU1FZmZmZg/fz7mz58v0HV0dMR3332HAwcOICEhAampqXIjTc+ePRuzZ8/G7du3ldYxZc/TxcWFcx+Ojo5GUlIS+vbtW44SMsprBxISEpCfny/3Q+3vv/+O33//HREREbzlGaWRuTuWtBklUcUOrFq1CtOmTcOoUaN4e0mXpE6dOoiMjOQCs8l49eoVAGEdO3jwIL7//nt06tQJq1atkpumum1WHtnZ2bwo04rspSygnby2kJubi3379sHf31/uwEj2/G7cuMELpHjjxg0UFRXJfb729vZctOgPHz7g5s2b6Ny5s0BPR0eH9z5T2g5paGigRo0auHHjhuDcqKgoODk5cX18efLJ+HRq1aqFyMhInszKygoeHh64ePEiioqKeMHZoqKiIJVKlX70tLW1hYWFhdznfu3aNd6zrFOnDtavXy+o84raJqCe/Q8PD4dIJJIbwEydfCpCmQ0rqx0lJydz7xglUWZDymrvJVEUEV7dfKpqlz5Hn1C63y9PmuPGjcOmTZuwdOnSMj9U/CP43FPr6qJqYLbLly9z8szMTHJycpIbmK3kvodE/3eT3LRpEyfr2rUrGRsbC64ncxORt39gVlYWb
2/SoUOHkra2Nt28eZMyMzPJ2dmZXF1dee4tx48fJwAUERGh8v2QUVZgtjt37nAyKHDd6Nu3L+no6PCC0sl48+YN9//yBGYrKCigDx8+CPTr1atHdevW5f5WFJht9uzZvPO6du0qNzCbvHI5ODhQnz59BPIvzbfmelaSq1evCtyDc3JyqHLlylS/fn1OFh8fTzExMbxzS+5HLeP58+dkYGAgCIpUq1YtMjQ05O3fKAtaOH/+fEE6t27dIgA0ZcoUufl+9+4dtyenjLy8PGrYsCHp6OjwXFtL1mEZMne6kktHZHtil1xKQfT/ujh37lxOVr16dTI1NeUFVykoKKA6deqQgYEB5/Z18uRJioiI4P1kwaAWLlzICzqmapryePPmDdnb21PNmjUV7oGszB09MzNTbsAn2T2RPYePHz8KyhMREUEBAQEkkUgoIiKCrl69SkTF/UFpvbVr1xIA6tu3L0VERHC2Qd4zSkxMJBMTE6pZs6bCchMV7zHbtm1bkkqlvH1evzX+jXYgJiZGbn0AQG3atKGIiAgu8GJKSgqvn5Ah2ydcnrtgWXaAiGjnzp2koaFBPXr0kJt+6bS6d+/Ok3fr1o20tLTo5cuXnOz8+fMkkUgoICBAqSuqLO+l25TsfUTWFvLz83l7XMuIiooiTU1N3lK6nJwcqlChAjk5OVF2djYnl7Wd3bt3C9LZv38/AaANGzbIzWdWVhaZmpoKgjb27NmTpFIpb2mJPAYPHkwaGhq8fb7l8fjxYzIwMBBcZ+7cuYIlOA8fPiRNTU2aMGHCJ+eTuaP/PezcuVPwvpySkkLGxsbUtWtXnu7Tp09572VExfVGV1eXF7Ts9OnTBIAXcDQpKYnEYjE1atSI139NnDiRACitd2XZ/7y8PDIzMxO8k5Qnn/L6KaLiILEikYiePHmi8Bqy65RuRwsXLhSMPYiIli5dSoBw6SxR2e29JDVr1iR7e3ultlGVfKpql9TpE+Tdz/T0dHJ2diZzc3MueLY6aRL9fwnRzz//rHKZvwbqjJm/+iA8MTGRrKysSCqV0qhRo2jt2rU0bdo0cnd3p9TUVHr9+jVZWlqSkZERTZkyhZYsWUIeHh4kEol4m92rMwiXPcjRo0fT9u3b6dChQ0RU3OjbtGlDIpGIQkJCaMWKFbR06VIaPHgwmZqach3NmTNnSCQS0bRp07g0L1y4QBoaGjRu3DhOlpSURJqamuTt7U2bN2+mHTt2yB3gyEM2CK9Rowa1b9+eVq1aRT179pT7sqFosPr69WtycHAgqVRKI0eOpLVr19KcOXMoODiYTExMOL2CggLy9/cnoDia7KpVq2j+/PnUokULOnDgAO86soFMamoq6enpUZ8+fWjx4sW0bt066tKlCwGgRYsWceeUHoQXFhZSQEAAiUQiGjhwIK1atYq+++47AkCjRo1SqVxsEK4awcHBpKWlRePGjaO1a9eSj48PaWlp8dZO+vn5UelvcRUqVKBu3brRvHnzaN26dTRu3DgyNTUliUTC2yGAqHj9kqamJlWtWpUWL15MU6dOJQMDA6pSpYrcCKCyqJ+ytYKl2bRpEzk7O9OECRNozZo1NHv2bC6qZukPN5UrV6bg4GCaN28erVmzhgYOHEhaWlpUsWJFev36NaeXm5tL7u7uJBKJqG/fvrRmzRoaO3YsSSQSsra25n1AkD1TZ2dnmjdvHi1fvpwaNGhAgDCiv7y8l34hVTdNX19fmjBhAq1fv55mzpxJFStWJBMTE7lRpmfOnEkzZ87kohb379+fk8m4ffs2mZmZ0ZAhQ2j58uW0cuVK6tu3L2lpaVGlSpUEOyeURlF09NIoWhPet29faty4MU2bNo3WrVtHP//8M5mZmZGOjo5ggDNixAgaOHAghYWF0bJly6h+/fokEonkrun7lvi32gF5yLPJS5YsoapVq9KECRNo7dq1tHDhQmrevDkBoPbt28tNpyw7EBUVRTo6OmRhYUEbN26krVu38n6lY4/079+fAFCXLl1o1apVFBwcTABo4sSJnE5cXBwZGRmRrq4urVq1SpBmyR1HHj58SHp6eqSvr08TJ06kNWvWULdu3QgANW/enNOT9YP9+/enRYsW0Zo1a2jo0KEklUrJ1NSUHj9+zMunLL5DvXr1aPny5TR27FjS1tamxo0bCz4+EhF17tyZxGKx3A/eMmTrr4OCgmj9+vXUu3dvAkCzZs3i6c2ZM4d69OhBy5cvp7CwMGrRooVCu+bq6kq//PIL/fbbbzRp0iQyNTUlBwcHSkxM5OnJXqwrVKhA8+fPpyVLllDFihXJxsZG8CKuaj4/fPjA2bFWrVoRABozZgzNnDnzm9gVRRnfui2QUVBQQN7e3qSvr0/Tp0+nVatWkbu7OxkYGAjapIODgyCGUkJCApmZmZGzszMtX76cZs+eTSYmJlSjRg3Bx60ZM2Zw7WbVqlU0cOBAEolE3M4aMtS1/4cPHyZAeXwbVfM5cuRIqlu3Lk2ePJnWrVtHc+fOpXr16hEAGj58OC9NVdvR27dvycrKinR0dGjEiBG0du1aGjRoEGlqapK7uztvFycZqrR3IqJ79+4RAPrpp58U6qjT3tW1SyWR1ydMnTqVatWqxd3P6dOnk4ODA4lEItq2bZvS9BSlKftA4eLiIrDdW7du5b3zfW3+UYNwouKv8L179yYLCwsSi8Xk5OREQ4cO5Srps2fPKCgoiIyNjUkikZCXlxdvpolIvUF4ZmYmde/endt+pKSBycvLo3nz5pG7uzuJxWIyMTGhOnXq0PTp0yktLY3S09PJwcGBPD09BVuVjB49mjQ0NOjKlSucbP369eTk5ESampoKZ6vkIRuER0dHU1BQEBkYGJCJiQkNGzaM97WKSHkQg+TkZBo6dChVrFiRtLW1ycrKipo2bUrr1q3j6WVlZdGkSZPI0dGR0wsKCuK97JQchOfm5tK4ceOoVq1aZGBgQHp6elSrVi3B9gOlB+FExQHtRo8eTTY2NqStrU0uLi60YMECwRc9Ngj/NLKzs2ns2LFkZWVFYrGY6tWrJwi4Iu/le+rUqVS3bl0yMTEhLS0tsrGxoZCQEIXbDUVGRpK3tzdJJBIyNTWlXr16yQ3GVFhYSLa2tuTp6akwzzdu3KD27duTra0t6ejokL6+PjVq1EjuLNGkSZPIw8ODjIyMSFtbm+zt7emHH36Qa4zfv39Po0ePpipVqpBYLCZzc3MKCQkRBD4kKg4C5+fnR+bm5qSjo0M1atRQKZCdokG4OmmOHj2anJycSCwWk4WFBXXv3l0w4JABKA66KCMlJYUGDhxI1apVIz09PdLR0SEXFxcaNWoU7+ODIj51EL59+3by9fUlCwsL0tLSInNzc+rYsaPcNrNp0yaqVasW6enpkYGBATVt2pQX+O5b5d9qB+QhzyZfv36dgoODyd7ensRiMenp6ZGnpyctXrxY7nZeqtgBWVtS9Cs9u5SXl0fTpk0jBwcH0tbWpsqVK9OSJUt4OrJ3BEW/0p4yDx8+pKCgIK7vdHBwoLFjx/I8S3Jzc2nkyJFUs2ZNMjQ05PRCQ0MVzt7u
2LGDatWqRWKxmCwtLWnYsGGUnp4u0EtLSyOJREKdOnVSeJ9krFu3jqpWrUo6Ojrk7OxMS5YsEfSnR44cIS8vLzIwMCCpVEre3t5y7SpR8XZkFStWJB0dHbKxsaHBgwcrnEB48eIFBQUFkaGhIenr61O7du0Uzhyqkk+ZLZH3kxdQ91viW7cFJXn//j2FhoaSmZkZSaVS8vPzk9t3yRuEExVvDdaiRQuSSqVkbGxMPXr0kNv3FhUV0YoVK6hKlSqkra1NFStWpMmTJws8wNS1/yEhIaStrV2mt4cq+Tx16hS1a9eOeyc1MDCghg0b0qZNmz6pHSUmJlL//v3J0dGRdHR0yNramgYMGCC371WnvcsCmyrbAlKdfBKpbpdKI69POHXqFDVv3pysrKxIW1ubjI2NqUWLFioHUVM0sFdmv8sTAPvvQp0xs4io7AhXt27dQp06dXDz5k14enqWpc74DEybNg3Tp09HSkoKzM3Nv3Z2GHIIDw9Hz549WbtgMP7DMDvAYDAAZgsYDIZ6Y+Z/XXR0BoPBYDAYDAaDwWAwvlW+anT0/yKZmZnIzMxUqqNKBGgGg8FgMBgMBoPBYPzzYIPwL8zChQsxffp0pTrPnz//QrlhMBgMBoPBYDAYDMaXhA3CvzC9e/dGo0aNlOpYWVlh2rRpmDZt2pfJFIPBYDAYDAaDwWAwvghsEP6FcXJygpOT09fOBoPBYDAYDAaDwWAwvgIsMBuDwWAwGAwGg8FgMBhfCDYIZzAYDAaDwWAwGAwG4wvBBuEMBoPBYDAYDAaDwWB8IdggnMFgMBgMBoPBYDAYjC8EG4QzGAwGg8FgMBgMBoPxhVArOvqxY8cQExPzd+WFwfhHcenSJQCsXTAY/2WYHWAwGACzBQwGA3j+/LnKuiIiorKUrly5gsaNG6OwsPCTMsZg/NvQ0NBAUVHR184Gg8H4ijA7wGAwAGYLGAwGoKmpiYsXL6JBgwZK9VSaCReLxSgsLMS2bdvg6ur6WTLIYPzTOXbsGKZMmcLaBYPxH4bZAQaDATBbwGAwgJiYGPTs2RNisbhMXbXc0V1dXeHp6VnujDEY/yZk7masXTAY/12YHWAwGACzBQwGQz1YYDYGg8FgMBgMBoPBYDC+EGwQzmAwGAwGg8FgMBgMxheCDcIZDAaDwWAwGAwGg8H4QrBBOIPBYDAYDAaDwWAwGF8INghnMBgMBoPBYDAYDAbjC8EG4QwGg8FgMBgMBoPBYHwh2CCc8bfx6tUrTJs2DXfu3PnaWfnXk5ubiwkTJsDGxga6urqoX78+IiMjyzxv//796Nq1K5ycnCCVSlG1alWMGTMGHz58EOhmZmZi1KhRsLOzg1gshqurK1avXi3Qu3DhAgIDA1GxYkVIJBJYWVmhVatWuHTpkkB39uzZ8Pb2hoWFBSQSCVxcXDBq1CikpKTIze+zZ8/QvXt3VKhQAbq6unBxccGkSZN4OteuXcOQIUNQp04daGtrQyQSKSx/Wloaxo8fDxcXF+jq6sLBwQGhoaFISEjg6VWqVAkikUjuz8XFRZBucnIyBg0aBFtbW0gkElSqVAmhoaECvdOnTyMgIADm5uYwNjaGl5cXtm7dKjevGzZsgKurK3efVqxYobBcu3btQoMGDaCnpwdjY2P4+Pjg7NmzPJ3Vq1cjODgY9vb2EIlE6Nu3r8L0bt68iXbt2sHKygr6+vqoWbMmli9fjsLCQoXnPHv2DBKJBCKRCDdu3OAd27x5s8L7+fr1a57u6NGj4enpCVNTU0ilUri6umLatGnIzMxUeG3G/ymvbShN8+bNIRKJMGzYMLnHVamf6tgbdZ77kydPEBISAjs7O0ilUlSrVg0zZsxAVlaWQDcvLw+zZ89GtWrVIJFIYGlpibZt2yIxMZHT6du3r8L6KRKJ8PLlS07X399frk6rVq14171+/TqGDRsGd3d36Onpwd7eHl26dMHjx4/l3s+ioiKsXr0aHh4e0NXVhZmZGZo0aYK7d+9yOnFxcQrzuHPnznKl+fDhQ4wfPx4eHh4wMDCAtbU12rZtK2jDJSnL3ihr7yKRCOHh4bz01LGLDOV8+PABAwcOhIWFBfT09BAQEIBbt26pdK46fSmgeh+1c+dOeHp6QiKRwMLCAqGhoXj79m2501THrpREWR8FqNbv/fHHH0rr9qxZszhddfq9nJwczJkzB25ubpBKpbC1tUVwcDAePHjA01MnTQA4dOgQd+/t7e0xdepUFBQUCPRUrTeq5lPVSXWyEAAAMdZJREFU98KsrCysWrUKLVq0gLW1NQwMDFC7dm2sXr1a8L6hrv37FlFrn3AGQx1evXqF6dOno1KlSvDw8Pja2flX07dvX+zduxejRo2Ci4sLNm/ejDZt2uDcuXNo1KiRwvMGDhwIGxsb9OzZE/b29rh37x5WrlyJY8eO4datW9DV1QUAFBYWomXLlrhx4waGDh0KFxcXnDx5EkOGDEFqaip+/vlnLs3Hjx9DQ0MDgwcPhpWVFVJTU7Ft2zb4+vri6NGjvJfTmzdvwsPDAyEhITAwMEBMTAzWr1+Po0eP4s6dO9DT0+N079y5A39/f9ja2mLMmDEwMzNDQkICXrx4wSvTsWPH8Ntvv6FmzZpwcnJS+pLbvHlzREdHY8iQIahSpQqePn2KsLAwnDx5EjExMTAwMAAALF26VPDyHx8fj8mTJ6NFixY8+YsXL9CwYUMAwODBg2Fra4tXr17h2rVrPL1Dhw6hQ4cOaNCgAaZNmwaRSITdu3ejd+/eePv2LUaPHs3prl27FoMHD0bnzp3x448/4uLFixgxYgSysrIwYcIEXrrTpk3DjBkzEBQUhL59+yI/Px/379/nDRwAYN68ecjIyICXlxeSkpLk3iPZM/Lx8YGLiwsmTJgAqVSK48ePY+TIkXj27BmWLVsm97zRo0dDS0sLubm5CtOeMWMGHB0deTJjY2Pe39evX0fjxo3Rr18/SCQS3L59G3PnzsXp06dx4cIFaGiwb8nKKK9tKMn+/ftx5coVhcdVrZ+q2htA9ef+4sULeHl5wcjICMOGDYOpqSmuXLmCqVOn4ubNmzh48CCXZn5+Ptq2bYvLly9jwIABqFmzJlJTUxEVFYW0tDTY2dkBAAYNGoRmzZrxykhEGDx4MCpVqgRbW1veMTs7O8yZM4cns7Gx4f09b948XLp0CcHBwahZsyZev36NlStXwtPTE1evXkX16tV5+v3790d4eDh69+6NYcOG4ePHj7h9+zbevHkjuP/dunVDmzZteLIGDRoI9FRJ87fffsOGDRvQuXNnDBkyBGlpaVi7di28vb1x4sQJwX1Rxd74+vrKHUQvWbIEd+/eRdOmTTmZOnaRoZyioiK0bdsWd+/exbhx42Bubo6wsDD4+/vj5s2bcj8gl0TVvhRQ3QasXr0aQ4YMQdOmTbF48WIkJiZi2bJluHHjBqKioiCRSNROUx27UhJlfZSq/Z6rq6vcur1161acOnVK8H4AqNbv9ejRA4cOHcKAAQPg6emJV69eYdWqVWjQoAHu3bsHBwcHtdM8fvw4OnToAH9/f6x
YsQL37t3Dr7/+ijdv3vAmVNSpN6rmU9X3wtjYWAwfPhxNmzbFjz/+CENDQ+5d8+rVq9iyZYvgfqpq/75JSAVu3rxJAOjmzZuqqDNKkZ2dTYWFhV87G0RE9PHjR7ny/Px8ys3N/azXun79OgGgTZs2fdZ0vxW2bdv2TbSLqKgoAkALFizgZNnZ2eTs7EwNGjRQeu65c+cEsi1bthAAWr9+PSfbvXs3AaANGzbwdDt37kwSiYSSk5OVXufjx49kaWlJLVu2LLM8e/fuJQC0Y8cOTlZYWEjVq1en+vXrU1ZWltLzX79+zekMHTqUFJm5S5cuEQBauXIlT75x40YCQPv371d6nZkzZxIAunTpEk/eunVrcnR0pLdv3yo9v3nz5mRjY0M5OTmcLD8/n5ydnalmzZqcLCsri8zMzKht27a883v06EF6enr0/v17TnblyhUSiUS0ePFipdcmIoqLi6OioiIiItLT06M+ffrI1RswYADp6OjQu3fveHJfX18yNDSUe86JEydIR0eHJk+eTADo+vXrvOObNm2SK1eVhQsXEgC6cuVKuc7/nHwrdkAen2IbSupXqlSJZsyYQQBo6NChvOPq1E9V7Y0i5D33WbNmEQC6f/8+T7d3794EgHf9efPmkba2NkVFRZV5rdJcvHiRANCsWbN4cj8/P3J3dy/z/EuXLgn62MePH5NYLKYePXrw5Lt27VLJBj1//lzwfBWhapo3btygjIwMnuzt27dkYWFBDRs25MnVsTelycrKIgMDA2revDlPrqpd/Bb51myB7Jnv2bOHk71584aMjY2pW7duZZ6val+qqg3Izc0lY2Nj8vX15foeIqLDhw8TAFq+fLnaaRKVz66U1UeVp98rSeXKlcnFxYUnU7XfS0xMJAA0duxYnvzs2bMEgNfe1OlL3dzcqFatWpSfn8/JJk2aRCKRiGJiYjiZqvVGnXzKQ957YUpKisCWExH169ePANCTJ084mTr270uizpj5q04hxMfHY8iQIahatSrnGhUcHIy4uDienszd4tKlS/jxxx8594iOHTsK3FZv3LiBli1bwtzcHLq6unB0dET//v25456enujUqRPvnBo1akAkEuGvv/7iZLt27YJIJEJMTAwne/nyJfr37w9LS0uIxWK4u7tj48aNvLRkrik7d+7E5MmTYWtrC6lUivT0dJXvy7Zt2+Dl5QWpVAoTExP4+vri1KlTPJ2wsDC4u7tDLBbDxsYGQ4cOFbje+Pv7o3r16rh58yZ8fX0hlUrx888/cy4cCxcuxNKlS+Hs7AyxWIzo6GgAxe5oQUFBMDU1hUQiQd26dXHo0CFBPj98+IDRo0ejUqVKEIvFsLOz475W//HHH6hXrx4AoF+/fpyLyObNm3l5i46ORkBAAOfGMn/+fMF1cnNzMXXqVFSuXBlisRgVK1bE+PHjBV8vIyMj0ahRIxgbG0NfXx9Vq1blzdACwIoVK+Du7s7d27p162L79u0qP5tvkb1790JTUxMDBw7kZBKJBKGhobhy5Ypgprgk/v7+AlnHjh0BgFf3L168CAAICQnh6YaEhCAnJ4c32yQPqVQKCwuLMt3DgGLXbwA83VOnTuH+/fuYOnUqdHV1kZWVpdAV2tLSUuGX75LI2qSlpSVPbm1tDQBlprF9+3Y4OjrCx8eHkz18+BDHjx/HuHHjYGZmhpycHOTn5yu8vomJCcRiMSfT0tLibJeMc+fO4d27dxgyZAjv/KFDh+Ljx484evQoJ1u6dCmsrKwwcuRIEJFSl20HB4cy3Qtl+ZRIJIKv6tbW1nLvUX5+PkaOHImRI0fC2dm5zPQzMjKUurXLQ14dYQj5FNsgY/78+SgqKsLYsWPlHlenfqpqbxQh77kra8caGhrQ0dEBUDy7s2zZMnTs2BFeXl4oKCiQ666uiO3bt0MkEqF79+5yjxcUFChtbz4+PlxeZLi4uMDd3V1Q9sWLF8PLywsdO3ZEUVERPn78WGb+Pn78iLy8PIXHVU2zTp060NfX58nMzMzQuHFjQT7VsTelOXz4MDIyMtCjRw+eXFW7yCibvXv3wtLSkvfOa2FhgS5duuDgwYNKvZQA1ftSVW3A/fv38eHDB3Tt2pXX97Rr1w76+vo8F+K/066o0kep2++V5Nq1a3j69KmgbpdEWb+XkZEBQP13E2VpRkdHIzo6GgMHDoSW1v+doIcMGQIiwt69ezmZqvWmvPmUIe+90NzcHO7u7gLdsvqJsuzft8pXHYRfv34dly9fRkhICJYvX47BgwfjzJkz8Pf3l9s5Dh8+HHfv3sXUqVPxww8/4PDhw7z1aW/evEGLFi0QFxeHn376CStWrECPHj1w9epVTqdx48b4888/ub/fv3+PBw8eQENDgxtoAMWDDgsLC7i6ugIoXuPp7e2N06dPY9iwYVi2bBkqV66M0NBQLF26VJDXmTNn4ujRoxg7dixmz54t6HwVMX36dPTq1Qva2tqYMWMGpk+fjooVK/LWV02bNg1Dhw6FjY0NFi1ahM6dO2Pt2rVo0aKF4IX/3bt3aN26NTw8PLB06VIEBARwxzZt2oQVK1Zg4MCBWLRoEUxNTfHgwQN4e3sjJiYGP/30ExYtWgQ9PT106NABERER3LmZmZlo3LgxVqxYgRYtWmDZsmUYPHgwHj58iMTERLi6umLGjBkAil2Ftm7diq1bt8LX15dLIzU1Fa1atUKtWrWwaNEiVKtWDRMmTMDx48c5naKiIgQGBmLhwoVo3749VqxYgQ4dOmDJkiXo2rUrp/fgwQO0a9cOubm5mDFjBhYtWoTAwEDeepP169djxIgRcHNzw9KlSzF9+nR4eHggKipKpWfzrXL79m1UqVIFhoaGPLmXlxcAqL0mX7aOyNzcnJPl5uZCU1NTUI+lUimAYtet0qSnp+Pt27d4+PAhfv75Z9y/f5/ndiiDiPD27Vu8fv2aczfT1NTkdaynT58GAIjFYtStWxd6enqQSqUICQnB+/fv1SqfDFk6U6ZMwdmzZ/Hy5UucP38e48ePR7169QRulyW5ffs2YmJiBC/ksnxaWlqiadOm0NXVha6uLlq3bi34uOjv748HDx5gypQpePr0KZ49e4aZM2fixo0bGD9+PO9asvyWpE6dOtDQ0OCOA8CZM2dQr149LF++HBYWFtyazpUrV5brHsnymZ6ejkGDBiEmJgbx8fFYs2YN9u/fj4kTJwr0ly5ditTUVEyePLnMtAMCAmBoaAipVIrAwEA8efJErl5BQQHevn2LV69e4dSpU5g8eTIMDAy4Os6Qz6fahoSEBMydOxfz5s1T+EKlTv2Uhzx7I0OV5y6zE6Ghobhz5w5evHiBXbt2YfXq1RgxYgS3pCU6OhqvXr1CzZo1MXDgQOjp6UFPTw81a9bEuXPnlOYxPz8fu3fvho+PD/choCSPHz+Gnp4eDAwMYGVlhSlTpij8+FYSIkJycjKv7Onp6bh27Rrq1auHn3/+GUZGRtDX14eTkxN2794tN53p06dDX18fEokE9erVE3y0L0+apXn9+rXgGX2KvQkPD4eurq5gUkRVu8gom9u3b8
PT01OwZMfLywtZWVlK3cvVvQ5Qtg2QDd7k2RJdXV3cvn0bRUVFaqWpCGV2RZU+St1+rySyGAeKBuFl9XvOzs6ws7PDokWLcPjwYSQmJuLatWsYPHgwHB0dBZMhqqSp6H7a2NjAzs6Odz9VrTflyaeq74WlUfY8y7J/3zSfe2pdHeS5lV65coUA0O+//87JZO4WzZo147mwjB49mjQ1NenDhw9ERBQREVGmW8aePXsIAEVHRxMR0aFDh0gsFlNgYCB17dqV06tZsyZ17NiR+zs0NJSsra0FLqYhISFkZGTEleXcuXMEgJycnMp0my3NkydPSENDgzp27ChwX5eV+82bN6Sjo0MtWrTg6axcuZIA0MaNGzmZn58fAaA1a9bw0pK5cBgaGtKbN294x5o2bUo1atTguYIVFRWRj48Pz7Xml19+UejaJsurMnd0Wd5KPufc3FyysrKizp07c7KtW7eShoYGXbx4kXf+mjVreK7AS5YsIQCUkpIiuJaM7777TiW3QVX5VlzP3N3dqUmTJgL5gwcP5D7/sggNDSVNTU16/PgxJ1u0aBEBEDyHn376iQBQu3btBOm0bNmSABAA0tHRoUGDBlF2drZALykpidMDQHZ2drRr1y6eTmBgIAEgMzMz6tGjB+3du5emTJlCWlpa5OPjw7MLJVHmQkdEdOTIEbK2tuZdv2XLlgJ3zNKMGTOGZ0dkjBgxgstnq1ataNeuXbRgwQLS19cnZ2dn3nKQzMxM6tKlC4lEIu7aUqmUDhw4ICiDpqam3HxYWFhQSEgIERG9f/+eu7a+vj4tWLCAdu3aRa1atSqzHihzRy8oKKBhw4aRtrY2l09NTU1avXq1QDcpKYkMDAxo7dq1RKTYVW7Xrl3Ut29f2rJlC0VERNDkyZNJKpWSubk5JSQkCNKV9QuyX9WqVeW6IH4NvhU7II9PtQ1BQUHk4+PD/Q057uiq1k9FyLM3MlR97jNnziRdXV2e7qRJk3g6+/fv59qHi4sLbdq0iTZt2kQuLi6ko6NDd+/eVZhHmbtsWFiY4Fj//v1p2rRptG/fPvr99985W9WlSxel5SYq7t9QapnPrVu3uHxaWlpSWFgYhYeHk5eXF4lEIjp+/DinGx8fTy1atKDVq1fToUOHaOnSpWRvb08aGhp05MiRcqUpjwsXLpBIJKIpU6Zwsk+xN+/evSMdHR2590hVu/gt8q3ZAj09Perfv79AfvToUQJAJ06cUDktZX2pqjYgJSWFRCIRhYaG8nQePnzIPWvZO/bfZVdU7aPU6fdKn2dpaUleXl6CY+r0e1FRUeTs7MyzaXXq1KGkpKRypblgwQICILd/rVevHnl7e3N/q1NvVM2nDFXfC0uSm5tLbm5u5OjoyHOlV9X+fWnUGTN/M2vC8/Ly6O3bt5SSkkLGxsY0atQo7piskezevZt3jqxTlXWesgHw1KlTKS8vT+51Xr9+zesgxo0bR40aNaIVK1aQjY0NERGlpqaShoYGt56hqKiIjI2NaeDAgZSSksL7yfL2559/8vIwffp0te+BrJHcvn1boc727dsJAB07downz83NJUNDQ94A1s/Pj8RisWAdmmwQ3q9fP5783bt3JBKJaObMmYJyTp8+nQBQYmIiERW/3NWqVUtpecoahOvr6wsGT4GBgVS7dm3e3+7u7oL8PH78mADQr7/+SkT/ryO//fabwvX3ffr0ISMjI7p27ZrSfKvKt9LhOjk5UevWrQXyZ8+eEQBasmSJymmFh4cTABo/fjxPnpSUREZGRuTi4kKnTp2i58+f09q1a8nQ0JAAUNOmTQVp3b59m06dOkUbNmwgX19f6tevn9zBbW5uLkVGRtLhw4dpxowZ5OHhIVh73qRJEwJArVq14snnzJlDACgyMlJuecoahEdFRVGbNm1o1qxZdODAAZo2bRpJpVIKCgpSeE5hYSHZ2try6qmM/v37EwByd3fn1cMdO3YI1qfl5+fT5MmTKTg4mHbs2EHbtm0jX19f0tfX56157d+/P+nq6srNS8WKFem7774jIqKEhASuc9u5cycvv25ubmRnZ6ewTMoG4UTFH7natWtHW7ZsoV27dlGHDh1IS0uLIiIieHq9e/emWrVqcWVXZ73axYsXSSQS0aBBgwTH0tLSKDIykg4cOEDjx48nT09POnz4cJlpfgm+FTsgj0+xDWfPniWRSMSzl/IG4arWT3kosjcyVH3uW7dupZYtW9K6deto37591L9/fxKJRLRixQpO5/fff+de/Eq+iMbHx5O2trZgXXZJunXrRtra2mXGeZAxYMAAApTHLIiJiSFDQ0Nq0KABFRQUcPILFy5w7fjq1aucPCMjg8zNzQXrskvz7t07srS0pKpVq36WNJOTk8nOzo6cnJx49vtT7M3atWsJAB08eFBwTFW7+C3yrdkCDQ0N+uGHHwTyM2fOEACB/VaGsr5UHRvQtWtX0tLSooULF9KzZ8/owoULVKtWLW6w++LFC7XTLI0yu6JOH6Vqv1eSkydPEgBatmyZQp2SKOr3Hj9+TJ07d6affvqJDhw4QAsXLiQzMzNq1KhRmYNWeWnKYnrIi9/TuHFj3vu8OvVG3Xyq+l5YEpk9PXr0qFI9Ivn270vzjxmEZ2Vl0ZQpU8jOzo731bP0AFHWSEp2HkT/H/D+8ccfRFQ8WO7cuTM3yxsYGEgbN27kzeoSEbm4uHAdrre3N02cOJHu3r1LAOjZs2d05MgRXqNMTk7m5U3eTzYjLMtTyRleVRk8eDBpaGgoDZAmG3Q8e/ZMcMzDw4Pq1q3L/e3n50dOTk4CPdkgfMaMGTy5LIiPst+tW7eIiEgikSh9aSEqexBerVo1gbxPnz5UqVIl7m9XV1el+RkxYgQRFdelhg0bEgAyNzenrl270q5du3gDoejoaLK1tSUAVLlyZRoyZAj38aQ8fCsd7ueaCb9w4QJJJBJq2bIl72ujjPPnz5O9vT137w0NDbngJ8o6RKLigba7uzvvI5EiZAHTSr5st23blgDQ/9q79+CoyjuM4w+5LQlCMAkQIgmSBIrcabSJGAgoKgiEe5EWEqwoKqi1FZXi1EHFS7VTbTGDFxQEBCnIRafesKixtY6AlToiyi0UwaCiCOUSkvz6R+as2ewm2QV9ofX7mckf7B7Ovrt79j3nOec9v3fBggUBy5aVlTV40quhA4dt27ZZQkKCLV++PODx+fPnhzzR5fGKjjz44IP1vl7d9lRWVlpMTExAvzZlypSAgwGzmpORHTt2DDiLHslVBkkWGxsbcFBvZv6TaGVlZSHX01AIv/feey01NTVoR9m/f39LS0vzbytekaa//vWv/mUiLcCWl5dnWVlZjS63ePFii4qKsn/+859hrff7dLr0A6GcaN9w/Phx69atmxUVFQU8HiqEn+gVq8b6m1BCfe9Lliyx+Ph4/8G7Z9KkSZaQkOAPzt4ouAEDBgStd8CAAdahQ4eQr3nw4EFLSEgIOdqnPt6Vvbvuuivk83v37rXMzExLT0+3Tz/9NOA5b78Zqj1XXHGFxcbGNvp5eSOUvM/kRNd56NAhO++88
ywxMdH+9a9/BTx3Mv1Nv379LCkpKeTFknD7xdPRqeoLjh07Znv37g34q6ysPO2uhJuZff311/7RIt7fhAkTbNSoUSbJvvrqq4jXWVtD/Uok+6hw93t1FRUVWXR0tH322Wchnw+l7n7v66+/tjZt2gQdY7z++usmhR6R09g6v48r4SfbznCOC3/3u9812JeGUrf/c+1/pjDb9ddfr9mzZ+unP/2pli1bpldeeUWvvvqqkpOT/feF1BYdHR1yPWYmSWrSpImWL1+ut99+W9OmTfMXUsvJyQkoGJKfn6/S0lIdOXJEGzZsUN++fdWtWze1bNlSpaWlKi0t1RlnnKHevXtLkr8tEyZM0Kuvvhryz5uSyHO6FBBpqB11n/Pe580331zv+8zOzv7O2tbY9+m1qXv37vW2xyvaER8frzfffFNr167VxIkTtWnTJo0bN04XX3yxv1DFOeecoy1btmjp0qXKz8/XihUrlJ+frzvuuOM7e0+nQtu2bUNOMeU9VneqnFDef/99FRYWqlu3blq+fHlA4Q5Pv379tH37dr333nt666239OmnnyovL0+S1KlTpwbXHxcXp8LCQj333HM6cuRIg8v26dNHbdu2DZg71nsPdQuAtG7dWlJNfYFIzZ8/X0ePHtXQoUMDHi8sLJSkkPOaSzX3e0VFRWn8+PFBz9XXzujoaCUnJ/vbWVFRoXnz5mnIkCEB913FxsZq8ODBWr9+vb/ISNu2bVVVVRU0NVFFRYW+/PJL/2t6hRSTk5ODflsn8zmVlJTowgsvDCrWVFhYqD179vjvdb/lllvUt29fdejQQTt37tTOnTv9c7/u3bs3aO71UNLT08O6x9+7j/R/ZS7QU+VE+4ann35aW7Zs0ZQpU/zfpfc9Hzx4UDt37vTXbQl3+6wtnP4mlFDfe0lJiXr37u2fXsxTWFiow4cP++91rO+3KdX8Pur7baxatUqHDx9usMhSXenp6ZIUcls+cOCABg8erK+//lovvfRS0OfTWDuPHz/eaKG2uq9/IuusqKjQqFGjtGnTJq1evTpoCrUT7W927dql0tJSjR07VrGxsUGvGW6/iG/9/e9/V9u2bQP+/v3vf38nxwbhiKQPSExM1OrVq1VWVqY33nhDO3fu1MKFC7V37161atXKXwjt++hXItlHhbvfq+3IkSNauXKlBg4cGPK3Vp+6+70VK1aovLzcfyziKSgoUIsWLeo9NmlonV6xtPq2h9qfZ7jbzcm2s7Hjwvnz5+vWW2/VNddcE1aNGU9D/e/p5pTOE758+XIVFxfr97//vf+xo0ePnnTF27y8POXl5Wn27Nl65pln9POf/1xLly7V5MmTJdUUZ3vqqae0dOlSVVVVqU+fPoqKivKH882bN6tPnz7+HYtXcKSqqqrBYk0nKysrS9XV1frwww/rnVfbm3Nvy5YtyszM9D9eUVGhHTt2nFT7vPXFxsY2up6srCx98MEHDS4TTuXlxmRlZfnnEW1sfVFRUbrooov880/ec889mjlzptatW+d/P82aNdO4ceM0btw4/0HG7NmzNWPGjID5Kf+X9OrVS+vWrdM333wTUIDJKzjX2Bzt27Zt06BBg9S6dWv95S9/Cdrp1BYdHR2wPq8QWTjb3ZEjR2RmOnjwYKMnqY4ePaoDBw74/52Tk6PHH388aK7rPXv2SKr5jUaqvLxcZhZUTdQrqFRZWRn0f44dO6YVK1aof//+IQ8CcnJyJCmonRUVFfriiy/87fzyyy9VWVkZspLp8ePHVV1d7X/O+7zXr18fMBfm+vXrVV1d7X8+KipKvXr10rvvvquKioqAInon+znV107p289p165dKisrC5qrVKo5cElMTGy0b9++fXtYbTx27Jiqq6sDthEEO9G+YdeuXTp+/HjQyWWpJqA//fTTWrlypUaMGBH29umJpL+pK9T3Xl5erjPPPDNo2brbZ/fu3RUbGxv025Rqfh/1bXeLFy/WGWecEXSg2ZDt27dLCv69HT16VMOGDdPHH3+stWvXqkuXLkH/Ny0tTampqfW2s2nTpmrevHlErx/pOqurq1VUVKTXXntNy5YtU0FBQdD/O9H+ZsmSJTKzkCc1IukX8a2ePXvq1VdfDXgsNTVVvXr1UmlpqaqrqwNOarzzzjtKSEho9OR5uCLtAyQpIyNDGRkZkmpmO9iwYYNGjx59wusMp1+JZB8V7n6vtjVr1oSs+N+Yuvu98vJySQp6fe94JdRrN7bO2p9n7cKWe/bs0e7duwNm0Ah3u/ku2lnfceHq1as1efJkjRo1So888kij66mtvv73tPRdX1qPRFJSkk2aNCngMW/oQe2hkfUNF/GGfnuFWvbv3x90j7E37K72XMCffPKJSTVFXnr16uV//P7777ezzz7b4uLigoY+TJo0yeLi4oKGZJlZQHEzr02159cLVySF2QYNGhTwXktKSkwKLswWqhBZQ3Pr9e/f35KSkmzPnj0Nvs9wCrNt3ry53vsO62tbcXGxtW/f3v9vb2iwV0SjtsOHD9uhQ4fMzILmcjT7duiMV6Ah1P1806dPt6ioKPvmm2+CnmvM6TIM9R//+EfQ93n06FHLzs623Nxc/2NlZWUBc0GafTssMi0tzXbs2BHR6+7bt88yMjKsR48eAdtrqHuOvvrqK0tPT7f09HT/Y4cOHQo5b703T3jtIkB79+41n89n+fn5Aa81Y8YMk1Tvff4NDaHz5hyue7vEQw89FHSfo8erQ1H3nnXP0aNHrXXr1paZmRlwP5R3D6RX16KystJatmxpnTp1Crj95ODBg9auXbuAWzUOHz5sSUlJQcNhJ0yYYAkJCQHbvleg8LHHHvM/duTIEcvMzLQuXbqEbLNZw8PRu3XrZklJSQG/n8rKSsvJybHmzZv7h5S+/PLLtnLlyoC/66+/3j90v3ahlLoFIc2+/b16t5iY1Ww3oYaset9dfd+DS6dLPxDKifYNmzdvDvouvcKnl112ma1cudK/j4hk+wy3v4nkex86dKjFxcXZli1bApYdMWKERUVFBQz3Hj58uEVHRwe81w8//NCio6PtuuuuC3q9ffv2WUxMjE2cODFkOw8cOBB0u1t1dbWNGzcuaJuorKy0wsJCi4mJafTexhtvvNEk2SuvvOJ/7PPPP7cWLVrYZZddFtC+unbv3m1nnnlm0Jza4a7TzOy6666rd79b24n0Nz169LCMjIyQxTQj6RdPR6dbX7B06dKg41Gv7lLtQsRmZlu3brWtW7fWu67G5gkPtw8IxbsVs/Z+/PvoVyLZR4W736utsLDQEhIS6r3HOdz9nncMdMcddwQsu2rVKpNk9913X8TrNDPr3Lmz9ezZM+D2kdtvv92aNGkSUGQ23O0mknaGe1xoVnPrY9OmTW3AgAFB/WttkfR/LkWSmU/plfChQ4dq4cKFSkxMVJcuXfT2229r7dq1Sk5OPqH1LViwQCUlJRo5cqSysrJ08OBBPf7442rRokXAmbTs7GylpqZqy5Ytuv766/2P9+vXT7feequkmqvltd13331at26dcnNzddVVV6lLly7av3+/Nm7cqLVr
[... base64-encoded PNG cell output omitted ...]"},"metadata":{}}]},{"cell_type":"code","source":[],"metadata":{"id":"XJHeWqJUmCVK"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"8x17BNPnk4KC","executionInfo":{"status":"ok","timestamp":1727190536291,"user_tz":-60,"elapsed":23525,"user":{"displayName":"Jeevanandham Poongavanam","userId":"13255850175966981554"}},"outputId":"a6b71675-d36f-47d7-87b3-8f68ed84b5ec"},"execution_count":72,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"A100","machine_shape":"hm","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},
3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.9"},"widgets":{"application/vnd.jupyter.widget-state+json":{"c44511b1233d47c9aa3f294ffe046cd6":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_48f3a7c91d4e4f5e8b4579bc6ddc8075","IPY_MODEL_8087926c570547bf8c4fb5ccebd014ca","IPY_MODEL_349178bff9c14ac2be638b8eaeaf14be"],"layout":"IPY_MODEL_e28fe04f82e5459a868158c825914c33"}},"48f3a7c91d4e4f5e8b4579bc6ddc8075":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e7d41e2b26d742ed9c8e516c574b7abe","placeholder":"ā€‹","style":"IPY_MODEL_99700d061e5e4c219d815e9a0b7d76b7","value":"embeddingā€‡nodes:ā€‡100%"}},"8087926c570547bf8c4fb5ccebd014ca":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"","description":"","description_tooltip":null,"layout":"IPY_MODEL_498c3c835eb44cfaaa0e58bd5ca9fc97","max":284,"min":0,"orientation":"horizontal","style":"IPY_MODEL_ac003e361a52420e9b368ab3a315d50d","value":284}},"349178bff9c14ac2be638b8eaeaf14be":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_aaa553c5eb044ab9aa26de9f0dd713ef","placeholder":"ā€‹","style":"IPY_MODEL_bc5d9989c9d4441aa62962c35fbe1bd4","value":"ā€‡284/284ā€‡[00:23<00:00,ā€‡11.61it/s]"}},"e28fe04f82e5459a868158c825914c33":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"mi
n_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":"hidden","width":null}},"e7d41e2b26d742ed9c8e516c574b7abe":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"99700d061e5e4c219d815e9a0b7d76b7":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"498c3c835eb44cfaaa0e58bd5ca9fc97":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ac003e361a52420e9b368ab3a315d50d":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"aaa553c5eb044ab9aa26de9f0dd713ef":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":n
ull,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bc5d9989c9d4441aa62962c35fbe1bd4":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"e39750bad741402b95dba6e1d67f3302":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f778da18131d471f9db08e0904ae4ecf","IPY_MODEL_39dd92ce29084a048959c0d60220339e","IPY_MODEL_d1364508a15246059d6a1982eb391081"],"layout":"IPY_MODEL_e0dc95cc892b4e36941c877e95d11f9e"}},"f778da18131d471f9db08e0904ae4ecf":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1f97c93c48434e69832f16c7a514f8c4","placeholder":"ā€‹","style":"IPY_MODEL_a4c574a957fb48d2856d77ad8adb65c9","value":"Generating:ā€‡100%"}},"39dd92ce29084a048959c0d60220339e":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ae61b02afc5e4d118dcacd3367b3eb41","max":50,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2a38dc3e468046ed80a9037592610baa","value":50}},"d1364508a15246059d6a1982eb391081":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b921d7b9dbc94524bb68a23bdda5d780","placeholder":"ā€‹","style":"IPY_MODEL_274905cf106546078f459319650f2af7","value":"ā€‡50/50ā€‡[01:55<00:00,ā€‡ā€‡7.21s/it]"}},"e0dc95cc892b4e36941c877e95d11f9e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{
"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1f97c93c48434e69832f16c7a514f8c4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a4c574a957fb48d2856d77ad8adb65c9":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ae61b02afc5e4d118dcacd3367b3eb41":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2a38dc3e468046ed80a9037592610baa":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jup
yter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b921d7b9dbc94524bb68a23bdda5d780":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"274905cf106546078f459319650f2af7":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"acc378e22b9c41c7aac97fbdafa6c5f8":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_91f5aad3c506446098481ed123583952","IPY_MODEL_f5f7ca3ae4334ba6845aaebb6e6ef4f2","IPY_MODEL_10f8cbddf1e2466cbb4e394634e0b0d2"],"layout":"IPY_MODEL_426bd8f8107445ba8e1b086a8570100e"}},"91f5aad3c506446098481ed123583952":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_40d14c028ab246e8b399ca86b6642433","placeholder":"ā€‹","style":"IPY_MODEL_af22d37b9ce94ff2a187ad8c0077b5e4","value":"modules.json:ā€‡100%"}},"f5f7ca3ae4334ba6845aaebb6e6ef4f2":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_bb2e260555b7452baa929dc2ec09d1f2","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_1d7675c29cd542af8ec5725d7eba916c","value":349}},"10f8cbddf1e2466cbb4e394634e0b0d2":{"model
_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ec15e5265a5d475c86ca0fc9a518351c","placeholder":"ā€‹","style":"IPY_MODEL_18239c9c3c7e4e97a3d4f2f8a8441fe8","value":"ā€‡349/349ā€‡[00:00<00:00,ā€‡24.6kB/s]"}},"426bd8f8107445ba8e1b086a8570100e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"40d14c028ab246e8b399ca86b6642433":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"af22d37b9ce94ff2a187ad8c0077b5e4":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bb2e260555b7452baa929dc2ec09d1f2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid
_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1d7675c29cd542af8ec5725d7eba916c":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ec15e5265a5d475c86ca0fc9a518351c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"18239c9c3c7e4e97a3d4f2f8a8441fe8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"622615a518eb452ba8fdb6758f432b35":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_5ad78bc865d94857bf24b083eac5e46c","IPY_MODEL_a01452341af741e5ba9ac7b55d21a78c","IPY_MODEL_068fbf6e595d4332961d1f910054b215"],"layout":"IPY_MODEL_5da5381f972e47c495e283869c0e4bc3"}},"5ad78bc865d94857bf24b083eac5e46c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_677f839fc04a448bb76cf3abf52ea84b","placeholder":"ā€‹","style":"IPY_MODEL_e9ee57bd83c84a828f8c881f1f3ac8c3","value"
:"config_sentence_transformers.json:ā€‡100%"}},"a01452341af741e5ba9ac7b55d21a78c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_4b7f36d001db46c0bbf565002234ab18","max":252,"min":0,"orientation":"horizontal","style":"IPY_MODEL_3fb6a4d04fcd4c399b0790bc4eb5712c","value":252}},"068fbf6e595d4332961d1f910054b215":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0a7dcf6447af4aa783db37b48c84b688","placeholder":"ā€‹","style":"IPY_MODEL_f15e6c12ed1a4478a8f1f97970671c65","value":"ā€‡252/252ā€‡[00:00<00:00,ā€‡23.3kB/s]"}},"5da5381f972e47c495e283869c0e4bc3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"677f839fc04a448bb76cf3abf52ea84b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e9ee57bd83c84a828f8c881f1f3ac8c3":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/cont
rols","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4b7f36d001db46c0bbf565002234ab18":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3fb6a4d04fcd4c399b0790bc4eb5712c":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0a7dcf6447af4aa783db37b48c84b688":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f15e6c12ed1a4478a8f1f97970671c65":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"cf284052a9b94efda4812274812ee059":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_8473636ca7d14120
9699dced51d1c981","IPY_MODEL_76fa9b2bccce477cb698af80e4119aa8","IPY_MODEL_91a2f1a67ab84dbbabee2ce0960d3fc6"],"layout":"IPY_MODEL_f26618a6c2ea470b91e9cfd1fec0c01a"}},"8473636ca7d141209699dced51d1c981":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b7fe3cc89b2745b6a0dcee1b0cb23fa7","placeholder":"ā€‹","style":"IPY_MODEL_5ed8c96b094e4aa58397d5e030369361","value":"README.md:ā€‡100%"}},"76fa9b2bccce477cb698af80e4119aa8":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ac643757d51b4611b46e4536ca900589","max":84541,"min":0,"orientation":"horizontal","style":"IPY_MODEL_d65b5b5430e64f08839842e22b10b4bb","value":84541}},"91a2f1a67ab84dbbabee2ce0960d3fc6":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a0ecc063893d4505b33e57d524be5af3","placeholder":"ā€‹","style":"IPY_MODEL_6c67aeca324f4e508258a90625987463","value":"ā€‡84.5k/84.5kā€‡[00:00<00:00,ā€‡343kB/s]"}},"f26618a6c2ea470b91e9cfd1fec0c01a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b7fe3cc89b2745b6a0dcee1b0cb23fa7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":n
ull,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5ed8c96b094e4aa58397d5e030369361":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ac643757d51b4611b46e4536ca900589":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d65b5b5430e64f08839842e22b10b4bb":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a0ecc063893d4505b33e57d524be5af3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6c67aeca324f4e508258a90625987463":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version
":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f2217e3c09a146a6a62240003634ea53":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_af3271f8fc124662a7bd606b837d10fe","IPY_MODEL_8683275138a24b5898928139657d7637","IPY_MODEL_2d927689334a4e4eaa80a493ae21ceaa"],"layout":"IPY_MODEL_d573d26f7daa4ef78a5ee5305745a751"}},"af3271f8fc124662a7bd606b837d10fe":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_16d2867d3c494b45b0fcf21f9908faee","placeholder":"ā€‹","style":"IPY_MODEL_77f9bc5aa4aa47528fe74cee979f5924","value":"sentence_bert_config.json:ā€‡100%"}},"8683275138a24b5898928139657d7637":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_60feadb9a1df4f719b8996d4d1fc5d84","max":107,"min":0,"orientation":"horizontal","style":"IPY_MODEL_09615354f8c04e8c99af3e74a107b0c5","value":107}},"2d927689334a4e4eaa80a493ae21ceaa":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8da9ce52896a403d96ced1d8b68f62b9","placeholder":"ā€‹","style":"IPY_MODEL_aae33dec48324052ad5c2dfbd819eb59","value":"ā€‡107/107ā€‡[00:00<00:00,ā€‡9.16kB/s]"}},"d573d26f7daa4ef78a5ee5305745a751":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min
_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"16d2867d3c494b45b0fcf21f9908faee":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"77f9bc5aa4aa47528fe74cee979f5924":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"60feadb9a1df4f719b8996d4d1fc5d84":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"09615354f8c04e8c99af3e74a107b0c5":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"8da9ce52896a403d96ced1d8b68f62b9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"
flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"aae33dec48324052ad5c2dfbd819eb59":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"d282c878a3744bf1af2108bf156fa39d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_e36e1181ba1f40f792c3784e361ee0b2","IPY_MODEL_1255b07dc7824afcae083741dbfe518a","IPY_MODEL_462a2e70d3ec4b1187c55fbeb343d63b"],"layout":"IPY_MODEL_01ba6d708ebd4ad2bfd914b4d41d36cd"}},"e36e1181ba1f40f792c3784e361ee0b2":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_58c9073769ed406b881f143bc39ab9f9","placeholder":"ā€‹","style":"IPY_MODEL_3e5e6086fe324208a9fc1ad99b66b873","value":"config.json:ā€‡100%"}},"1255b07dc7824afcae083741dbfe518a":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_dbd49d199d444879801506c4b36ad8b7","max":704,"min":0,"orientation":"horizontal","style":"IPY_MODEL_d70432c444d141f8a59481a9e689bae8","value":704}},"462a2e70d3ec4b1187c55fbeb343d63b":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8f1157e0f6bb47d4afda1d6a2ecb7260","placeholder":"ā€‹","style":"IPY_MODEL_f577c00780664959beba245bd0cb5be5","value":"ā€‡704/704ā€‡[00:00<00:00,ā€‡61.4kB/s]"}},"01ba6d708ebd4ad2bfd914b4d41d36cd":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_m
odel_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"58c9073769ed406b881f143bc39ab9f9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3e5e6086fe324208a9fc1ad99b66b873":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dbd49d199d444879801506c4b36ad8b7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d70432c444d141f8a59481a9e689bae8":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyte
r-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"8f1157e0f6bb47d4afda1d6a2ecb7260":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f577c00780664959beba245bd0cb5be5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"23907f6347ff467fa190e55d67c7be86":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_65965fe3dd844bccbfe7a3ca80b7671c","IPY_MODEL_eba60bb33c144bf5adcbbe837670b2a2","IPY_MODEL_9304f1069160432d8db0423e3ac53e47"],"layout":"IPY_MODEL_02b7a7cdd9c24d78bd14e30aa46180ba"}},"65965fe3dd844bccbfe7a3ca80b7671c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5fc01868d62140ff81137ab03b62babd","placeholder":"ā€‹","style":"IPY_MODEL_eebeadafb2b14b3b866a9de4b7bf8787","value":"model.safetensors:ā€‡100%"}},"eba60bb33c144bf5adcbbe837670b2a2":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_1c9b2a3a1ff6489f8ba430d3b55980ab","max":1336413848,"min":0,"orientation":"horizontal","style":"IPY_MODEL_80c650923aa04781ae230ca1cf2c734f","value":1336413848}},"9304f1069160432d8db0423e3
ac53e47":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b035a788ccda4cccb4876a94d69c7196","placeholder":"ā€‹","style":"IPY_MODEL_68c3d2518f7a478b99055d25a5e590d6","value":"ā€‡1.34G/1.34Gā€‡[00:02<00:00,ā€‡491MB/s]"}},"02b7a7cdd9c24d78bd14e30aa46180ba":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5fc01868d62140ff81137ab03b62babd":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"eebeadafb2b14b3b866a9de4b7bf8787":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"1c9b2a3a1ff6489f8ba430d3b55980ab":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"gr
id_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"80c650923aa04781ae230ca1cf2c734f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"b035a788ccda4cccb4876a94d69c7196":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"68c3d2518f7a478b99055d25a5e590d6":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"138fc40abc4f4d56a71124d6b0edbc66":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_cb4523a6c5354a37b73e2378265ba421","IPY_MODEL_d61629e8381e46648604ebf3617cc738","IPY_MODEL_1541ba898c504ea9a31803715657757d"],"layout":"IPY_MODEL_7fac3018a8ea46aabbeaa7638cf0955b"}},"cb4523a6c5354a37b73e2378265ba421":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_072dcba6721048c0b5f9b3118a2b60bc","placeholder":"ā€‹","style":"IPY_MODEL_cd339b2be7134142bcb2e1
71f1b674fa","value":"tokenizer_config.json:ā€‡100%"}},"d61629e8381e46648604ebf3617cc738":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_54beb492d35547c8b982846c30b83ccc","max":1381,"min":0,"orientation":"horizontal","style":"IPY_MODEL_4d24e296a067445c81f1d8822090d94e","value":1381}},"1541ba898c504ea9a31803715657757d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_dfa0976ca21c441faa95c120df38a372","placeholder":"ā€‹","style":"IPY_MODEL_9e42f65225e64de08a5814f3e6e0c08a","value":"ā€‡1.38k/1.38kā€‡[00:00<00:00,ā€‡126kB/s]"}},"7fac3018a8ea46aabbeaa7638cf0955b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"072dcba6721048c0b5f9b3118a2b60bc":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cd339b2be7134142bcb2e171f1b674fa":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-
widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"54beb492d35547c8b982846c30b83ccc":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4d24e296a067445c81f1d8822090d94e":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"dfa0976ca21c441faa95c120df38a372":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9e42f65225e64de08a5814f3e6e0c08a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a8a539cd3d064bf0b872bd661bd04ea7":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_cfe9
81a2a1a54ded9b678e85fe46ac18","IPY_MODEL_9ce6f4aacdfe451f9b41ff79b6ca99f8","IPY_MODEL_4a846afcfdd84c9db79d7e974376bcf8"],"layout":"IPY_MODEL_5711b30ecf6d4b4e9e1f4bcf758ad599"}},"cfe981a2a1a54ded9b678e85fe46ac18":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_34a02f0aecd9493b9d84d5b6856d68a3","placeholder":"ā€‹","style":"IPY_MODEL_099db16d5ba44a79962227a71f39615a","value":"vocab.txt:ā€‡100%"}},"9ce6f4aacdfe451f9b41ff79b6ca99f8":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3a69155072db491a804f1ae492faf2fb","max":231508,"min":0,"orientation":"horizontal","style":"IPY_MODEL_7770223b77ae4877a3a731fe0c5839c3","value":231508}},"4a846afcfdd84c9db79d7e974376bcf8":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ef60881d1aa14b8fb568d02684501344","placeholder":"ā€‹","style":"IPY_MODEL_4bd41ff686884594b2957a1d9f967a04","value":"ā€‡232k/232kā€‡[00:00<00:00,ā€‡472kB/s]"}},"5711b30ecf6d4b4e9e1f4bcf758ad599":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"34a02f0aecd9493b9d84d5b6856d68a3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_aut
o_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"099db16d5ba44a79962227a71f39615a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3a69155072db491a804f1ae492faf2fb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7770223b77ae4877a3a731fe0c5839c3":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ef60881d1aa14b8fb568d02684501344":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4bd41ff686884594b2957a1d9f967a04":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_mo
dule_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4226ecb991fe4142b7e66e29dd72302b":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_d3129da0803f407aaffee2458c6c3986","IPY_MODEL_23b5503757d34aa2a65f7c7ceb6f45d2","IPY_MODEL_bc0c58360bab4c389a582181555a374d"],"layout":"IPY_MODEL_2185240d63f34b928002fd78e4efea64"}},"d3129da0803f407aaffee2458c6c3986":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_40505f46820948efb2f8bde65dc068fc","placeholder":"ā€‹","style":"IPY_MODEL_13c244d566494863a6a0854f8ffe0616","value":"tokenizer.json:ā€‡100%"}},"23b5503757d34aa2a65f7c7ceb6f45d2":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_a26de5f36a884773902dff24b0675ce3","max":711649,"min":0,"orientation":"horizontal","style":"IPY_MODEL_695bd216987c404b80605d4cfad86136","value":711649}},"bc0c58360bab4c389a582181555a374d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_186bebdb4f504be7a07f6cbfb1eb50ad","placeholder":"ā€‹","style":"IPY_MODEL_34087c9db2534ff1b0ca1e20dd9bcb8e","value":"ā€‡712k/712kā€‡[00:00<00:00,ā€‡960kB/s]"}},"2185240d63f34b928002fd78e4efea64":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":n
ull,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"40505f46820948efb2f8bde65dc068fc":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"13c244d566494863a6a0854f8ffe0616":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a26de5f36a884773902dff24b0675ce3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"695bd216987c404b80605d4cfad86136":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"186bebdb4f504be7a07f6cbfb1eb50ad":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display
":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"34087c9db2534ff1b0ca1e20dd9bcb8e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ffe5a0b43f3840a6b20348d35b60dd56":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_1e74c5f0193a4e488c0e168299c7b566","IPY_MODEL_374aa0f623334dac8267aa4a89e54dac","IPY_MODEL_0b9cb2ad73e3455cacec0bf089316328"],"layout":"IPY_MODEL_805ce4d268aa4c82acc08c2c6c33addf"}},"1e74c5f0193a4e488c0e168299c7b566":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_71a8bcf1281b4936b575e40d725a3a39","placeholder":"ā€‹","style":"IPY_MODEL_0046ba366a0d4850927a74a5ec1f813e","value":"special_tokens_map.json:ā€‡100%"}},"374aa0f623334dac8267aa4a89e54dac":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_33769cc3cc024647a3b5c6f7c9a3ed2a","max":695,"min":0,"orientation":"horizontal","style":"IPY_MODEL_007221a90c0440f385117f5a76f21fc6","value":695}},"0b9cb2ad73e3455cacec0bf089316328":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_28988e76f4024aaaa680944513eea9d1","placeholder":"ā€‹","style":"IPY_MODEL_dc15c9f9a8bd4630820285f22b33c483","value":"ā€‡695/695ā€‡[00:00<00:00,ā€‡59.2kB/s]"}},"805ce4d268aa4c82acc08c2c6c33addf":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":
"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"71a8bcf1281b4936b575e40d725a3a39":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0046ba366a0d4850927a74a5ec1f813e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"33769cc3cc024647a3b5c6f7c9a3ed2a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"007221a90c0440f385117f5a76f21fc6":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_mo
del_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"28988e76f4024aaaa680944513eea9d1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dc15c9f9a8bd4630820285f22b33c483":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a737433e6b4a4e35bf9eb70cd1052124":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_9953739863c94a71a14aaff9213b57ed","IPY_MODEL_2c50ded119dc4707adfee868d167084c","IPY_MODEL_67a4a60f751646b6ab4b6247ded3f9e3"],"layout":"IPY_MODEL_1d5756bf10ed4dffba350b3685662cf9"}},"9953739863c94a71a14aaff9213b57ed":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5fdb555a5e804b73b57cd284c0c58a8b","placeholder":"ā€‹","style":"IPY_MODEL_baaf29de2d51467c9026e5b063dcb491","value":"1_Pooling/config.json:ā€‡100%"}},"2c50ded119dc4707adfee868d167084c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_320b9bcafd084ef79b95610851768769","max":297,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c2fc5e9ca17547acbf113751c5f14a43","value":297}},"67a4a60f751646b
6ab4b6247ded3f9e3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bd0d0e3384724a5e8e624bb096b89221","placeholder":"ā€‹","style":"IPY_MODEL_bd9991542ffb49d5bed76e31f99617f0","value":"ā€‡297/297ā€‡[00:00<00:00,ā€‡25.4kB/s]"}},"1d5756bf10ed4dffba350b3685662cf9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5fdb555a5e804b73b57cd284c0c58a8b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"baaf29de2d51467c9026e5b063dcb491":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"320b9bcafd084ef79b95610851768769":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":n
ull,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c2fc5e9ca17547acbf113751c5f14a43":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"bd0d0e3384724a5e8e624bb096b89221":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bd9991542ffb49d5bed76e31f99617f0":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"8eca00b2b4d64ad78dbaba001f843eef":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a5cb78a70b344104930f388d1265d82a","IPY_MODEL_43fe25dab4dd4639b862e80c96e617c9","IPY_MODEL_c6852309e4d94231a95bf65d9458d44c"],"layout":"IPY_MODEL_712cdd608a094e5f9ca67c3f26630638"}},"a5cb78a70b344104930f388d1265d82a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_2a3dfa3ed5e047b1862d92de2edb9e0a","placeholder":"ā€‹","style":"IPY_MODEL_aca246802e5a43a
8bd69fc9ebab2c2d5","value":"modules.json:ā€‡100%"}},"43fe25dab4dd4639b862e80c96e617c9":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_4150ffdda851437aa4628c319df29b20","max":349,"min":0,"orientation":"horizontal","style":"IPY_MODEL_01e3d2f5cf42437699f69141c55c6bf5","value":349}},"c6852309e4d94231a95bf65d9458d44c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_aa53805767f5476d9df83a3618d70416","placeholder":"ā€‹","style":"IPY_MODEL_8f666ad87d634b42838e6eef1497b567","value":"ā€‡349/349ā€‡[00:00<00:00,ā€‡26.1kB/s]"}},"712cdd608a094e5f9ca67c3f26630638":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2a3dfa3ed5e047b1862d92de2edb9e0a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"aca246802e5a43a8bd69fc9ebab2c2d5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets
/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"4150ffdda851437aa4628c319df29b20":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"01e3d2f5cf42437699f69141c55c6bf5":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"aa53805767f5476d9df83a3618d70416":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8f666ad87d634b42838e6eef1497b567":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"d473b2536bc742d28400c18d5773c945":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_7185bb94734
e439c97a70c03f655a119","IPY_MODEL_9eba6f0fad0e49f8aae4128a3dabada4","IPY_MODEL_d9e4811fc42f494cac21ea50f4957584"],"layout":"IPY_MODEL_174e72fa89e3479bb392a273903e7fc1"}},"7185bb94734e439c97a70c03f655a119":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_c1fc1b64a2b84d2ca6b4f8c085a16efc","placeholder":"ā€‹","style":"IPY_MODEL_c80b0a19a4064ef386286f6bf585d6c2","value":"config_sentence_transformers.json:ā€‡100%"}},"9eba6f0fad0e49f8aae4128a3dabada4":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_76fa88923f0c4fd6ada62725551671b6","max":277,"min":0,"orientation":"horizontal","style":"IPY_MODEL_cf000bc9c858433bbb5ac76b8b0adcef","value":277}},"d9e4811fc42f494cac21ea50f4957584":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_42d5d2159983482e84dcea002ea675c3","placeholder":"ā€‹","style":"IPY_MODEL_382f432c43fb4af6a02e693c10ba763d","value":"ā€‡277/277ā€‡[00:00<00:00,ā€‡21.5kB/s]"}},"174e72fa89e3479bb392a273903e7fc1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c1fc1b64a2b84d2ca6b4f8c085a16efc":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null
,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c80b0a19a4064ef386286f6bf585d6c2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"76fa88923f0c4fd6ada62725551671b6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cf000bc9c858433bbb5ac76b8b0adcef":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"42d5d2159983482e84dcea002ea675c3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"382f432c43fb4af6a02e693c10ba763d":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel"
,"model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"528b1b3718a54125bd12a7a0a10308f1":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a18a814a6ae346728bba4595ef97b0ab","IPY_MODEL_8d6b8a880952414694dfafb2e76d79d0","IPY_MODEL_7db32f9323f74fd4aa32394b5b576d0c"],"layout":"IPY_MODEL_8df20ffe0edf4941846d52cfda777eab"}},"a18a814a6ae346728bba4595ef97b0ab":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_1b25cc1b5d06407282d25b25e951f7ab","placeholder":"ā€‹","style":"IPY_MODEL_cc4a2c5f2b2348e1964683756fea8c20","value":"README.md:ā€‡100%"}},"8d6b8a880952414694dfafb2e76d79d0":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_9ef4f0b8655e40d28e5101169cc163ea","max":70275,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c600c0292ebf442c8f23558b523915fb","value":70275}},"7db32f9323f74fd4aa32394b5b576d0c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7573ce52d0c642e5ae6cd2a1d0c45dce","placeholder":"ā€‹","style":"IPY_MODEL_c05f110f2459470aa195c360c12e8790","value":"ā€‡70.3k/70.3kā€‡[00:00<00:00,ā€‡2.89MB/s]"}},"8df20ffe0edf4941846d52cfda777eab":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_hei
ght":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1b25cc1b5d06407282d25b25e951f7ab":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cc4a2c5f2b2348e1964683756fea8c20":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"9ef4f0b8655e40d28e5101169cc163ea":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c600c0292ebf442c8f23558b523915fb":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"7573ce52d0c642e5ae6cd2a1d0c45dce":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"d
isplay":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c05f110f2459470aa195c360c12e8790":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"2e7a54d80e34401b8cab156d22bccef5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_61816b365b6d48d8a723fe196751aa8c","IPY_MODEL_a9912e11e4794c8e822ebee164db18d3","IPY_MODEL_165093c9763444ef8c5485c198e4477a"],"layout":"IPY_MODEL_035f7dcb863142258f2137e81a4762e1"}},"61816b365b6d48d8a723fe196751aa8c":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_50671d72075b4fbe82096f4aae2197a5","placeholder":"ā€‹","style":"IPY_MODEL_9fcdd74e05414f61b6854bae8968c292","value":"sentence_bert_config.json:ā€‡100%"}},"a9912e11e4794c8e822ebee164db18d3":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_58cf8a15c39a4980abf6bf61f6eea93b","max":53,"min":0,"orientation":"horizontal","style":"IPY_MODEL_eadfa8398b6a4b9692df34ca7e4e0536","value":53}},"165093c9763444ef8c5485c198e4477a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_9149d4e09c1048e48b395a22fa0c3c08","placeholder":"ā€‹","style":"IPY_MODEL_145b62bc002a497ca2fcfa2b542ef115","value":"ā€‡53.0/53.0ā€‡[00:00<00:00,ā€‡4.55kB/s]"}},"035f7dcb863142258f2137e81a4762e1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_v
ersion":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"50671d72075b4fbe82096f4aae2197a5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9fcdd74e05414f61b6854bae8968c292":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"58cf8a15c39a4980abf6bf61f6eea93b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"eadfa8398b6a4b9692df34ca7e4e0536":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","stat
e":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"9149d4e09c1048e48b395a22fa0c3c08":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"145b62bc002a497ca2fcfa2b542ef115":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"086c630d0a36484295fa8a77829afea5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_e77471f92e9e46b49cae63a2e62f2bdc","IPY_MODEL_3e7e6ee92f734486aa3cfaf9d79007fd","IPY_MODEL_d5295bfdf60d4f30b7676526c8bd99ee"],"layout":"IPY_MODEL_f68c37576f734e61ad797aac1ead1a49"}},"e77471f92e9e46b49cae63a2e62f2bdc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_c657107551444ba5b115e483bc7b9f7a","placeholder":"ā€‹","style":"IPY_MODEL_1e18fc833e454d8eb77515949dba5a15","value":"config.json:ā€‡100%"}},"3e7e6ee92f734486aa3cfaf9d79007fd":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_143d692540bd45f99d4909627a91bc89","max":632,"min":0,"orientation":"horizontal","style":"IPY_MODEL_f52c90d2284446179a3e49bed071a780","value":632}},"d5295bfdf60d4f30b
7676526c8bd99ee":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_402ed63f0501430c884e6f30e8f5cd35","placeholder":"ā€‹","style":"IPY_MODEL_f1154be768144359b66507a1ccfcfb9f","value":"ā€‡632/632ā€‡[00:00<00:00,ā€‡57.0kB/s]"}},"f68c37576f734e61ad797aac1ead1a49":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c657107551444ba5b115e483bc7b9f7a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1e18fc833e454d8eb77515949dba5a15":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"143d692540bd45f99d4909627a91bc89":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":nul
l,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f52c90d2284446179a3e49bed071a780":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"402ed63f0501430c884e6f30e8f5cd35":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f1154be768144359b66507a1ccfcfb9f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"724e460c24424e018687507ddfa0a6f2":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_056a4b3f084844f1882f466676a867c9","IPY_MODEL_bb420c6f307844cda30bda80d1d7b671","IPY_MODEL_d396fcd1e12b4d71b7d21e8c752f8129"],"layout":"IPY_MODEL_714656117721468f8c9b0430c0b42232"}},"056a4b3f084844f1882f466676a867c9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_f601d98297cd45cc85643b8d47378119","placeholder":"ā€‹","style":"IPY_MODEL_7a9282ea98824dada
ddcf741eb73fd99","value":"model.safetensors:ā€‡100%"}},"bb420c6f307844cda30bda80d1d7b671":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_b50845a67838440fb53f9947a273fd7e","max":1340612432,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2332c98b83a143e8a7c5e77f06def4e3","value":1340612432}},"d396fcd1e12b4d71b7d21e8c752f8129":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_52554b54f7e2439f890bb76fa5548ea1","placeholder":"ā€‹","style":"IPY_MODEL_4625725c5f68420d94fdbedf2c59e5ae","value":"ā€‡1.34G/1.34Gā€‡[00:02<00:00,ā€‡488MB/s]"}},"714656117721468f8c9b0430c0b42232":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f601d98297cd45cc85643b8d47378119":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7a9282ea98824dadaddcf741eb73fd99":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_modul
e":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b50845a67838440fb53f9947a273fd7e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2332c98b83a143e8a7c5e77f06def4e3":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"52554b54f7e2439f890bb76fa5548ea1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4625725c5f68420d94fdbedf2c59e5ae":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"cbe674782efb419aa4cd080b38fd0bc1":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["I
PY_MODEL_ed2347a873ab4aae8ca789a2a9784b44","IPY_MODEL_5c40f63d69964b0bb558880356ada001","IPY_MODEL_345a5b47ac674fb09785374856c6dec5"],"layout":"IPY_MODEL_d0ade386238c499886cf9d54465250a3"}},"ed2347a873ab4aae8ca789a2a9784b44":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0a41b5d0d0564fedac613b33d4546e87","placeholder":"ā€‹","style":"IPY_MODEL_a995ff70a8404050964809847a5f0f9c","value":"tokenizer_config.json:ā€‡100%"}},"5c40f63d69964b0bb558880356ada001":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_b7db564eeae14b2bab8f8566ce9f4a72","max":1381,"min":0,"orientation":"horizontal","style":"IPY_MODEL_55c65b2943c546a086c4fd47254034df","value":1381}},"345a5b47ac674fb09785374856c6dec5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0397a35743a843e0923e6e1234726896","placeholder":"ā€‹","style":"IPY_MODEL_b7e47c1976f5435e80ae44f039ddf2fc","value":"ā€‡1.38k/1.38kā€‡[00:00<00:00,ā€‡113kB/s]"}},"d0ade386238c499886cf9d54465250a3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0a41b5d0d0564fedac613b33d4546e87":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"gr
id_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"a995ff70a8404050964809847a5f0f9c":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"b7db564eeae14b2bab8f8566ce9f4a72":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"55c65b2943c546a086c4fd47254034df":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0397a35743a843e0923e6e1234726896":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b7e47c1976f5435e80ae44f039ddf2fc":{"model_module":"@jupyter-widgets/controls","model_name":"Descripti
onStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"668a151f91694ed8bd73daf3ef2df17e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_dbd2b879eaff4cf1a830c705be067f4f","IPY_MODEL_a06ba8261df04a1bb5badeacd89ad1b1","IPY_MODEL_50b50e4a57dc4b05baec75cbd2e43147"],"layout":"IPY_MODEL_d1fa0a6ef9a343958f19713a5d291b9e"}},"dbd2b879eaff4cf1a830c705be067f4f":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6304c039e52c4825ab98dc996940d5d0","placeholder":"ā€‹","style":"IPY_MODEL_d4d6735192d74306bf59975032fb96cd","value":"vocab.txt:ā€‡100%"}},"a06ba8261df04a1bb5badeacd89ad1b1":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_479811262d0f4bef8691d9733b6a9ecd","max":231508,"min":0,"orientation":"horizontal","style":"IPY_MODEL_3f65e481913c43ba918eb4456c49f706","value":231508}},"50b50e4a57dc4b05baec75cbd2e43147":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_6001599b97f24e62ba7c7e61143ca4a3","placeholder":"ā€‹","style":"IPY_MODEL_dfd1275bd2c1407293bed5a4920c6135","value":"ā€‡232k/232kā€‡[00:00<00:00,ā€‡920kB/s]"}},"d1fa0a6ef9a343958f19713a5d291b9e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":n
ull,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6304c039e52c4825ab98dc996940d5d0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d4d6735192d74306bf59975032fb96cd":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"479811262d0f4bef8691d9733b6a9ecd":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3f65e481913c43ba918eb4456c49f706":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"6001599b97f24e62ba7c7e61143ca4a3":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bot
tom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dfd1275bd2c1407293bed5a4920c6135":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"a30b48c740fc4174a8ef2f8099a4232f":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f4be69341fa2420081c9f5de51b91830","IPY_MODEL_7c33b3dc997a4790b65677e904f8ce3f","IPY_MODEL_407ec3385c0a48beaccfb8bdb38e3baf"],"layout":"IPY_MODEL_062e2743e56e4bb98f77b362cb1ed747"}},"f4be69341fa2420081c9f5de51b91830":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_eb2bd7455f174abfb2f2f3e41c934fac","placeholder":"ā€‹","style":"IPY_MODEL_7fa16c15684247d1a13d93eef3dfbab8","value":"tokenizer.json:ā€‡100%"}},"7c33b3dc997a4790b65677e904f8ce3f":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_bce6a63c39a5447da2a8896150cb7633","max":711649,"min":0,"orientation":"horizontal","style":"IPY_MODEL_39d34fb852194b2b831015d10dba0305","value":711649}},"407ec3385c0a48beaccfb8bdb38e3baf":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_da60cfaa12654865ac0be8a35f258ca9","placeholder":"ā€‹","style":"IPY_MODEL_6a86185f627346afb5590b5782b40bd5","value":"ā€‡712k/712kā€‡[00:00<00:00,ā€‡2.82MB/s]"}},"062e2743e56e4bb98f77b362cb1ed747":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model
_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"eb2bd7455f174abfb2f2f3e41c934fac":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7fa16c15684247d1a13d93eef3dfbab8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"bce6a63c39a5447da2a8896150cb7633":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"39d34fb852194b2b831015d10dba0305":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5
.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"da60cfaa12654865ac0be8a35f258ca9":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"6a86185f627346afb5590b5782b40bd5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"260d31349c0648bea6a7ff6b44234715":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_5ba23385bccd495c9b3863a65e195613","IPY_MODEL_682da715b6f446a19735c00b7e6d3713","IPY_MODEL_ebc5aec5f7694c64aa083b89dd35f59e"],"layout":"IPY_MODEL_e396b16ccac847cf81139b520a669fca"}},"5ba23385bccd495c9b3863a65e195613":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5accf3202ba64800922b67357bdd55e4","placeholder":"ā€‹","style":"IPY_MODEL_b09548fe95e4468e956ad1077c59ec6c","value":"special_tokens_map.json:ā€‡100%"}},"682da715b6f446a19735c00b7e6d3713":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ba00713f1af142e0b35aff402fa86d76","max":695,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c56acd0abb7748b0a816b1f8676c5c39","value":695
}},"ebc5aec5f7694c64aa083b89dd35f59e":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_7614eaecaae549709f79d64ba3204990","placeholder":"ā€‹","style":"IPY_MODEL_1afb52dcd51e452d957a91675d37f1ba","value":"ā€‡695/695ā€‡[00:00<00:00,ā€‡60.7kB/s]"}},"e396b16ccac847cf81139b520a669fca":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5accf3202ba64800922b67357bdd55e4":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b09548fe95e4468e956ad1077c59ec6c":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ba00713f1af142e0b35aff402fa86d76":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex"
:null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c56acd0abb7748b0a816b1f8676c5c39":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"7614eaecaae549709f79d64ba3204990":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1afb52dcd51e452d957a91675d37f1ba":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"35dfb1d98e884af0922df7670789df0d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_84f500ab900b4328b7aa941d90eefecc","IPY_MODEL_5900e484b9be4786897add8cb3b76d4b","IPY_MODEL_46cc80d23af84d95a59ec8f1cafaf74a"],"layout":"IPY_MODEL_9e60490dca9d4d52a269c66e7b0d26cb"}},"84f500ab900b4328b7aa941d90eefecc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_461c0be4767a4e04a53bfc3a7febe316","placeholder":"ā€‹","style":"IPY_MO
DEL_4d499ab233704334955fa2f944a392be","value":"1_Pooling/config.json:ā€‡100%"}},"5900e484b9be4786897add8cb3b76d4b":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_877af66c0bc448929352e2daadebcc22","max":297,"min":0,"orientation":"horizontal","style":"IPY_MODEL_cb06b325f4e84f8299e8f6455f56e5a1","value":297}},"46cc80d23af84d95a59ec8f1cafaf74a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_0ea181c5f5784d87a9bed12e4794ce08","placeholder":"ā€‹","style":"IPY_MODEL_8ce4a08bed2941c3a1da8249228af255","value":"ā€‡297/297ā€‡[00:00<00:00,ā€‡23.1kB/s]"}},"9e60490dca9d4d52a269c66e7b0d26cb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"461c0be4767a4e04a53bfc3a7febe316":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4d499ab233704334955fa2f944a392be":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_mod
el_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"877af66c0bc448929352e2daadebcc22":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cb06b325f4e84f8299e8f6455f56e5a1":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"0ea181c5f5784d87a9bed12e4794ce08":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8ce4a08bed2941c3a1da8249228af255":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c5aa90875bf34e34a66c5c9ba54c218c":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","child
ren":["IPY_MODEL_8c5edff941bc43688ade081fc764babb","IPY_MODEL_dbdc3f8aa6fa418c9729b314a891f586","IPY_MODEL_59bb0f6f4fc943d881fb39ccfb2747b9"],"layout":"IPY_MODEL_79780f370f0d49d2a8d4f5f276a1e842"}},"8c5edff941bc43688ade081fc764babb":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_912b3dd47fc14078b4c877289eeffdc8","placeholder":"ā€‹","style":"IPY_MODEL_74ea00a4ad824d6581fab9afdc6816e2","value":"Evaluating:ā€‡100%"}},"dbdc3f8aa6fa418c9729b314a891f586":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_8a14fd0353884af3b76506bf4988f150","max":245,"min":0,"orientation":"horizontal","style":"IPY_MODEL_0e1037ee7fa74f9896f56986824d8106","value":245}},"59bb0f6f4fc943d881fb39ccfb2747b9":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_ff8f58e7006a494aa25a2efd28bb393e","placeholder":"ā€‹","style":"IPY_MODEL_3bd620f9085a4a8093201c481014af50","value":"ā€‡245/245ā€‡[02:32<00:00,ā€‡ā€‡6.69s/it]"}},"79780f370f0d49d2a8d4f5f276a1e842":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"912b3dd47fc14078b4c877289eeffdc8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_ar
ea":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"74ea00a4ad824d6581fab9afdc6816e2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"8a14fd0353884af3b76506bf4988f150":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0e1037ee7fa74f9896f56986824d8106":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"ff8f58e7006a494aa25a2efd28bb393e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3bd620f9085a4a8093201c481014af50":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionSty
leModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"17d4977ec7e0472fb90379712b7616ea":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_241c74b4bb654eada159e64be8429522","IPY_MODEL_829c146c8c744a38a762dcb02573438f","IPY_MODEL_c813fe03f99b4213947c3af04a8c27ee"],"layout":"IPY_MODEL_3088af7fb67d4f079fd3c98a198fcc06"}},"241c74b4bb654eada159e64be8429522":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_79375495c9444aa5bf9229612bdcb38d","placeholder":"ā€‹","style":"IPY_MODEL_53817832f9724ff382bcb4821e00dd10","value":"Evaluating:ā€‡100%"}},"829c146c8c744a38a762dcb02573438f":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_368a487e2158427593f8841de07cac4e","max":245,"min":0,"orientation":"horizontal","style":"IPY_MODEL_79e1f0538b1f4afba34bf5d5e7e32027","value":245}},"c813fe03f99b4213947c3af04a8c27ee":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_9f231687e1ae43fc987fbd8c45097836","placeholder":"ā€‹","style":"IPY_MODEL_07a9a6cbf8114575ba1d58d1b5952d93","value":"ā€‡245/245ā€‡[02:12<00:00,ā€‡ā€‡1.53s/it]"}},"3088af7fb67d4f079fd3c98a198fcc06":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min
_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"79375495c9444aa5bf9229612bdcb38d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"53817832f9724ff382bcb4821e00dd10":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"368a487e2158427593f8841de07cac4e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"79e1f0538b1f4afba34bf5d5e7e32027":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"9f231687e1ae43fc987fbd8c45097836":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":nul
l,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"07a9a6cbf8114575ba1d58d1b5952d93":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"72584a70d8124b18bc44fe171ac66cb7":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_af0e6592a8ee461eae997c727ae0696d","IPY_MODEL_3b1cbb8a7f84466fbbb0cf9cada079c6","IPY_MODEL_23ab547479c744768ab08064c13460cf"],"layout":"IPY_MODEL_ac1518d71ead440b974f2bf229355c34"}},"af0e6592a8ee461eae997c727ae0696d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_720aeba021664c8aa0ecf11190887e3d","placeholder":"ā€‹","style":"IPY_MODEL_1717ad2782e74fad8a31be5202b255cc","value":"Evaluating:ā€‡100%"}},"3b1cbb8a7f84466fbbb0cf9cada079c6":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_85c4801f23ba4d89a2cfd8326ca2d20b","max":245,"min":0,"orientation":"horizontal","style":"IPY_MODEL_8d07456aedcd432ab08c24c87a7fbb5f","value":245}},"23ab547479c744768ab08064c13460cf":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_65faaa51a55d429eb73557af3d7300a1","placeholder":"ā€‹","style":"IPY_MODEL_1de250ea2d4d41e5bcef954fdc324c67","value":"ā€‡245/245ā€‡[02:32<00:00,ā€‡ā€‡2.92s/it]"}},"ac1518d71ead440b974f2bf229355c34":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":
"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"720aeba021664c8aa0ecf11190887e3d":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1717ad2782e74fad8a31be5202b255cc":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"85c4801f23ba4d89a2cfd8326ca2d20b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8d07456aedcd432ab08c24c87a7fbb5f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_mo
del_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"65faaa51a55d429eb73557af3d7300a1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"1de250ea2d4d41e5bcef954fdc324c67":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"58e5a572ee4148348d8199053712ac3e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_19f8ff94cbb7468cada40d9f4c2e67d6","IPY_MODEL_380455b7165442c9bc96b4801173e140","IPY_MODEL_920d3cb46cec45d38678208137515e01"],"layout":"IPY_MODEL_11d120a2e64648988bdbf98a948a471c"}},"19f8ff94cbb7468cada40d9f4c2e67d6":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8419daae397541d68251585260938892","placeholder":"ā€‹","style":"IPY_MODEL_f6c6517ada1645ae87e6171a9ff1554f","value":"Evaluating:ā€‡100%"}},"380455b7165442c9bc96b4801173e140":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_333d4a896fb44f6496376686d9fb9624","max":245,"min":0,"orientation":"horizontal","style":"IPY_MODEL_c52ca5ce98f44595b2f693b030023d85","value":245}},"920d3cb46cec45d38678208137
515e01":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_081445d434fa4a24910771ef0f02d770","placeholder":"ā€‹","style":"IPY_MODEL_f8ea4e64045646f492aac278250bc7e8","value":"ā€‡245/245ā€‡[02:37<00:00,ā€‡ā€‡1.70s/it]"}},"11d120a2e64648988bdbf98a948a471c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"8419daae397541d68251585260938892":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f6c6517ada1645ae87e6171a9ff1554f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"333d4a896fb44f6496376686d9fb9624":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"gri
d_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c52ca5ce98f44595b2f693b030023d85":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"081445d434fa4a24910771ef0f02d770":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f8ea4e64045646f492aac278250bc7e8":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file diff --git a/Tasks/Task 5/Task5-ChunkingImprovement.png b/Tasks/Task 5/Task5-ChunkingImprovement.png new file mode 100644 index 0000000000000000000000000000000000000000..600c3e939abe96fe3fe9bb9df456314b712a18f5 Binary files /dev/null and b/Tasks/Task 5/Task5-ChunkingImprovement.png differ diff --git a/Tasks/Task 5/Task5-ChunkingTableComparison.png b/Tasks/Task 5/Task5-ChunkingTableComparison.png new file mode 100644 index 0000000000000000000000000000000000000000..9425bb487e15a7bc657fccd9891aa16cefee9c69 Binary files /dev/null and b/Tasks/Task 5/Task5-ChunkingTableComparison.png differ diff --git a/Tasks/Task 5/Task5-ComparisonBaseFineTuned.png b/Tasks/Task 5/Task5-ComparisonBaseFineTuned.png new file mode 100644 index 0000000000000000000000000000000000000000..b85fec9651bc1c54da87057f4030d1ea584f3d55 Binary files /dev/null and b/Tasks/Task 5/Task5-ComparisonBaseFineTuned.png differ diff --git a/Tasks/Task 5/Task5-ComparisonBaseFineTunedImprovemant.png b/Tasks/Task 5/Task5-ComparisonBaseFineTunedImprovemant.png new file mode 100644 index 
diff --git a/Tasks/Task 5/base_chain_eval_results_df (1).csv b/Tasks/Task 5/base_chain_eval_results_df (1).csv
new file mode 100644
index 0000000000000000000000000000000000000000..0574175f1f9991b78c1fbb156d46f59c017db447
--- /dev/null
+++ b/Tasks/Task 5/base_chain_eval_results_df (1).csv
@@ -0,0 +1,577 @@
+question,contexts,answer,ground_truth,faithfulness,answer_relevancy,context_recall,context_precision,answer_correctness
+What techniques are suggested to assess and manage statistical biases related to GAI content provenance?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance'
+ 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty'
+ 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content'
+ 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV'
+ 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54'
+ 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.'
+ 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections'
+ 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36'
+ 'Priorities Related to Information Integrity Research and Development.'
+ 'MP-2.3-001 \nAssess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of \nevaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs). \nInformation Integrity']",I don't know.,"Techniques such as re-sampling, re-weighting, or adversarial training are suggested to assess and manage statistical biases related to GAI content provenance.",0.0,0.0,0.0,0.249999999975,0.18342742304293966
+What actions are suggested to ensure information integrity in the context of AI systems?,"['Priorities Related to Information Integrity Research and Development.'
+ 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12'
+ 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content'
+ '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance'
+ 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous'
+ 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty'
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,'
+ 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections'
+ 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3'
+ 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']",I don't know.,"The suggested actions to ensure information integrity in the context of AI systems include: employing methods to trace the origin and modifications of digital content (MS-1.1-001), integrating tools designed to analyze content provenance and detect data anomalies, verify the authenticity of digital signatures, and identify patterns associated with misinformation or manipulation (MS-1.1-002), and disaggregating evaluation metrics by demographic factors to identify any discrepancies in how content provenance mechanisms work across diverse populations (MS-1.1-003).",0.0,0.0,0.0,0.26785714284375,0.18303681022057922
+What are the requirements for providing accessible notice and instructions for opting out of automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3'
+ 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections'
+ 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40'
+ '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance'
+ 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20'
+ 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who'
+ 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles'
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,'
+ 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57'
+ 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']",I don't know.,"Those impacted by an automated system should be given a brief, clear notice that they are entitled to opt-out, along with clear instructions for how to opt-out. Instructions should be provided in an accessible form and should be easily findable by those impacted by the automated system. The brevity, clarity, and accessibility of the notice and instructions should be assessed (e.g., via user experience research).",0.0,0.0,0.6666666666666666,0.37152777776848955,0.18188111253680794
+What issues arose from the system awarding benefits changing its criteria invisibly?,"['results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20'
+ 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections'
+ 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42'
+ 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36'
+ 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content'
+ 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty'
+ '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance'
+ 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3'
+ 'Priorities Related to Information Integrity Research and Development.'
+ 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51']",I don't know.,"Individuals were denied benefits due to data entry errors and other system flaws. These flaws were only revealed when an explanation of the system was demanded and produced. The lack of an explanation made it harder for errors to be corrected in a timely manner.",0.0,0.0,1.0,0.3333333333,0.1864493661106931
+How is the federal government working to combat discrimination in mortgage lending?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections'
+ 'Priorities Related to Information Integrity Research and Development.'
+ 'HOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe federal government is working to combat discrimination in mortgage lending. 
The Depart\xad\nment of Justice has launched a nationwide initiative to combat redlining, which includes reviewing how \nlenders who may be avoiding serving communities of color are conducting targeted marketing and advertising.51 \nThis initiative will draw upon strong partnerships across federal agencies, including the Consumer Financial \nProtection Bureau and prudential regulators. The Action Plan to Advance Property Appraisal and Valuation \nEquity includes a commitment from the agencies that oversee mortgage lending to include a \nnondiscrimination standard in the proposed rules for Automated Valuation Models.52' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew Americaā€™s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation' + '2018. https://ainowinstitute.org/aiareport2018.pdf\n51. Department of Justice. Justice Department Announces New Initiative to Combat Redlining. Oct. 22,\n2021. https://www.justice.gov/opa/pr/justice-department-announces-new-initiative-combat-redlining\n52. PAVE Interagency Task Force on Property Appraisal and Valuation Equity. Action Plan to Advance\nProperty Appraisal and Valuation Equity: Closing the Racial Wealth Gap by Addressing Mis-valuations for\nFamilies and Communities of Color. March 2022. https://pave.hud.gov/sites/pave.hud.gov/files/\ndocuments/PAVEActionPlan.pdf\n53. U.S. Equal Employment Opportunity Commission. The Americans with Disabilities Act and the Use of\nSoftware, Algorithms, and Artificial Intelligence to Assess Job Applicants and Employees. EEOC\xad\nNVTA-2022-2. May 12, 2022. https://www.eeoc.gov/laws/guidance/americans-disabilities-act-and-use\xad\nsoftware-algorithms-and-artificial-intelligence; U.S. Department of Justice. 
Algorithms, Artificial' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32']","The federal government is working to combat discrimination in mortgage lending through a nationwide initiative launched by the Department of Justice to combat redlining. This initiative includes reviewing how lenders who may be avoiding serving communities of color are conducting targeted marketing and advertising. It also involves strong partnerships across federal agencies, including the Consumer Financial Protection Bureau and prudential regulators. Additionally, the Action Plan to Advance Property Appraisal and Valuation Equity includes a commitment from the agencies that oversee mortgage lending to include a nondiscrimination standard in the proposed rules for Automated Valuation Models.","The federal government is working to combat discrimination in mortgage lending through a nationwide initiative launched by the Department of Justice to combat redlining. This includes reviewing how lenders who may be avoiding serving communities of color are conducting targeted marketing and advertising. The initiative involves strong partnerships across federal agencies, including the Consumer Financial Protection Bureau and prudential regulators. Additionally, the Action Plan to Advance Property Appraisal and Valuation Equity includes a commitment from the agencies that oversee mortgage lending to include a nondiscrimination standard in the proposed rules for Automated Valuation Models.",1.0,0.9924503623378166,1.0,0.2916666666520833,0.9998948917804034 +"What are the characteristics of trustworthy AI that should be integrated into organizational policies, processes, procedures, and practices?","['organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'harm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systemsā€™ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']",I don't know.,"The characteristics of trustworthy AI that should be integrated into organizational policies, processes, procedures, and practices include establishing transparency policies and processes for documenting the origin and history of training data and generated data, and establishing policies to evaluate risk-relevant capabilities of GAI and robustness of safety measures.",0.0,0.0,1.0,0.9999999999,0.1791991601217512 +What are the conditions under which individuals should be able to opt out from automated systems in favor of a human alternative?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. 
It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36']",I don't know.,"Individuals should be able to opt out from automated systems in favor of a human alternative where appropriate. Appropriateness should be determined based on reasonable expectations in a given context and with a focus on ensuring broad accessibility and protecting the public from especially harmful impacts. In some cases, a human or other alternative may be required by law.",0.0,0.0,0.6666666666666666,0.36666666664833336,0.18517566892878545 +What is data poisoning and how can it affect GAI system outputs?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. 
\nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,Data poisoning is a cybersecurity risk where an adversary compromises a training dataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts of the model could exacerbate risks associated with GAI system outputs.,0.0,0.0,0.0,0.0,0.17944988259517342 +How do opaque decision-making processes in automated systems impact individuals' ability to contest decisions?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. 
\n3' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"Opaque decision-making processes in automated systems impact individuals' ability to contest decisions by denying them the knowledge they need to address the impact of these systems on their lives. 
Without clear explanations, it becomes harder to understand and contest decisions, as illustrated by examples such as a lawyer unable to determine why a client was cut off from Medicaid-funded assistance and a parent not being notified about data collection for a child maltreatment risk assessment.",0.0,0.0,0.6666666666666666,0.22499999998875,0.18758492202920485 +Who participated in the OSTP meetings focused on the development of the Blueprint for an AI Bill of Rights?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americansā€™ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"Participants in the OSTP meetings included Adobe, American Civil Liberties Union (ACLU), The Aspen Commission on Information Disorder, The Awood Center, The Australian Human Rights Commission, Biometrics Institute, The Brookings Institute, BSA | The Software Alliance, Cantellus Group, Center for American Progress, Center for Democracy and Technology, Center on Privacy and Technology at Georgetown Law, Christiana Care, Color of Change, Coworker, Data Robot, Data Trust Alliance, Data and Society Research Institute, Deepmind, EdSAFE AI Alliance, Electronic Privacy Information Center (EPIC), Encode Justice, Equal AI, Google, Hitachi's AI Policy Committee, The Innocence Project, Institute of Electrical and Electronics Engineers (IEEE), Intuit, Lawyers Committee for Civil Rights Under Law, Legal Aid Society, The Leadership Conference on Civil and Human Rights, Meta, Microsoft, The MIT AI Policy Forum, Movement Alliance Project, The National Association of Criminal Defense Lawyers, Oā€™Neil Risk Consulting & Algorithmic Auditing, The Partnership on AI, Pinterest, The Plaintext Group, pymetrics, SAP, The Security Industry Association, Software and Information Industry Association (SIIA), Special Competitive Studies Project, Thorn, United for Respect, University of California at Berkeley Citris Policy Lab, University of California at Berkeley Labor Center, Unfinished/Project Liberty, Upturn, US Chamber of Commerce, US Chamber of Commerce Technology Engagement Center A.I. Working Group, Vibrent Health, Warehouse Worker Resource Center, and Waymap.",0.0,0.0,0.0,0.5656084655990388,0.18263698692094232 +What actions are suggested for explaining and validating an AI model to ensure responsible use and governance?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. 
\n26\nAlgorithmic \nDiscrimination \nProtections' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'and management. One possible way to further categorize these risks, derived in part from the UKā€™s International \nScientiļ¬c Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Conļ¬guration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.']",I don't know.,"Suggested actions for explaining and validating an AI model to ensure responsible use and governance include: applying and documenting ML explanation results such as analysis of embeddings, counterfactual prompts, gradient-based attributions, model compression/surrogate models, and occlusion/term reduction. Additionally, documenting GAI model details including proposed use and organizational value, assumptions and limitations, data collection methodologies, data provenance, data quality, model architecture, optimization objectives, training algorithms, RLHF approaches, fine-tuning or retrieval-augmented generation approaches, evaluation data, ethical considerations, and legal and regulatory requirements.",0.0,0.0,0.0,0.249999999975,0.17786667628969524 +What provisions are included in the Biometric Information Privacy Act enacted by the state of Illinois?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'NOTICE & \nEXPLANATION \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\xad\xad\xad\nPeople in Illinois are given written notice by the private sector if their biometric informa-\ntion is used. The Biometric Information Privacy Act enacted by the state contains a number of provisions \nconcerning the use of individual biometric data and identifiers. Included among them is a provision that no private \nentity may ""collect, capture, purchase, receive through trade, or otherwise obtain"" such information about an \nindividual, unless written notice is provided to that individual or their legally appointed representative. 87\nMajor technology companies are piloting new ways to communicate with the public about' + 'ENDNOTES\n85. Mick Dumke and Frank Main. A look inside the watch list Chicago police fought to keep secret. The\nChicago Sun Times. 
May 18, 2017.\nhttps://chicago.suntimes.com/2017/5/18/18386116/a-look-inside-the-watch-list-chicago-police-fought\xad\nto-keep-secret\n86. Jay Stanley. Pitfalls of Artificial Intelligence Decisionmaking Highlighted In Idaho ACLU Case.\nACLU. Jun. 2, 2017.\nhttps://www.aclu.org/blog/privacy-technology/pitfalls-artificial-intelligence-decisionmaking\xad\nhighlighted-idaho-aclu-case\n87. Illinois General Assembly. Biometric Information Privacy Act. Effective Oct. 3, 2008.\nhttps://www.ilga.gov/legislation/ilcs/ilcs3.asp?ActID=3004&ChapterID=57\n88. Partnership on AI. ABOUT ML Reference Document. Accessed May 2, 2022.\nhttps://partnershiponai.org/paper/about-ml-reference-document/1/\n89. See, e.g., the model cards framework: Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker\nBarnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'for individuals to be able to access any of their individual information stored in a federal system of records, if not \nincluded under one of the systems of records exempted pursuant to the Privacy Act. In these cases, federal agen\xad\ncies must provide a method for an individual to determine if their personal information is stored in a particular \nsystem of records, and must provide procedures for an individual to contest the contents of a record about them. \nFurther, the Privacy Act allows for a cause of action for an individual to seek legal relief if a federal agency does not \ncomply with the Privacy Actā€™s requirements. Among other things, a court may order a federal agency to amend or \ncorrect an individualā€™s information in its records or award monetary damages if an inaccurate, irrelevant, untimely, \nor incomplete record results in an adverse determination about an individualā€™s ā€œqualifications, character, rights, ā€¦ \nopportunitiesā€¦, or benefits.ā€' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Public and Private Sector Uses of Biometric Technologies. Issued Oct. 8, 2021.\nhttps://www.federalregister.gov/documents/2021/10/08/2021-21975/notice-of-request-for\xad\ninformation-rfi-on-public-and-private-sector-uses-of-biometric-technologies\n114. National Artificial Intelligence Initiative Office. Public Input on Public and Private Sector Uses of\nBiometric Technologies. Accessed Apr. 19, 2022.\nhttps://www.ai.gov/86-fr-56300-responses/\n115. Thomas D. Olszewski, Lisa M. Van Pay, Javier F. Ortiz, Sarah E. Swiersz, and Laurie A. Dacus.\nSynopsis of Responses to OSTPā€™s Request for Information on the Use and Governance of Biometric\nTechnologies in the Public and Private Sectors. Science and Technology Policy Institute. Mar. 
2022.\nhttps://www.ida.org/-/media/feature/publications/s/sy/synopsis-of-responses-to-request-for\xad\ninformation-on-the-use-and-governance-of-biometric-technologies/ida-document-d-33070.ashx\n73']","The Biometric Information Privacy Act enacted by the state of Illinois includes a provision that no private entity may ""collect, capture, purchase, receive through trade, or otherwise obtain"" biometric information about an individual unless written notice is provided to that individual or their legally appointed representative.","The Biometric Information Privacy Act enacted by the state of Illinois contains a number of provisions concerning the use of individual biometric data and identifiers. Included among them is a provision that no private entity may 'collect, capture, purchase, receive through trade, or otherwise obtain' such information about an individual, unless written notice is provided to that individual or their legally appointed representative.",1.0,0.9524662717484101,1.0,0.3333333333,0.8904414765041544 +"How should designers, developers, and deployers of automated systems handle data collection and user consent to ensure data privacy?","['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, de\xad\nvelopers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and de\xad\nletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfus\xad\ncate user choice or burden users with defaults that are privacy invasive. Con\xad\nsent should only be used to justify collection of data in cases where it can be' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'decision making, and enhancing system documentation and debugging practices. 
When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance']","Designers, developers, and deployers of automated systems should handle data collection and user consent to ensure data privacy by: + +1. Seeking user permission and respecting their decisions regarding the collection, use, access, transfer, and deletion of their data in appropriate ways and to the greatest extent possible. +2. Using alternative privacy by design safeguards when seeking user consent is not possible. +3. Ensuring that data collection conforms to reasonable expectations and that only data strictly necessary for the specific context is collected. +4. Avoiding user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. +5. Using consent to justify data collection only in cases where it can be appropriately obtained.","Designers, developers, and deployers of automated systems should seek your permission and respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be used. 
Systems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases where it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable in plain language, and give you agency over data collection and the specific context of use.",1.0,0.9620969491154683,0.75,0.4265873015766369,0.8906893876783905 +How do systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, elec \n-ctrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'Priorities Related to Information Integrity Research and Development.' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. 
When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952']","Based on the provided context, systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties by collating or analyzing information, matching records, and supporting decision-makers in the adjudication process. These systems may also include fraud detection algorithms and access control algorithms, and they can make decisions on a fully or partially autonomous basis, such as determining to revoke benefits. + +","Systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties by collating or analyzing information or matching records, and by assisting in the adjudication process.",1.0,0.9576964399573923,1.0,0.49999999995,0.6132362982414824 +What should be the focus of risk identification and mitigation for automated systems before deployment?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. 
In some circumstances, application of these principles' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40' + 'Priorities Related to Information Integrity Research and Development.' + 'and mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. \n18']","The focus of risk identification and mitigation for automated systems before deployment should include: + +- A description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted. 
+- A summary of what data might be missing, incomplete, or erroneous, and data relevancy justifications. +- The results of public consultation such as concerns raised and any decisions made due to these concerns. +- Risk identification and management assessments and any steps taken to mitigate potential harms. +- The results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems. +- Ongoing monitoring procedures and regular performance testing reports, including monitoring frequency.","Identified risks should focus on the potential for meaningful impact on peopleā€™s rights, opportunities, or access and include those to impacted communities that may not be direct users of the automated system, risks resulting from purposeful misuse of the system, and other concerns identified via the consultation process.",1.0,0.9919162046375328,1.0,0.9571428571291837,0.22400833910263965 +What procedures should be established and maintained for escalating GAI system incidents to the organizational risk management authority?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Priorities Related to Information Integrity Research and Development.' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'for any resulting algorithmic discrimination. 
\n26\nAlgorithmic \nDiscrimination \nProtections' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51']",I don't know.,Establish and maintain procedures for escalating GAI system incidents to the organizational risk management authority when specific criteria for deactivation or disengagement is met for a particular context of use or for the GAI system as a whole.,0.0,0.0,0.0,0.11111111109999999,0.1806177835407171 +How do GAI systems contribute to the creation of deepfakes and what are the potential impacts of these deepfakes?,"['and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily “averse” to GAI systems, and thus \ndeprive themselves or others of GAI’s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.' + '10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards specific demographics. 
Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic “deepfakes” – that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'warrant additional human review, tracking and documentation, and greater management oversight. \nAI technology can produce varied outputs in multiple modalities and present many classes of user \ninterfaces. This leads to a broader set of AI Actors interacting with GAI systems for widely differing \napplications and contexts of use. These can include data labeling and preparation, development of GAI \nmodels, content moderation, code generation and review, text generation and editing, image and video \ngeneration, summarization, search, and chat. These activities can take place within organizational \nsettings or in the public domain. \nOrganizations can restrict AI applications that cause harm, exceed stated risk tolerances, or that conflict \nwith their tolerances or values. Governance tools and protocols that are applied to other types of AI \nsystems can be applied to GAI systems. These plans and actions include: \n• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","GAI systems contribute to the creation of deepfakes by enabling a higher degree of sophistication for malicious actors to produce disinformation that is targeted towards specific demographics. Current and emerging multimodal models make it possible to generate both text-based disinformation and highly realistic ""deepfakes"" – synthetic audiovisual content and photorealistic images. The potential impacts of these deepfakes include manipulating human and machine perception, and enabling additional disinformation threats as future GAI models are trained on new data modalities.","GAI systems contribute to the creation of deepfakes by enabling the generation of highly realistic synthetic audiovisual content and photorealistic images. 
The potential impacts of these deepfakes include eroding public trust in true or valid evidence and information, causing downstream effects such as a drop in the stock market, and assisting malicious actors in creating compelling imagery and propaganda to support disinformation campaigns.",1.0,0.9712528193454298,0.5,0.99999999995,0.7194400533942749 +How can synthetic content detection help manage and mitigate risks associated with AI-generated content?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'arXiv. https://arxiv.org/pdf/2308.14752 \nPartnership on AI (2023) Building a Glossary for Synthetic Media Transparency Methods, Part 1: Indirect \nDisclosure. https://partnershiponai.org/glossary-for-synthetic-media-transparency-methods-part-1-\nindirect-disclosure/ \nQu, Y. et al. (2023) Unsafe Diffusion: On the Generation of Unsafe Images and Hateful Memes From Text-\nTo-Image Models. arXiv. https://arxiv.org/pdf/2305.13873 \nRafat, K. et al. (2023) Mitigating carbon footprint for knowledge distillation based deep learning model \ncompression. PLOS One. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0285668 \nSaid, I. et al. (2022) Nonconsensual Distribution of Intimate Images: Exploring the Role of Legal Attitudes \nin Victimization and Perpetration. Sage. \nhttps://journals.sagepub.com/doi/full/10.1177/08862605221122834#bibr47-08862605221122834 \nSandbrink, J. (2023) Artificial intelligence and biological misuse: Differentiating risks of language models' + 'Priorities Related to Information Integrity Research and Development.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'and management. 
One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']",I don't know.,"Synthetic content detection can help manage and mitigate risks associated with AI-generated content by facilitating greater information access about both authentic and synthetic content to users, enabling better knowledge of trustworthiness in AI systems. It can also enable processes to trace negative outcomes back to their source, improve information integrity, and uphold public trust.",0.0,0.0,1.0,0.11111111109999999,0.177284727830764 +What organizational practices are in place to enable AI testing and incident identification?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. 
Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']","Based on the provided context, organizational practices in place to enable AI testing and incident identification include: + +- Documentation practices such as logging, recording, and analyzing GAI (Generative Artificial Intelligence) incidents. +- Regular information sharing. +- Maintaining change management records. +- Keeping version history and metadata. + +These practices facilitate smoother sharing of information with relevant AI Actors and empower them to respond to and manage AI incidents. + +(Source: Document from https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf, page 56)","Organizational practices are in place to enable AI testing, identification of incidents, and information sharing. Specifically, policies are established for measuring the effectiveness of employed content provenance methodologies (e.g., cryptography, watermarking, steganography, etc.) and identifying the minimum set of criteria necessary for GAI system incident reporting such as System ID, Title, Reporter, System/Source, Data Reported, Date of Incident, Description, Impact(s), and Stakeholder(s) Impacted.",1.0,1.0000000000000002,0.5,0.874999999978125,0.7962767244097781 +What techniques are suggested to minimize risks associated with data privacy in evaluations involving human subjects?,"['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5']",The context suggests that organizations should follow human subjects research requirements and best practices such as informed consent and subject compensation to minimize risks associated with data privacy in evaluations involving human subjects.,"Techniques such as anonymization, differential privacy, or other privacy-enhancing technologies are suggested to minimize the risks associated with linking AI-generated content back to individual human subjects.",0.75,0.9604483967726186,0.0,0.19999999998,0.4283001890495529 +Who were some of the participants from the private sector and civil society in the OSTP meetings?,"['Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'Priorities Related to Information Integrity Research and Development.' + 'APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew America’s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance']","Based on the provided context, some of the participants from the private sector and civil society in the OSTP meetings included: + +- Microsoft Corporation +- MITRE Corporation +- National Association for the Advancement of Colored People Legal Defense and Educational Fund +- National Association of Criminal Defense Lawyers +- National Center for Missing & Exploited Children +- National Fair Housing Alliance +- National Immigration Law Center +- NEC Corporation of America +- New America’s Open Technology Institute +- New York Civil Liberties Union +- Notre Dame Technology Ethics Center +- Onfido +- Oosto +- Palantir +- Pangiam +- Parity Technologies +- Philadelphia Unemployment Project +- Project On Government Oversight +- Recording Industry Association of America +- Science, Technology, and Public Policy Program at University of Michigan Ann Arbor +- Security Industry Association +- Software & Information Industry Association","Participants in the OSTP meetings from the private sector and civil society included: Adobe, American Civil Liberties Union (ACLU), The Aspen Commission on Information Disorder, The Awood Center, The Australian Human Rights Commission, Biometrics Institute, The Brookings Institute, BSA | The Software Alliance, Cantellus Group, Center for American Progress, Center for Democracy and Technology, Center on Privacy and Technology at Georgetown Law, Christiana Care, Color of Change, Coworker, Data Robot, Data Trust Alliance, Data and Society Research Institute, Deepmind, EdSAFE AI Alliance, Electronic Privacy Information Center (EPIC), Encode Justice, Equal AI, Google, Hitachi's AI Policy Committee, The Innocence Project, Institute of Electrical and Electronics Engineers (IEEE), Intuit, Lawyers Committee for Civil Rights Under Law, Legal Aid Society, The Leadership Conference on Civil and Human Rights, Meta, Microsoft, The MIT AI Policy Forum, Movement Alliance Project, The National Association of Criminal Defense Lawyers, O’Neil Risk Consulting & Algorithmic Auditing, The Partnership on AI, Pinterest, The Plaintext Group, pymetrics, SAP, The Security 
Industry Association, Software and Information Industry Association (SIIA), Special Competitive Studies Project, Thorn, United for Respect, University of California at Berkeley Citris Policy Lab, University of California at Berkeley Labor Center, Unfinished/Project Liberty, Upturn, US Chamber of Commerce, US Chamber of Commerce Technology Engagement Center A.I. Working Group, Vibrent Health, Warehouse Worker Resource Center, Waymap.",,0.9999999999999994,1.0,0.8124999999796875,0.23404496086632567 +What role does the National Institute of Standards and Technology (NIST) play in advancing artificial intelligence (AI)?,"['About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' 
+ 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.' + 'accuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging \ngeneration of artificially intelligent partners.95 The National Science Foundation’s program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45' + 'from the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to' + 'Descriptions of AI Actor Tasks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.']","The National Institute of Standards and Technology (NIST) develops measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without harm to people or the planet. NIST has conducted both fundamental and applied work on AI for more than a decade and is helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI. NIST also established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to build the science necessary for the safe, secure, and trustworthy development and use of AI.","The National Institute of Standards and Technology (NIST) develops measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without harm to people or the planet. NIST is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI and has established the U.S. 
AI Safety Institute and the companion AI Safety Institute Consortium to build the science necessary for safe, secure, and trustworthy development and use of AI.",1.0,0.978036094470781,1.0,0.8928571428348213,0.9993736562177171 +What should entities responsible for the development or use of automated systems include in their governance structures and procedures to ensure clear organizational oversight?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' 
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'monitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguard \ning individuals’ rights. These reporting expectations are important for transparency, so the American people can have' + 'Priorities Related to Information Integrity Research and Development.' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']","Entities responsible for the development or use of automated systems should include the following in their governance structures and procedures to ensure clear organizational oversight: + +1. **Monitoring**: Regular monitoring of the automated systems. +2. **Governance Structures**: Establishing clear governance structures for the automated systems. +3. 
**Reporting**: Expectations about reporting, which can be provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review. Reports should be made public as much as possible, consistent with law, regulation, and policy, while considering intellectual property, law enforcement, or national security considerations. +4. **Oversight Bodies**: Where public reports are not possible, information should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguarding individuals’ rights. + +These measures are important for transparency so that the public can have confidence in the automated systems being used.","Entities responsible for the development or use of automated systems should include clearly-stated governance procedures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing assessment and mitigation. Organizational stakeholders, including those with oversight of the business process or operation being automated, as well as other organizational divisions that may be affected due to the use of the system, should be involved in establishing governance procedures. Responsibility should rest high enough in the organization that decisions about resources, mitigation, incident response, and potential rollback can be made promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those holding this responsibility should be made aware of any use cases with the potential for meaningful impact on people’s rights, opportunities, or access as determined based on risk identification procedures. In some cases, it may be appropriate for an independent ethics review to be conducted before deployment.",0.9090909090909091,0.978493825176774,1.0,0.7440476190352183,0.7317903632773278 +How has the customer service industry successfully integrated automated services with human support teams?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. 
\nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9']",I don't know.,The customer service industry has successfully integrated automated services such as chat-bots and AI-driven call response systems with escalation to a human support team. Many businesses now use partially automated customer service platforms that help answer customer questions and compile common problems for human agents to review. These integrated human-AI systems allow companies to provide faster customer care while maintaining human agents to answer calls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to successful customer service.,0.0,0.0,0.0,0.19999999998,0.18032948270080051 +What steps should be taken to inform AI stakeholders during the deactivation or disengagement of AI systems?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"Establish and maintain communication plans to inform AI stakeholders as part of the deactivation or disengagement process of a specific GAI system (including for open-source models) or context of use, including reasons, workarounds, user access removal, alternative processes, contact information, etc.",0.0,0.0,0.0,0.2916666666520833,0.17902589024025395 +How do the U.S. AI Safety Institute and AI Risk Management Framework support the 2023 AI Exec Order?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. 
\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. 
\nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,"The U.S. AI Safety Institute and the AI Risk Management Framework support the 2023 Executive Order on Safe, Secure, and Trustworthy AI by continuing efforts to build the science necessary for the safe, secure, and trustworthy development and use of AI.",0.0,0.0,0.0,0.4166666666458333,0.17786617457007506 +How to balance synthetic vs. non-synthetic data and document model details to avoid bias?,"['and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,"To balance synthetic vs. 
non-synthetic data and document model details to avoid bias, assess the proportion of synthetic to non-synthetic training data and verify that the training data is not overly homogenous or GAI-produced. Additionally, document GAI model details including proposed use and organizational value, assumptions and limitations, data collection methodologies, data provenance, data quality, model architecture, optimization objectives, training algorithms, RLHF approaches, fine-tuning or retrieval-augmented generation approaches, evaluation data, ethical considerations, and legal and regulatory requirements.",0.0,0.0,0.0,0.19999999998,0.18112171206326816 +How to address data privacy and ensure AI integrity in human evaluations?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. 
(2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.']",I don't know.,"To address data privacy and ensure AI integrity in human evaluations, the following actions are suggested: 1. Document how content provenance data is tracked and how that data interacts with privacy and security, including anonymizing data to protect privacy, leveraging privacy output filters, and removing any personally identifiable information (PII). 2. Provide human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications. 3. Use techniques such as anonymization, differential privacy, or other privacy-enhancing technologies to minimize the risks associated with linking AI-generated content back to individual human subjects.",0.0,0.0,0.0,0.5845238095091964,0.18061559566984886 +How can public feedback and incident reporting improve GAI traceability and risk management?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40' + 'harm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systemsā€™ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable']","Public feedback and incident reporting can improve GAI traceability and risk management by facilitating smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history, and metadata can empower AI Actors in responding to and managing AI incidents. Documentation practices, including logging, recording, and analyzing GAI incidents, are crucial for this process.","Public feedback and incident reporting can improve GAI traceability and risk management by promoting transparency and enhancing awareness of performance changes. Documentation and sharing of information about GAI incidents help relevant AI Actors trace impacts to their source, while structured public feedback provides context and depth that automated systems may lack. This feedback can be integrated into monitoring processes to mitigate potential risks and harms from outputs.",0.8181818181818182,0.9999999999999999,0.25,0.37301587300343914,0.3857288288642894 +"How could automation bias worsen misinformation risks from GAI, given model collapse and trust erosion?","['and a human who is interacting with it. 
Humans bring their unique perspectives, experiences, or domain-\nspeciļ¬c expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily ā€œaverseā€ to GAI systems, and thus \ndeprive themselves or others of GAIā€™s beneļ¬cial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustiļ¬ably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'even if the inferences are not accurate (e.g., confabulations), and especially if they reveal information \nthat the individual considers sensitive or that is used to disadvantage or harm them. \nBeyond harms from information exposure (such as extortion or dignitary harm), wrong or inappropriate \ninferences of PII can contribute to downstream or secondary harmful impacts. For example, predictive \ninferences made by GAI models based on PII or protected attributes can contribute to adverse decisions, \nleading to representational or allocative harms to individuals or groups (see Harmful Bias and \nHomogenization below).']","Automation bias can worsen misinformation risks from Generative Artificial Intelligence (GAI) by leading humans to over-rely on GAI systems or unjustifiably perceive GAI content to be of higher quality than that produced by other sources. 
This excessive deference to automated systems can exacerbate risks such as confabulation (the generation of false information) and bias or homogenization. As a result, misinformation can spread more easily because people might trust and disseminate incorrect or biased information generated by GAI without sufficient scrutiny. This can lead to model collapse, where the quality and reliability of the AI model degrade over time, and trust erosion, where users lose confidence in the AI system and its outputs.","Automation bias can exacerbate other risks of GAI, such as risks of confabulation or risks of bias or homogenization.",0.46153846153846156,0.929886978768589,1.0,0.59999999997,0.9814270413495314 +How do consultations and monitoring ensure automated systems' safety and effectiveness?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. 
\nIn discussion of technical and governance interventions that that are needed to protect against the harms of"" + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance']","Consultations and monitoring ensure the safety and effectiveness of automated systems by involving public consultation to address concerns and make decisions based on those concerns, conducting risk identification and management assessments, and performing independent evaluations and reporting. These processes include steps to mitigate potential harms, performance testing for accuracy and demographic impact, and ongoing monitoring procedures with regular performance testing reports. This comprehensive approach helps to proactively protect against harms from unintended uses or impacts of automated systems.","Consultations ensure automated systems' safety and effectiveness by engaging diverse impacted communities and experts to consider concerns and risks unique to those communities. This feedback is documented and used to reconsider the development of the system. 
Monitoring ensures ongoing performance by including recalibration procedures, continuous evaluation of performance metrics and harm assessments, updates of systems, retraining of machine learning models, and fallback mechanisms. Both technical system components and human operators are monitored, with manual human-led monitoring as a check for shortcomings in automated monitoring systems.",1.0,0.9888831953529978,0.75,0.6116666666544333,0.6381752792639237 +How do EO 13960 and NIST AI RMF ensure AI transparency and accountability for federal agencies?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Diļ¬€er from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artiļ¬cial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artiļ¬cial-intelligence' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Proļ¬les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Priorities Related to Information Integrity Research and Development.' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. 
Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'Descriptions of AI Actor Tasks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product']",I don't know.,"Executive Order 13960 requires that AI used by federal agencies is transparent and accountable, among other principles. The NIST AI Risk Management Framework, which is being developed through a consensus-driven, open, transparent, and collaborative process, will also consider and encompass principles such as transparency and accountability during the pre-design, design and development, deployment, use, and testing and evaluation of AI technologies and systems.",0.0,0.0,0.0,0.499999999975,0.17810188855970693 +How does surveillance software for monitoring union talks intersect with data privacy and regulatory gaps?,"['Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Priorities Related to Information Integrity Research and Development.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + ""records and education-related data in order to do so. The overarching concerns of surveillance in these \ndomains included concerns about the chilling effects of surveillance on student expression, inappropriate \ncontrol of tenants via surveillance, and the way that surveillance of workers blurs the boundary between work \nand life and exerts extreme and potentially damaging control over workers' lives. Additionally, some panelists \npointed out ways that data from one situation was misapplied in another in a way that limited people's \nopportunities, for example data from criminal justice settings or previous evictions being used to block further \naccess to housing. 
Throughout, various panelists emphasized that these technologies are being used to shift the \nburden of oversight and efficiency from employers to workers, schools to students, and landlords to tenants, in \nways that diminish and encroach on equality of opportunity; assessment of these technologies should include"" + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Alliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L. Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium']",I don't know.,"Companies use surveillance software to track employee discussions about union activity and use the resulting data to surveil individual employees and surreptitiously intervene in discussions. The United States lacks a comprehensive statutory or regulatory framework governing the rights of the public when it comes to personal data, which includes the use of surveillance software in workplaces.",0.0,0.0,1.0,0.62499999996875,0.18098271214915762 +How can provenance tracking and public feedback improve AI content risk management?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and management. One possible way to further categorize these risks, derived in part from the UKā€™s International \nScientiļ¬c Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Conļ¬guration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + '2 \nThis work was informed by public feedback and consultations with diverse stakeholder groups as part of NISTā€™s \nGenerative AI Public Working Group (GAI PWG). The GAI PWG was an open, transparent, and collaborative \nprocess, facilitated via a virtual workspace, to obtain multistakeholder input on GAI risk management and to \ninform NISTā€™s approach. \nThe focus of the GAI PWG was limited to four primary considerations relevant to GAI: Governance, Content \nProvenance, Pre-deployment Testing, and Incident Disclosure (further described in Appendix A). As such, the \nsuggested actions in this document primarily address these considerations. \nFuture revisions of this proļ¬le will include additional AI RMF subcategories, risks, and suggested actions based \non additional considerations of GAI as the space evolves and empirical evidence indicates additional risks. A \nglossary of terms pertinent to GAI risk management will be developed and hosted on NISTā€™s Trustworthy &' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","Provenance tracking and public feedback can improve AI content risk management by providing detailed information on the data used to train machine learning models, including how data sources were processed and interpreted, and identifying any missing, incomplete, or erroneous data. This transparency allows for better risk identification and management assessments, and helps in mitigating potential harms. Public feedback can highlight concerns and influence decisions, ensuring that the AI system aligns with public expectations and values. Additionally, ongoing monitoring and performance testing, informed by provenance tracking, can ensure the system remains accurate and fair over time. 
+ +Source: Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 19, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': '', '_id': '0dfb25b274794704951bd01224d5c65a', '_collection_name': 'snowflake-arctic-embed-l-recursive-base'}, page_content='organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,').","Provenance tracking and public feedback can improve AI content risk management by tracing the origin and history of content, enabling better knowledge of trustworthiness in AI systems, and facilitating greater information access about both authentic and synthetic content. Integrating pre- and post-deployment external feedback into the monitoring process for GAI models can enhance awareness of performance changes and mitigate potential risks and harms from outputs.",0.8181818181818182,1.0000000000000002,0.8,0.44047619046517855,0.8237425286176446 +How can designers ensure user understanding and privacy in automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. 
Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, de\xad\nvelopers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and de\xad\nletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfus\xad\ncate user choice or burden users with defaults that are privacy invasive. Con\xad\nsent should only be used to justify collection of data in cases where it can be' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']","Designers can ensure user understanding and privacy in automated systems by: + +1. 
**Providing Accessible Documentation**: Designers, developers, and deployers should provide generally accessible plain language documentation, including clear descriptions of the overall system functioning and the role automation plays. This documentation should include notice that such systems are in use, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. + +2. **Keeping Users Informed**: Users should be notified of significant use case or key functionality changes. They should know how and why an outcome impacting them was determined by an automated system, including when the automated system is not the sole input determining the outcome. + +3. **Ensuring Data Privacy**: Users should be protected from abusive data practices via built-in protections and should have agency over how data about them is used. This includes ensuring that data collection conforms to reasonable expectations and that only data strictly necessary for the specific context is collected. Designers should seek user permission and respect their decisions regarding the collection, use, access, transfer, and deletion of their data in appropriate ways and to the greatest extent possible. Where not possible, alternative privacy by design safeguards should be used. + +4. **Avoiding Privacy-Invasive Defaults**: Systems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. Consent should only be used to justify the collection of data in cases where it can be appropriately obtained. + +By following these steps, designers can help ensure that users understand how automated systems impact them and that their privacy is protected.","Designers can ensure user understanding and privacy in automated systems by providing plain language documentation, clear descriptions of system functioning, notice of system use, and explanations of outcomes. They should seek user permission and respect decisions regarding data collection, use, access, transfer, and deletion. Consent requests should be brief, understandable, and give users agency over data collection. Enhanced protections and restrictions should be applied to sensitive domains, and surveillance technologies should be subject to heightened oversight.",1.0,0.999308689219987,0.5,0.26666666665333333,0.8371097611849012 +How do equity and disparity assessments help prevent algorithmic bias?,"['ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'an equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. 
Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + 'protections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'people's legal problem stories. SSRN. https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4696936 \nHaran, R. (2023) Securing LLM Systems Against Prompt Injection. NVIDIA. \nhttps://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection/ \nInformation Technology Industry Council (2024) Authenticating AI-Generated Content. \nhttps://www.itic.org/policy/ITI_AIContentAuthorizationPolicy_122123.pdf \nJain, S. et al. (2023) Algorithmic Pluralism: A Structural Approach To Equal Opportunity. arXiv. \nhttps://arxiv.org/pdf/2305.08157 \nJi, Z. et al (2023) Survey of Hallucination in Natural Language Generation. ACM Comput. Surv. 55, 12, \nArticle 248. https://doi.org/10.1145/3571730 \nJones-Jang, S. et al. (2022) How do people react to AI failure? Automation bias, algorithmic aversion, and \nperceived controllability. 
Oxford. https://academic.oup.com/jcmc/article/28/1/zmac029/6827859] \nJussupow, E. et al. (2020) Why Are We Averse Towards Algorithms? A Comprehensive Literature Review']","Equity and disparity assessments help prevent algorithmic bias by ensuring that protections are built into the design, deployment, and ongoing use of automated systems. These assessments include proactive equity evaluations as part of system design, the use of representative data, protection against proxies for demographic features, ensuring accessibility for people with disabilities, pre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent evaluation and plain language reporting in the form of an algorithmic impact assessment, including disparity testing results and mitigation information, should be performed and made public whenever possible to confirm these protections. This comprehensive approach helps identify and mitigate potential biases, ensuring fair treatment across different demographic groups.","Equity and disparity assessments help prevent algorithmic bias by conducting proactive equity assessments in the design phase to review potential input data, associated historical context, accessibility for people with disabilities, and societal goals to identify potential discrimination and effects on equity. Disparity assessments test automated systems using a broad set of measures to assess whether the system components produce disparities, and demographic data collected for disparity assessment should be separated from data used for the automated system with privacy protections instituted. When a disparity is identified, steps may be taken to mitigate or eliminate it, and ongoing monitoring and mitigation are performed to assess algorithmic discrimination that might arise from unforeseen interactions or changes.",1.0,1.0000000000000002,1.0,0.5216269841182903,0.991481212382073 +"How do school audio surveillance systems for stress indicators intersect with data protections, especially for students with disabilities?","[""records and education-related data in order to do so. The overarching concerns of surveillance in these \ndomains included concerns about the chilling effects of surveillance on student expression, inappropriate \ncontrol of tenants via surveillance, and the way that surveillance of workers blurs the boundary between work \nand life and exerts extreme and potentially damaging control over workers' lives. Additionally, some panelists \npointed out ways that data from one situation was misapplied in another in a way that limited people's \nopportunities, for example data from criminal justice settings or previous evictions being used to block further \naccess to housing. 
Throughout, various panelists emphasized that these technologies are being used to shift the \nburden of oversight and efficiency from employers to workers, schools to students, and landlords to tenants, in \nways that diminish and encroach on equality of opportunity; assessment of these technologies should include"" + 'policing, automated license plate readers, real-time facial recognition systems (especially \n those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. \n• Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. 
\nIn discussion of technical and governance interventions that that are needed to protect against the harms of""]",I don't know.,School audio surveillance systems monitor student conversations to detect potential 'stress indicators' as a warning of potential violence. These systems have the potential to limit student freedom to express a range of emotions at school and may inappropriately flag students with disabilities who need accommodations or use screen readers or dictation software as cheating.,0.0,0.0,0.6666666666666666,0.59999999997,0.185658045881626
+How does the tech companion help implement AI Bill of Rights to protect Americans from automated harms?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'TABLE OF CONTENTS\nFROM PRINCIPLES TO PRACTICE: A TECHNICAL COMPANION TO THE BLUEPRINT \nFOR AN AI BILL OF RIGHTS \n \nUSING THIS TECHNICAL COMPANION\n \nSAFE AND EFFECTIVE SYSTEMS\n \nALGORITHMIC DISCRIMINATION PROTECTIONS\n \nDATA PRIVACY\n \nNOTICE AND EXPLANATION\n \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nAPPENDIX\n \nEXAMPLES OF AUTOMATED SYSTEMS\n \nLISTENING TO THE AMERICAN PEOPLE\nENDNOTES \n12\n14\n15\n23\n30\n40\n46\n53\n53\n55\n63\n13' + 'ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans' lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'moving principles into practice. \nThe expectations given in the Technical Companion are meant to serve as a blueprint for the development of \nadditional technical standards and practices that should be tailored for particular sectors and contexts. While \nexisting laws informed the development of the Blueprint for an AI Bill of Rights, this framework does not detail \nthose laws beyond providing them as examples, where appropriate, of existing protective measures. This \nframework instead shares a broad, forward-leaning vision of recommended principles for automated system \ndevelopment and use to inform private and public involvement with these systems where they have the poten\xad\ntial to meaningfully impact rights, opportunities, or access. 
Additionally, this framework does not analyze or \ntake a position on legislative and regulatory proposals in municipal, state, and federal government, or those in \nother countries.' + 'technical companion is intended to be used as a reference by people across many circumstances – anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. \nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS: \n• The expectations for automated systems are meant to serve as a blueprint for the development of additional technical\nstandards and practices that should be tailored for particular sectors and contexts.\n• This section outlines practical steps that can be implemented to realize the vision of the Blueprint for an AI Bill of Rights. The \nexpectations laid out often mirror existing practices for technology development, including pre-deployment testing, ongoing' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad']","The technical companion to the Blueprint for an AI Bill of Rights helps implement the AI Bill of Rights by providing practical next steps to move the principles into practice. It promotes common approaches that allow technological innovation to flourish while protecting people from harm. The companion serves as a reference for anyone impacted by automated systems or involved in their development, design, deployment, evaluation, or policy-making. It outlines expectations for automated systems, which are meant to serve as a blueprint for developing additional technical standards and practices tailored for particular sectors and contexts. These expectations include pre-deployment testing, ongoing monitoring, and other practices to prevent unintended harms and ensure key development decisions are vetted by an ethics review.","The technical companion provides examples and concrete steps for communities, industry, governments, and others to take in order to build protections into policy, practice, or the technological design process. It outlines practical steps for the development of additional technical standards and practices, including pre-deployment testing, ongoing monitoring, and governance structures for automated systems. 
It also includes expectations about reporting for transparency, so the American people can have confidence that their rights, opportunities, and access are respected.",1.0,0.9153379955282014,1.0,0.9626543209769582,0.49869533163999014
+How to mitigate Human-AI risks in evaluations and pre-deployment?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. 
\nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5']",I don't know.,"To mitigate Human-AI risks in evaluations and pre-deployment, several actions can be taken: 1. Assess and manage statistical biases related to GAI content provenance through techniques such as re-sampling, re-weighting, or adversarial training. 2. Document how content provenance data is tracked and how that data interacts with privacy and security, including anonymizing data, leveraging privacy output filters, and removing any personally identifiable information (PII). 3. Provide human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications. 4. Use techniques such as anonymization, differential privacy, or other privacy-enhancing technologies to minimize the risks associated with linking AI-generated content back to individual human subjects. 5. Evaluate claims of model capabilities using empirically validated methods. 6. Share results of pre-deployment testing with relevant GAI Actors. 7. Utilize a purpose-built testing environment such as NIST Dioptra to empirically evaluate GAI trustworthy characteristics. 8. Avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments. 9. Document the extent to which human domain knowledge is employed to improve GAI system performance. 10. Review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities. 11. Track and document instances of anthropomorphization in GAI system interfaces. 12. Verify GAI system training data and TEVV data provenance, and ensure that fine-tuning or retrieval-augmented generation data is grounded. 13. Regularly review security and safety guardrails, especially if the GAI system is being operated in novel circumstances.",0.0,0.0,0.0,0.5259259259171605,0.2429127996658477
+How to design explanatory mechanisms in high-risk automated systems for pre-decision transparency and post-decision accessibility?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"In high-risk automated systems, explanatory mechanisms should be built into the system design so that the system's full behavior can be explained in advance (i.e., only fully transparent models should be used), rather than as an after-the-decision interpretation. The explanation provided by a system should accurately reflect the factors and the influences that led to a particular decision, and should be meaningful for the particular customization based on purpose, target, and level of risk.",0.0,0.0,1.0,0.22619047617916663,0.1800906539558884
+How do biometrics affect access and decisions for benefits or penalties?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs.
broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, elec \n-ctrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32']","Biometric systems are used as access control mechanisms and can make decisions related to benefits or services on a fully or partially autonomous basis, such as determining whether to revoke benefits. 
+ +Source: Document(metadata={'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 53, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': '', '_id': '6f20c4fe7a8a4f27bf9169511d1cd8d5', '_collection_name': 'snowflake-arctic-embed-l-recursive-base'}, page_content='APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, elec \n-ctrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54')",The answer to given question is not present in context,0.9285714285714286,0.8873551634118172,1.0,0.0,0.9263373617376327 +How do equity assessments and disparity tests reduce algorithmic bias?,"['evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'an equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. 
Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'protections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design']","Equity assessments and disparity tests reduce algorithmic bias by ensuring that the design and deployment of automated systems are equitable. This involves: + +1. **Proactive Equity Assessments**: Conducting these assessments as part of the system design to identify and address potential biases early in the development process. +2. **Use of Representative Data**: Ensuring that the data used to train the algorithms is representative of the diverse populations that the system will impact. +3. **Protection Against Proxies for Demographic Features**: Implementing measures to prevent the use of proxies that could indirectly introduce bias based on demographic characteristics. +4. 
**Accessibility for People with Disabilities**: Ensuring that the system is accessible to people with disabilities during both design and development phases. +5. **Pre-deployment and Ongoing Disparity Testing and Mitigation**: Conducting disparity tests before deploying the system and continuously monitoring it to identify and mitigate any biases that may arise. +6. **Clear Organizational Oversight**: Establishing oversight mechanisms to ensure accountability and adherence to equity principles. + +These steps help to identify and mitigate biases, ensuring that the automated systems treat all demographic groups fairly and equitably.","Equity assessments and disparity tests reduce algorithmic bias by ensuring that systems are designed and used in an equitable way. This includes proactive equity assessments as part of the system design, use of representative data, protection against proxies for demographic features, ensuring accessibility for people with disabilities, pre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight.",1.0,0.9975734006680298,1.0,0.5799999999884,0.9942069947664742 +How does synthetic data proportion help prevent AI model collapse?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exļ¬ltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exļ¬ltration-poc-and-ļ¬x/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'guide the design of provenance data-tracking techniques. 
\nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'arXiv. https://arxiv.org/pdf/2308.14752 \nPartnership on AI (2023) Building a Glossary for Synthetic Media Transparency Methods, Part 1: Indirect \nDisclosure. https://partnershiponai.org/glossary-for-synthetic-media-transparency-methods-part-1-\nindirect-disclosure/ \nQu, Y. et al. (2023) Unsafe Diļ¬€usion: On the Generation of Unsafe Images and Hateful Memes From Text-\nTo-Image Models. arXiv. https://arxiv.org/pdf/2305.13873 \nRafat, K. et al. (2023) Mitigating carbon footprint for knowledge distillation based deep learning model \ncompression. PLOS One. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0285668 \nSaid, I. et al. (2022) Nonconsensual Distribution of Intimate Images: Exploring the Role of Legal Attitudes \nin Victimization and Perpetration. Sage. \nhttps://journals.sagepub.com/doi/full/10.1177/08862605221122834#bibr47-08862605221122834 \nSandbrink, J. (2023) Artiļ¬cial intelligence and biological misuse: Diļ¬€erentiating risks of language models' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952']",I don't know.,Assessing the proportion of synthetic to non-synthetic training data and verifying that the training data is not overly homogenous or GAI-produced helps mitigate concerns of model collapse.,0.0,0.0,0.0,0.36666666664833336,0.18136181947853552 +How do hiring algorithms and social media data impact democracy?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'SECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten peopleā€™s \nopportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or \nconsent. \nThese outcomes are deeply harmfulā€”but they are not inevitable. 
Automated systems have brought about extraor-' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","Based on the provided context, hiring algorithms and social media data impact democracy by reflecting and reproducing existing unwanted inequities or embedding new harmful bias and discrimination. Additionally, unchecked social media data collection has been used to threaten peopleā€™s opportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or consent. These uses of technology, data, and automated systems can limit opportunities and prevent access to critical resources or services, posing significant challenges to democracy. + +","Algorithms used in hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. Unchecked social media data collection has been used to threaten peopleā€™s opportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or consent.",0.9230769230769231,0.9999999999999997,1.0,0.249999999975,0.9821304947746031 +How can unproven tech and data misuse cause harm?,"['Priorities Related to Information Integrity Research and Development.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. 
Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of""]","The provided context mentions that unproven technology and data misuse can lead to several harms, including: + +1. **Replication of Unjust Outcomes**: Adverse or irrelevant data can lead to the replication of unjust outcomes. +2. **Confirmation Bias**: There is a tendency for people to defer to potentially inaccurate automated systems, which can reinforce confirmation bias. +3. **Severe Impact on Individuals and Communities**: The systems can lack individualization, work against the belief that people can change for the better, and lead to significant negative consequences such as loss of jobs and custody of children. +4. **Surveillance and Chilling Effects**: Surveillance can lead to chilling effects for communities and send negative signals to community members about how they are viewed. + +These points highlight the potential severe impacts of unproven technology and data misuse on individuals and communities.","Unproven technology and data misuse can cause harm in several ways. Technologies that have not been proven to work within an acceptable range of error can lead to substantial and unjustified harm. Automated systems relying on historical data can allow irrelevant information from past decisions to affect decision-making in unrelated situations. Technologies can also be designed to violate safety, such as those facilitating stalking, or lead to unintended harms through their use. Examples include a proprietary model for predicting sepsis that underperformed and caused 'alert fatigue,' social media moderation systems silencing counter speech, tracking devices being misused by stalkers, and police deployment algorithms sending police to incorrect neighborhoods due to feedback loops from reused data.",1.0,0.951317364861573,0.8,0.5666666666477778,0.6805715192213955 +Why compare system performance with human methods after extensive tests?,"['and biological design tools. arXiv. 
https://arxiv.org/pdf/2306.13952' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,"System performance should be compared with the in-place, potentially human-driven, status quo procedures, with existing human performance considered as a performance baseline for the algorithm to meet pre-deployment, and as a lifecycle minimum performance standard.",0.0,0.0,0.0,0.49999999995,0.1832016020730998 +How do confident errors mislead users?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' 
+ 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'purpose speciļ¬cation. For example, most model developers do not disclose speciļ¬c data sources on \nwhich models were trained, limiting user awareness of whether personally identiļ¬ably information (PII) \nwas trained on and, if so, how it was collected. \nModels may leak, generate, or correctly infer sensitive information about individuals. For example, \nduring adversarial attacks, LLMs have revealed sensitive information (from the public domain) that was \nincluded in their training data. This problem has been referred to as data memorization, and may pose \nexacerbated privacy risks even for data present only in a small number of training samples. \nIn addition to revealing sensitive information in GAI training data, GAI models may be able to correctly \ninfer PII or sensitive data that was not in their training data nor disclosed by the user by stitching \ntogether information from disparate sources. These inferences can have negative impact on an individual' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']",I don't know.,"Confident errors, also known as confabulations or fabrications, mislead users by producing confidently stated but erroneous or false content.",0.0,0.0,0.0,0.0,0.18321323393366046 +"How do data biases impact loans, hiring, and education?","['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Priorities Related to Information Integrity Research and Development.' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. 
\n36' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,"Data biases impact loans by causing applicants who attended Historically Black Colleges or Universities (HBCUs) to be charged higher loan prices for refinancing student loans compared to those who did not attend HBCUs, even when controlling for other credit-related factors. In hiring, a tool that learned features from a company's predominantly male employees rejected women applicants for discriminatory reasons, penalizing resumes with the word 'womenā€™s.' In education, a predictive model used by universities to predict student dropout rates used race directly as a predictor, resulting in Black students being deemed at higher risk of dropping out compared to their white peers, which could guide them away from certain majors.",0.0,0.0,0.0,0.19642857141875,0.1828868085143257 +How to design systems to avoid algorithmic bias and ensure safety?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. 
\n36' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,"To design systems to avoid algorithmic bias and ensure safety, systems should be developed with consultation from diverse communities, stakeholders, and domain experts to identify concerns, risks, and potential impacts. They should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring to demonstrate they are safe and effective based on their intended use. Protective measures should include the possibility of not deploying the system or removing a system from use. Designers, developers, and deployers should take proactive and continuous measures to protect individuals and communities from algorithmic discrimination and to use and design systems in an equitable way. This includes proactive equity assessments, use of representative data, protection against proxies for demographic features, ensuring accessibility for people with disabilities, pre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent evaluation and plain language reporting in the form of an algorithmic impact assessment should be performed and made public whenever possible.",0.0,0.0,0.5,0.09999999999,0.17870744270437017 diff --git a/Tasks/Task 5/base_chain_eval_results_df.csv b/Tasks/Task 5/base_chain_eval_results_df.csv new file mode 100644 index 0000000000000000000000000000000000000000..28dd43d5d08332df8e719a297bf3e415bbb0fe2c --- /dev/null +++ b/Tasks/Task 5/base_chain_eval_results_df.csv @@ -0,0 +1,584 @@ +question,contexts,answer,ground_truth,faithfulness,answer_relevancy,context_recall,context_precision,answer_correctness +What procedures should be established and maintained for escalating GAI system incidents to the organizational risk management authority?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. 
Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Priorities Related to Information Integrity Research and Development.' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. 
\n51']",I don't know.,Establish and maintain procedures for escalating GAI system incidents to the organizational risk management authority when specific criteria for deactivation or disengagement is met for a particular context of use or for the GAI system as a whole.,0.0,0.0,0.0,0.11111111109999999,0.1806177835407171 +How might sophisticated threat actors use GAI-powered security co-pilots in cybersecurity attacks?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'harm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systemsā€™ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable' + 'Information security for computer systems and data is a mature ļ¬eld with widely accepted and \nstandardized practices for oļ¬€ensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of oļ¬€ensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. \nOļ¬€ensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections']","Sophisticated threat actors might use GAI-powered security co-pilots to augment cybersecurity attacks such as hacking, malware, and phishing. 
These co-pilots could potentially discover new vulnerabilities in systems (hardware, software, data) and write code to exploit them.",Sophisticated threat actors might use GAI-powered security co-pilots to inform attackers on how to proactively evade threat detection and escalate privileges after gaining system access.,1.0,0.9972221538600797,1.0,0.3333333333,0.4077146328851198 +"What considerations should organizations take into account to ensure accessibility during the design, development, and deployment of automated systems?","['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. 
Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. 
Inno\xad']","Based on the provided context, organizations should consider the following to ensure accessibility during the design, development, and deployment of automated systems: + +- Accessibility and reasonable accommodations +- AI actor credentials and qualifications +- Alignment to organizational values +- Auditing and assessment +- Change-management controls +- Commercial use +- Data provenance + +These considerations are mentioned in the context of ensuring accessibility and reasonable accommodations in the design, development, and deployment of automated systems.","Organizations should ensure accessibility to people with disabilities during the design, development, and deployment of automated systems. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers to the use or effectiveness of the automated system.",1.0,0.9815118899552427,0.5,0.45714285713142855,0.3665578100586089 +What actions are suggested for examining and documenting the privacy risk of an AI system?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. 
\n3']",I don't know.,"The suggested actions for examining and documenting the privacy risk of an AI system include: 1. Conducting AI red-teaming to assess issues such as outputting of training data samples, reverse engineering, model extraction, membership inference risks, revealing biometric, confidential, copyrighted, licensed, patented, personal, proprietary, sensitive, or trade-marked information, and tracking or revealing location information of users or members of training datasets. 2. Engaging directly with end-users and other stakeholders to understand their expectations and concerns regarding content provenance and using this feedback to guide the design of provenance data-tracking techniques. 3. Verifying deduplication of GAI training data samples, particularly regarding synthetic data.",0.0,0.0,0.0,0.499999999975,0.17975976821810674 +How can robust watermarking techniques and corresponding detectors be useful in GAI systems used for content creation?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 
54']",I don't know.,Robust watermarking techniques and corresponding detectors can be useful in GAI systems used for content creation to identify the source of content.,0.0,0.0,0.0,0.0,0.18652238925722214 +What is the importance of public consultation in the development of automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. 
Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'monitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguard \ning individuals’ rights. These reporting expectations are important for transparency, so the American people can have' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'Priorities Related to Information Integrity Research and Development.']","The importance of public consultation in the development of automated systems includes: + +1. 
**Receiving Community Input**: It ensures that the design and use of technologies take into account the perspectives and concerns of the community. +2. **Transparency and Accountability**: Public consultation processes can help in identifying and mitigating potential harms, thereby increasing confidence in the use of automated systems. +3. **Addressing Concerns**: It allows for the identification of concerns raised by the public and the incorporation of decisions made due to these concerns into the development process. +4. **Proactive Protections**: Expanded and proactive protections can build on existing practices, ensuring that the public is protected from potential harms associated with automated systems. + +These points highlight the role of public consultation in making automated systems more transparent, accountable, and aligned with the needs and concerns of the community.","Public consultation is important in the development of automated systems because it ensures that the public is involved in the design, implementation, deployment, acquisition, and maintenance phases. This consultation emphasizes early-stage engagement before a system is introduced or a large change is implemented. It directly engages diverse impacted communities to consider concerns and risks unique to those communities or disproportionately prevalent or severe for them. The consultation should include subject matter, sector-specific, and context-specific experts, as well as experts on potential impacts such as civil rights, civil liberties, and privacy experts. Concerns raised in this consultation should be documented, and the automated system developers should reconsider the system based on this feedback.",0.9230769230769231,0.9800891703755061,0.8,0.8736394557698323,0.6036321982837644 +What mechanisms should be created to provide protections for whistleblowers who report organizational violations or risks to public safety?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'whether they are genuinely helpful in solving an identified problem. 
\nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of"" + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. 
\n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance']",I don't know.,"Create mechanisms to provide protections for whistleblowers who report, based on reasonable belief, when the organization violates relevant laws or poses a specific and empirically well-substantiated negative risk to public safety (or has already caused harm).",0.0,0.0,0.0,0.0,0.18231348244221784 +What approaches are suggested for mapping AI technology and legal risks of its components?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. 
\n9' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,"Approaches for mapping AI technology and legal risks of its components include periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, connecting new GAI policies to existing governance and risk management activities, documenting training data curation policies, establishing policies for data collection and retention, implementing policies for the use and protection of third-party intellectual property and training data, re-evaluating models fine-tuned on third-party models, re-evaluating risks when adapting GAI models to new domains, and leveraging approaches to detect the presence of PII or sensitive data in generated output.",0.0,0.0,0.0,0.4105555555473445,0.17865687211082493 +What is confabulation and how can it mislead or deceive users?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'it comes to open-ended prompts for long-form responses and in domains which require highly \ncontextual and/or domain expertise. \nRisks from confabulations may arise when users believe false content – often due to the confident nature \nof the response – leading users to act upon or promote the false information. This poses a challenge for \nmany real-world applications, such as in healthcare, where a confabulated summary of patient \ninformation reports could cause doctors to make incorrect diagnoses and/or recommend the wrong \ntreatments. Risks of confabulated content may be especially important to monitor when integrating GAI \ninto applications involving consequential decision making. \nGAI outputs may also include confabulated logic or citations that purport to justify or explain the \nsystem’s answer, which may further mislead humans into inappropriately trusting the system’s output. 
\nFor instance, LLMs sometimes provide logical steps for how they arrived at an answer even when the' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'Priorities Related to Information Integrity Research and Development.' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']","Confabulation refers to the generation of false content by AI systems, often presented with a high degree of confidence. This can mislead or deceive users because they may believe the false information and act upon it. For example, in healthcare, a confabulated summary of patient information could lead doctors to make incorrect diagnoses or recommend the wrong treatments. Additionally, AI systems may provide confabulated logic or citations that appear to justify or explain their answers, further misleading users into inappropriately trusting the system's output.",Confabulation is the production of confidently stated but erroneous or false content (known colloquially as 'hallucinations' or 'fabrications') by which users may be misled or deceived.,1.0,0.9115600942510714,1.0,0.16666666665,0.4593004349262577 +"What is the purpose of incorporating trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems according to the AI Risk Management Framework (AI RMF) 1.0?","['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. 
\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'and management. 
One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"The purpose of incorporating trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems according to the AI Risk Management Framework (AI RMF) 1.0 is to improve the ability of organizations to manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory requirements and best practices, and reflects risk management priorities.",0.0,0.0,0.0,0.5333333333155555,0.17933560378232244 +"What types of research does the National Science Foundation (NSF) fund to advance the safety, security, and effectiveness of AI systems?","['ENDNOTES\n23. National Science Foundation. National Artificial Intelligence Research Institutes. Accessed Sept. 12,\n2022. https://beta.nsf.gov/funding/opportunities/national-artificial-intelligence-research-institutes\n24. National Science Foundation. Cyber-Physical Systems. Accessed Sept. 12, 2022. https://beta.nsf.gov/\nfunding/opportunities/cyber-physical-systems-cps\n25. National Science Foundation. Secure and Trustworthy Cyberspace. Accessed Sept. 12, 2022. https://\nbeta.nsf.gov/funding/opportunities/secure-and-trustworthy-cyberspace-satc\n26. National Science Foundation. Formal Methods in the Field. Accessed Sept. 12, 2022. https://\nbeta.nsf.gov/funding/opportunities/formal-methods-field-fmitf\n27. National Science Foundation. Designing Accountable Software Systems. Accessed Sept. 12, 2022.\nhttps://beta.nsf.gov/funding/opportunities/designing-accountable-software-systems-dass\n28. The Leadership Conference Education Fund. The Use Of Pretrial “Risk Assessment” Instruments: A' + 'accuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging \ngeneration of artificially intelligent partners.95 The National Science Foundation’s program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + 'Priorities Related to Information Integrity Research and Development.' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.']","Based on the provided context, the National Science Foundation (NSF) funds research in the following areas to advance the safety, security, and effectiveness of AI systems: + +1. National Artificial Intelligence Research Institutes +2. Cyber-Physical Systems +3. Secure and Trustworthy Cyberspace +4. Formal Methods in the Field +5. Designing Accountable Software Systems +6. 
Fairness in Artificial Intelligence, with a specific interest in research foundations for explainable AI + +These areas encompass a broad range of topics aimed at improving the reliability, accountability, and transparency of AI systems.","The National Science Foundation (NSF) funds extensive research to help foster the development of automated systems that adhere to and advance their safety, security, and effectiveness. Multiple NSF programs support research that directly addresses many of these principles: the National AI Research Institutes support research on all aspects of safe, trustworthy, fair, and explainable AI algorithms and systems; the Cyber Physical Systems program supports research on developing safe autonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace program supports research on cybersecurity and privacy enhancing technologies in automated systems; the Formal Methods in the Field program supports research on rigorous formal verification and analysis of automated systems and machine learning, and the Designing Accountable Software Systems program supports research on rigorous and reproducible methodologies for developing software systems with legal and regulatory compliance in mind.",0.5,0.9489662422924591,1.0,0.766666666641111,0.8833010169596613 +How have synthetic NCII and CSAM moved from niche internet forums to mainstream online businesses?,"['and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.' + 'listed for only some subcategories. 
\n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.']",I don't know.,"Websites, mobile apps, and custom-built models that generate synthetic NCII have moved from niche internet forums to mainstream, automated, and scaled online businesses.",0.0,0.0,0.0,0.0,0.1851748030212246 +What measures are suggested to mitigate concerns of harmful bias and homogenization in AI training data?,"['organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'the information presented. 
\nHuman-AI Configuration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.']","The measures suggested to mitigate concerns of harmful bias and homogenization in AI training data include: + +1. **Verifying deduplication of GAI training data samples, particularly regarding synthetic data.** +2. **Evaluating potential biases and stereotypes that could emerge from the AI-generated content using appropriate methodologies, including computational testing methods as well as evaluating structured feedback input.** + +These measures are aimed at ensuring the integrity and fairness of the AI training data and the resulting AI models.",Assess the proportion of synthetic to non-synthetic training data and verify training data is not overly homogenous or GAI-produced to mitigate concerns of model collapse.,0.8,0.9999999999999997,1.0,0.7048611110934896,0.5551965866049806 +How should organizational risk tolerances and controls be applied to third-party GAI resources?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Priorities Related to Information Integrity Research and Development.' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,"Organizational risk tolerances and controls should be applied to third-party GAI resources by incorporating them into acquisition and procurement processes, assessing personnel credentials and qualifications, performing background checks, filtering GAI input and outputs, grounding, fine-tuning, and retrieval-augmented generation. Additionally, organizational risk tolerance should be applied to the utilization of third-party datasets and other GAI resources, fine-tuned third-party models, and existing third-party models adapted to a new domain. Risk measurements should be reassessed after fine-tuning third-party GAI models.",0.0,0.0,0.0,0.8928571428348213,0.17859662721346473 +What characteristics should data have to be considered representative and robust in the development or assessment of automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'or label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections']",I don't know.,Data used as part of system development or assessment should be representative of local communities based on the planned deployment setting and should be reviewed for bias based on the historical and societal context of the data. 
Such data should be sufficiently robust to identify and help to mitigate biases and potential harms.,0.0,0.0,1.0,0.3333333333,0.1819138222482943 +What actions are suggested to ensure information integrity in the context of AI systems?,"['Priorities Related to Information Integrity Research and Development.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. 
In some circumstances, application of these principles']",I don't know.,"The suggested actions to ensure information integrity in the context of AI systems include: 1) Employing methods to trace the origin and modifications of digital content. 2) Integrating tools designed to analyze content provenance and detect data anomalies, verify the authenticity of digital signatures, and identify patterns associated with misinformation or manipulation. 3) Disaggregating evaluation metrics by demographic factors to identify any discrepancies in how content provenance mechanisms work across diverse populations. 4) Evaluating novel methods and technologies for the measurement of GAI-related risks including in content provenance, offensive cyber, and CBRN, while maintaining the models’ ability to produce valid, reliable, and factually accurate outputs.",0.0,0.0,0.0,0.6428571428357143,0.18396607167756987 +"What are the principles that federal agencies must adhere to under Executive Order 13960 when designing, developing, acquiring, or using AI?","['does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'in whole or in part may not be appropriate given the intended use of automated systems to achieve government \nagency missions. Future sector-specific guidance will likely be necessary and important for guiding the use of \nautomated systems in certain settings such as AI systems used as part of school building security or automated \nhealth diagnostic systems. \nThe Blueprint for an AI Bill of Rights recognizes that law enforcement activities require a balancing of \nequities, for example, between the protection of sensitive law enforcement information and the principle of \nnotice; as such, notice may not be appropriate, or may need to be adjusted to protect sources, methods, and \nother law enforcement equities. Even in contexts where these principles may not apply in whole or in part, \nfederal departments and agencies remain subject to judicial, privacy, and civil liberties oversight as well as \nexisting policies and safeguards that govern automated systems, including, for example, Executive Order 13960,' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. 
\n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'other countries. \nWe have seen modest progress in recent years, with some state and local governments responding to these prob\xad\nlems with legislation, and some courts extending longstanding statutory protections to new and emerging tech\xad\nnologies. There are companies working to incorporate additional protections in their design and use of auto\xad\nmated systems, and researchers developing innovative guardrails. Advocates, researchers, and government \norganizations have proposed principles for the ethical use of AI and other automated systems. These include \nthe Organization for Economic Co-operation and Development’s (OECD’s) 2019 Recommendation on Artificial \nIntelligence, which includes principles for responsible stewardship of trustworthy AI and which the United \nStates adopted, and Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government, which sets out principles that govern the federal government’s use of AI. The Blueprint' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"The principles that federal agencies must adhere to under Executive Order 13960 when designing, developing, acquiring, or using AI are: (a) lawful and respectful of our Nation’s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) safe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transparent; and, (i) accountable.",0.0,0.0,1.0,0.36944444443212965,0.17863647133918925 +How did the installation of a facial recognition system by a local public housing authority impact the community?,"['for any resulting algorithmic discrimination. 
\n26\nAlgorithmic \nDiscrimination \nProtections' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'policing, automated license plate readers, real-time facial recognition systems (especially \n those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. \n• Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems,' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of"" + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. 
\n3' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections']",I don't know.,"The installation of a facial recognition system by a local public housing authority led the community, both those living in the housing complex and not, to have videos of them sent to the local police department and made available for scanning by its facial recognition software.",0.0,0.0,0.0,0.249999999975,0.18457611981510857 +How have businesses successfully integrated automated customer service with human support teams?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. 
Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'health coverage.”107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. Some federal' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",Businesses have successfully integrated automated customer service with human support teams by using partially automated customer service platforms. These platforms include chat-bots and AI-driven call response systems that can escalate issues to human support teams when necessary. This integration allows companies to provide faster customer care while maintaining human agents to handle more complicated requests. The combination of AI and human agents is viewed as key to successful customer service.,The customer service industry has successfully integrated automated services such as chat-bots and AI-driven call response systems with escalation to a human support team. Many businesses now use partially automated customer service platforms that help answer customer questions and compile common problems for human agents to review. These integrated human-AI systems allow companies to provide faster customer care while maintaining human agents to answer calls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to successful customer service.,1.0,0.9908452350548383,1.0,0.21111111110055555,0.9962919730279303 +Who were some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights?,"['ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. 
The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4' + 'APPENDIX\n• OSTP conducted meetings with a variety of stakeholders in the private sector and civil society. Some of these\nmeetings were specifically focused on providing ideas related to the development of the Blueprint for an AI\nBill of Rights while others provided useful general context on the positive use cases, potential harms, and/or\noversight possibilities for these technologies. Participants in these conversations from the private sector and\ncivil society included:\nAdobe \nAmerican Civil Liberties Union \n(ACLU) \nThe Aspen Commission on \nInformation Disorder \nThe Awood Center \nThe Australian Human Rights \nCommission \nBiometrics Institute \nThe Brookings Institute \nBSA | The Software Alliance \nCantellus Group \nCenter for American Progress \nCenter for Democracy and \nTechnology \nCenter on Privacy and Technology \nat Georgetown Law \nChristiana Care \nColor of Change \nCoworker \nData Robot \nData Trust Alliance \nData and Society Research Institute \nDeepmind \nEdSAFE AI Alliance \nElectronic Privacy Information' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous']","Some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights included: + +- Adobe +- American Civil Liberties Union (ACLU) +- The Aspen Commission on Information Disorder +- The Awood Center +- The Australian Human Rights Commission +- Biometrics Institute +- The Brookings Institute +- BSA | The Software Alliance +- Cantellus Group +- Center for American Progress +- Center for Democracy and Technology +- Center on Privacy and Technology at Georgetown Law +- Christiana Care +- Color of Change +- Coworker +- Data Robot +- Data Trust Alliance +- Data and Society Research Institute +- Deepmind +- EdSAFE AI Alliance +- Electronic Privacy Information","Some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights included Adobe, American Civil Liberties Union (ACLU), The Aspen Commission on Information Disorder, The Awood Center, The Australian Human Rights Commission, Biometrics Institute, The Brookings Institute, BSA | The Software Alliance, Cantellus Group, Center for American Progress, Center for Democracy and Technology, Center on Privacy and Technology at Georgetown Law, Christiana Care, Color of Change, Coworker, Data Robot, Data Trust Alliance, Data and Society Research Institute, Deepmind, EdSAFE AI Alliance, Electronic Privacy Information Center (EPIC), Encode Justice, Equal AI, Google, Hitachi's AI Policy Committee, The Innocence Project, Institute of Electrical and Electronics Engineers (IEEE), Intuit, Lawyers Committee for Civil Rights Under Law, Legal Aid Society, The Leadership Conference on Civil and Human Rights, Meta, Microsoft, The MIT AI Policy Forum, Movement Alliance Project, The National Association of Criminal Defense Lawyers, O’Neil Risk Consulting & Algorithmic Auditing, The Partnership on AI, Pinterest, The Plaintext Group, pymetrics, SAP, The Security Industry Association, Software and Information Industry Association (SIIA), Special Competitive Studies Project, Thorn, United for Respect, University of California at Berkeley Citris Policy Lab, University of California at Berkeley Labor Center, Unfinished/Project Liberty, Upturn, US Chamber of Commerce, US Chamber of Commerce Technology Engagement Center A.I. Working Group, Vibrent Health, Warehouse Worker Resource Center, and Waymap.",,0.973242051616895,1.0,0.49999999995,0.24589861625308068 +What should be done if the residual negative risk of an AI system exceeds the organizational risk tolerance?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Priorities Related to Information Integrity Research and Development.' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,Re-evaluate safety features of fine-tuned models when the negative risk exceeds organizational risk tolerance.,0.0,0.0,0.0,0.0,0.18242534855844236 +What factors should be assessed to determine and document the expected and acceptable GAI system context of use?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. 
Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. 
\nGovernment agency.']",I don't know.,"To determine and document the expected and acceptable GAI system context of use, the following factors should be assessed: Assumptions and limitations; Direct value to the organization; Intended operational environment and observed usage patterns; Potential positive and negative impacts to individuals, public safety, groups, communities, organizations, democratic institutions, and the physical environment; Social norms and expectations.",0.0,0.0,0.0,0.36666666664833336,0.1836481343264991 +What is model collapse and what are its potential consequences?,"['and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20']",I don't know.,"Model collapse can occur when model training over-relies on synthetic data, resulting in data points disappearing from the distribution of the new model's outputs. 
In addition to threatening the robustness of the model overall, model collapse could lead to homogenized outputs, including by amplifying any homogenization from the model used to generate the synthetic training data.",0.0,0.0,0.0,0.0,0.17883108386750768 +What factors should be considered when decommissioning AI systems to ensure safety and maintain the organization's trustworthiness?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'harm, such as the ideation and design of novel harmful chemical or biological agents. 
\nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems’ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable']",I don't know.,"When decommissioning AI systems, the following factors should be considered to ensure safety and maintain the organization's trustworthiness: Data retention requirements; Data security, e.g., containment, protocols, Data leakage after decommissioning; Dependencies between upstream, downstream, or other data, internet of things (IOT) or AI systems; Use of open-source data or models; Users’ emotional entanglement with GAI functions.",0.0,0.0,0.0,0.36111111109907407,0.17929004840532906 +What issues are associated with the tool meant to help low-risk federal prisoners win early release?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'Priorities Related to Information Integrity Research and Development.' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. 
\n20' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,The tool meant to help low-risk federal prisoners win early release is plagued by flaws.,0.0,0.0,0.0,0.09999999999,0.1851984875221609 +How can GAI misuse cause emotional harm short and long-term?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'even if the inferences are not accurate (e.g., confabulations), and especially if they reveal information \nthat the individual considers sensitive or that is used to disadvantage or harm them. \nBeyond harms from information exposure (such as extortion or dignitary harm), wrong or inappropriate \ninferences of PII can contribute to downstream or secondary harmful impacts. For example, predictive \ninferences made by GAI models based on PII or protected attributes can contribute to adverse decisions, \nleading to representational or allocative harms to individuals or groups (see Harmful Bias and \nHomogenization below).' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily “averse” to GAI systems, and thus \ndeprive themselves or others of GAI’s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.']","Based on the provided context, GAI misuse can cause emotional harm in both the short and long term through concerns about emotional entanglement between humans and GAI systems, which could lead to negative psychological impacts. + +For more detailed information, the context does not provide further specifics on the mechanisms or examples of such emotional harm.","GAI misuse can cause emotional harm both immediately and over extended periods. Immediate emotional harm can result from the distribution of harmful deepfake images, while long-term emotional harm can stem from the effects of disinformation on societal trust in public institutions.",0.8,0.0,0.3333333333333333,0.3499999999825,0.6835801659160631 +How is diverse feedback used in GAI evaluation to ensure provenance and reduce bias?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' 
+ 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36']","The context does not provide specific details on how diverse feedback is used in GAI evaluation to ensure provenance and reduce bias. + +I don't know.","Diverse feedback is used in GAI evaluation to ensure provenance and reduce bias by recording and integrating structured feedback from operators, users, and potentially impacted communities through methods such as user research studies, focus groups, or community forums. This feedback is actively sought to assess content quality and potential biases, and the results are documented and incorporated into design, implementation, deployment approval, monitoring, and decommission decisions.",0.3333333333333333,0.0,0.0,0.0,0.3397933620174777 +How did a store's misuse of predictive analytics show the need for better data protection?,"['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'Priorities Related to Information Integrity Research and Development.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. 
\n36' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']",I don't know.,"A department store company used predictive analytics applied to collected consumer data to determine that a teenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her house, revealing to her father that she was pregnant.",0.0,0.0,0.0,0.249999999975,0.18508829933868393 +How do safety metrics and security measures help AI handle and fix errors from threats?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' 
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"Safety metrics reflect system reliability and robustness, real-time monitoring, and response times for AI system failures. Security measures help assess vulnerabilities and threats, benchmark system security, gather user feedback, identify effective security metrics, measure content authentication methods, and perform AI red-teaming to assess resilience against various attacks.",0.0,0.0,1.0,0.22619047617916663,0.1780008643616345 +How to ensure accessibility and equity in automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. 
\n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + ""have the potential to meaningfully impact individuals' or communities' exercise of: \nRIGHTS, OPPORTUNITIES, OR ACCESS\nCivil rights, civil liberties, and privacy, including freedom of speech, voting, and protections from discrimi\xad\nnation, excessive punishment, unlawful surveillance, and violations of privacy and other freedoms in both \npublic and private sector contexts; \nEqual opportunities, including equitable access to education, housing, credit, employment, and other \nprograms; or, \nAccess to critical resources or services, such as healthcare, financial services, safety, social services, \nnon-deceptive information about goods and services, and government benefits. \nA list of examples of automated systems for which these principles should be considered is provided in the \nAppendix. The Technical Companion, which follows, offers supportive guidance for any person or entity that \ncreates, deploys, or oversees automated systems."" + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. 
Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"To ensure accessibility and equity in automated systems, organizations should design, develop, and deploy systems in ways that ensure accessibility to people with disabilities. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers. Additionally, automated systems should be tested using a broad set of measures to assess whether the system components produce disparities. Disparity assessments should include demographic performance measures, overall and subgroup parity assessment, and calibration. When disparities are identified, steps should be taken to mitigate or eliminate them, and ongoing monitoring should be conducted to assess algorithmic discrimination that might arise from unforeseen interactions or changes to the system or context of use.",0.0,0.0,1.0,0.5768253968138602,0.18070007468605156 +How can orgs use public feedback and incident reports to improve GAI content in decision-making?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.']","Organizations can use public feedback and incident reports to improve GAI content in decision-making by implementing feedback activities that follow human subjects research requirements and best practices, such as informed consent and subject compensation. Additionally, documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history, and metadata can also empower AI Actors in responding to and managing AI incidents. 
+ +Source: Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 52, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': '', '_id': '4f87454a1273455c83caadeb8daea3de', '_collection_name': 'snowflake-arctic-embed-l-recursive-base'}, page_content='decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.'), Document(metadata={'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 56, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': '', '_id': 'd4fc29cc697241b68d76cefd5c823d92', '_collection_name': 'snowflake-arctic-embed-l-recursive-base'}, page_content='inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.')","Organizations can use public feedback and incident reports to improve GAI content in decision-making by integrating pre- and post-deployment external feedback into the monitoring process for GAI models and corresponding applications. This helps enhance awareness of performance changes and mitigate potential risks and harms from outputs. Additionally, documenting, reporting, and sharing information about GAI incidents can help mitigate and prevent harmful outcomes by assisting relevant AI Actors in tracing impacts to their source. Greater awareness and standardization of GAI incident reporting could promote transparency and improve GAI risk management across the AI ecosystem.",1.0,0.9365147977562281,1.0,0.31944444443379627,0.6816657523267644 +How do feedback and testing reveal GAI biases and societal impacts?,"['ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Table of Contents \n1. 
\nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952']",I don't know.,"Feedback and testing reveal GAI biases and societal impacts through methods such as user research studies, focus groups, community forums, adversarial testing, real-world scenario evaluations, and structured public feedback exercises. These methods help identify potential biases, understand misuse scenarios, and assess the general awareness among end users and impacted communities.",0.0,0.0,0.0,0.11111111109999999,0.18169426637419137 +How do EO 13960 and NIST AI RMF ensure AI transparency and accountability for federal agencies?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Diļ¬€er from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. 
\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artiļ¬cial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artiļ¬cial-intelligence' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Proļ¬les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Priorities Related to Information Integrity Research and Development.' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'Descriptions of AI Actor Tasks. 
\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product']",I don't know.,"EO 13960 ensures AI transparency and accountability for federal agencies by requiring that AI is transparent and accountable among other principles. The NIST AI Risk Management Framework aims to foster the development of innovative approaches to address characteristics of trustworthiness, including transparency and accountability, during pre-design, design and development, deployment, use, and testing and evaluation of AI technologies and systems.",0.0,0.0,0.0,0.499999999975,0.17678240361823302 +How can human expertise and content provenance boost GAI performance and ensure data privacy?,"['guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspeciļ¬c expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily ā€œaverseā€ to GAI systems, and thus \ndeprive themselves or others of GAIā€™s beneļ¬cial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustiļ¬ably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.' + 'purpose speciļ¬cation. For example, most model developers do not disclose speciļ¬c data sources on \nwhich models were trained, limiting user awareness of whether personally identiļ¬ably information (PII) \nwas trained on and, if so, how it was collected. \nModels may leak, generate, or correctly infer sensitive information about individuals. For example, \nduring adversarial attacks, LLMs have revealed sensitive information (from the public domain) that was \nincluded in their training data. This problem has been referred to as data memorization, and may pose \nexacerbated privacy risks even for data present only in a small number of training samples. 
\nIn addition to revealing sensitive information in GAI training data, GAI models may be able to correctly \ninfer PII or sensitive data that was not in their training data nor disclosed by the user by stitching \ntogether information from disparate sources. These inferences can have negative impact on an individual' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'MP-2.3-001 \nAssess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of \nevaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs). \nInformation Integrity']",I don't know.,"Human expertise and content provenance can boost GAI performance by employing techniques such as RLHF, fine-tuning, retrieval-augmented generation, content moderation, and business rules. To ensure data privacy, it is important to anonymize data, leverage privacy output filters, and remove any personally identifiable information (PII).",0.0,0.0,1.0,0.7783333333177667,0.18460049561184388 +"What's the best environment for testing GAI's trustworthiness, data privacy, and human subject protection?","['external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Table of Contents \n1. 
\nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'MP-2.3-001 \nAssess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of \nevaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs). \nInformation Integrity' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36']",I don't know.,"The best environment for testing GAI's trustworthiness, data privacy, and human subject protection is a purpose-built testing environment such as NIST Dioptra.",0.0,0.0,0.0,0.0,0.18495814642243757 +How can automated systems ensure accessibility and prevent bias?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'or lead to algorithmic discrimination. \nOversight. Human-based systems have the potential for bias, including automation bias, as well as other \nconcerns that may limit their effectiveness. The results of assessments of the efficacy and potential bias of \nsuch human-based systems should be overseen by governance structures that have the potential to update the \noperation of the human-based system in order to mitigate these effects. \n50']",I don't know.,"Automated systems can ensure accessibility by being designed, developed, and deployed in ways that ensure accessibility to people with disabilities. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers. To prevent bias, automated systems should be tested using a broad set of measures to assess whether the system components produce disparities. This includes demographic performance measures, overall and subgroup parity assessment, and calibration. Disparity mitigation steps should be taken if a disparity is identified, and ongoing monitoring should be performed to assess algorithmic discrimination that might arise from unforeseen interactions or changes.",0.0,0.0,1.0,0.5798611110966146,0.1809630352708985 +How do the U.S. 
AI Safety Institute and AI Risk Management Framework support the 2023 AI Executive Order?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Diļ¬€er from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artiļ¬cial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artiļ¬cial-intelligence' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Proļ¬les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulļ¬ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. 
\nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staļ¬€ and guest' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exļ¬ltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exļ¬ltration-poc-and-ļ¬x/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.']",I don't know.,The answer to given question is not present in context,0.0,0.0,1.0,0.0,0.19520361397610936 +How can we ensure effective oversight and risk mgmt of GAI systems with third-party input?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'whether they are genuinely helpful in solving an identified problem. 
\nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']","Based on the provided context, effective oversight and risk management of Generative Artificial Intelligence (GAI) systems with third-party input can be ensured by: + +1. **Institution and Protection of Third-Party Audits**: Ensuring that third-party audits are conducted to maintain accountability and validate the systems continuously. + +2. **Public Reporting**: Implementing public reporting on crucial elements of these systems to maintain transparency. + +3. **Community Input**: Receiving community input into the design and use of technologies to ensure they are genuinely helpful and aligned with societal needs. + +4. **Governance and Training**: Establishing governance structures and providing training for using these systems to ensure they are used responsibly and effectively. + +5. 
**Notice and Consent Procedures**: Implementing better notice and consent procedures to ensure privacy based on context and use case. + +6. **Opt-Out Options**: Providing the ability to opt-out of using these systems and offering a fallback to a human process. + +7. **Explanations of Decisions**: Providing explanations of decisions and how these systems work to ensure transparency and understanding. + +8. **Local Validation**: Ensuring that technological use cases are genuinely related to the goal task and are locally validated to work. + +These measures collectively contribute to effective oversight and risk management of GAI systems with third-party input. + +Source: https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf (page 56)","To ensure effective oversight and risk management of GAI systems with third-party input, organizations should establish policies and procedures that address AI risks associated with third-party entities. This includes categorizing different types of GAI content with associated third-party rights, conducting joint educational activities and events to promote best practices for managing GAI risks, developing and validating approaches for measuring the success of content provenance management efforts, and drafting and maintaining well-defined contracts and service level agreements (SLAs) that specify content ownership, usage rights, quality standards, security requirements, and content provenance expectations.",0.5714285714285714,0.917428703988806,0.4,0.3555555555437037,0.5291549959596082 +How is the integrity of third-party pre-trained models ensured in GAI?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. 
\nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG eļ¬€orts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"The integrity of third-party pre-trained models in GAI is ensured through several actions: reviewing transparency artifacts (e.g., system cards and model cards), applying explainable AI (XAI) techniques, documenting how pre-trained models have been adapted, documenting sources and types of training data and their origins, evaluating user-reported problematic content, implementing content filters, real-time monitoring processes, leveraging feedback from organizational boards or committees, using human moderation systems, and using organizational risk tolerance to evaluate acceptable risks and performance metrics.",0.0,0.0,0.0,0.1249999999875,0.18235293896782445 +How to ensure data accuracy and integrity in GAI systems while addressing impacts with AI Actors?,"['ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'warrant additional human review, tracking and documentation, and greater management oversight. \nAI technology can produce varied outputs in multiple modalities and present many classes of user \ninterfaces. This leads to a broader set of AI Actors interacting with GAI systems for widely diļ¬€ering \napplications and contexts of use. These can include data labeling and preparation, development of GAI \nmodels, content moderation, code generation and review, text generation and editing, image and video \ngeneration, summarization, search, and chat. These activities can take place within organizational \nsettings or in the public domain. \nOrganizations can restrict AI applications that cause harm, exceed stated risk tolerances, or that conļ¬‚ict \nwith their tolerances or values. Governance tools and protocols that are applied to other types of AI \nsystems can be applied to GAI systems. These plans and actions include: \nā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"To ensure data accuracy and integrity in GAI systems while addressing impacts with AI Actors, the following actions are suggested: 1) Plan regular engagements with AI Actors responsible for inputs to GAI systems, including third-party data and algorithms, to review and evaluate unanticipated impacts. 2) Review and document accuracy, representativeness, relevance, and suitability of data used at different stages of the AI life cycle. 3) Deploy and document fact-checking techniques to verify the accuracy and veracity of information generated by GAI systems, especially when the information comes from multiple (or unknown) sources. 4) Develop and implement testing techniques to identify GAI-produced content that might be indistinguishable from human-generated content. 
5) Implement plans for GAI systems to undergo regular adversarial testing to identify vulnerabilities and potential manipulation or misuse.",0.0,0.0,1.0,0.6726190476055952,0.18022231059059673 +How do equity assessments and mitigation steps prevent algorithmic bias?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'an equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23' + 'protections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. 
\nBut there is much more work to do to protect the public from algorithmic discrimination to use and design' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + ""demographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections"" + 'or lead to algorithmic discrimination. \nOversight. Human-based systems have the potential for bias, including automation bias, as well as other \nconcerns that may limit their effectiveness. The results of assessments of the efficacy and potential bias of \nsuch human-based systems should be overseen by governance structures that have the potential to update the \noperation of the human-based system in order to mitigate these effects. \n50']","Equity assessments and mitigation steps prevent algorithmic bias by incorporating several proactive measures into the system design and deployment process. These measures include: + +1. **Proactive Equity Assessments**: Conducting equity assessments as part of the system design to identify and address potential biases early in the development process. +2. **Use of Representative Data**: Ensuring that the data used to train and operate the system is representative of the diverse populations it will impact, thereby reducing the risk of biased outcomes. +3. **Protection Against Proxies for Demographic Features**: Implementing safeguards to prevent the use of proxies that could indirectly introduce demographic biases. +4. **Accessibility for People with Disabilities**: Ensuring that the system is accessible to people with disabilities during its design and development phases. +5. **Pre-deployment and Ongoing Disparity Testing and Mitigation**: Conducting disparity testing before deployment and continuously during the system's operation to identify and mitigate any emerging biases. +6. 
**Clear Organizational Oversight**: Establishing oversight mechanisms to monitor and address potential biases throughout the system's lifecycle. +7. **Independent Evaluation and Plain Language Reporting**: Performing independent evaluations and providing plain language reports, including disparity testing results and mitigation information, to confirm that protections against bias are effective and transparent. + +These steps collectively help to identify, address, and mitigate potential biases, ensuring that the system operates in an equitable manner.","Equity assessments and mitigation steps prevent algorithmic bias by conducting proactive equity assessments in the design phase to review potential input data, associated historical context, accessibility for people with disabilities, and societal goals to identify potential discrimination and effects on equity. Additionally, when a disparity assessment identifies a disparity against an assessed group, steps may be taken to mitigate or eliminate the disparity. This includes evaluating multiple models to select the one with the least adverse impact, modifying data input choices, or identifying a system with fewer disparities. If adequate mitigation is not possible, the use of the automated system should be reconsidered.",0.8421052631578947,0.9109933134763364,0.5,0.5425925925835494,0.46620824104259395 +How do time-critical systems affect human fallback timing?,"['and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'and critical services. The American public deserves the assurance that, when rights, opportunities, or access are \nmeaningfully at stake and there is a reasonable expectation of an alternative to an automated system, they can conve\xad\nniently opt out of an automated system and will not be disadvantaged for that choice. In some cases, such a human or \nother alternative may be required by law, for example it could be required as ā€œreasonable accommodationsā€ for people \nwith disabilities. \nIn addition to being able to opt out and use a human alternative, the American public deserves a human fallback \nsystem in the event that an automated system fails or causes harm. No matter how rigorously an automated system is \ntested, there will always be situations for which the system fails. The American public deserves protection via human \nreview against these outlying or unexpected scenarios. 
In the case of time-critical systems, the public should not have' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.']",I don't know.,"In time-critical systems, the human consideration and fallback mechanism should be immediately available or, where possible, available before the harm occurs. Time-critical systems include, but are not limited to, voting-related systems, automated building access and other access systems, systems that form a critical component of healthcare, and systems that have the ability to withhold wages or otherwise cause immediate financial penalties.",0.0,0.0,0.5,0.1249999999875,0.18285583298127153 +How do hiring algorithms and social media data impact democracy?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'SECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten peopleā€™s \nopportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or \nconsent. \nThese outcomes are deeply harmfulā€”but they are not inevitable. Automated systems have brought about extraor-' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","Based on the provided context, hiring algorithms and social media data impact democracy by limiting opportunities and preventing access to critical resources or services. Specifically, algorithms used in hiring decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. 
Additionally, unchecked social media data collection has been used to threaten peopleā€™s opportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or consent. + +These outcomes are deeply harmful to democracy as they can restrict individual freedoms and rights. + +","Algorithms used in hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. Unchecked social media data collection has been used to threaten peopleā€™s opportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or consent.",1.0,0.9999999999999997,1.0,0.249999999975,0.9817538699396671 +Who oversees assessment in automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. 
In some circumstances, application of these principles' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,Entities responsible for the development or use of automated systems should lay out clear governance structures and procedures. This includes responsibility of specific individuals or entities to oversee ongoing assessment and mitigation.,0.0,0.0,1.0,0.45833333331805554,0.1811436434876164 +How does diverse content in training materials impact societal AI?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs.
broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + '56 \nKarasavva, V. et al. (2021) Personality, Attitudinal, and Demographic Predictors of Non-consensual \nDissemination of Intimate Images. NIH. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9554400/ \nKatzman, J., et al. (2023) Taxonomizing and measuring representational harms: a look at image tagging. \nAAAI. https://dl.acm.org/doi/10.1609/aaai.v37i12.26670 \nKhan, T. et al. (2024) From Code to Consumer: PAI’s Value Chain Analysis Illuminates Generative AI’s Key \nPlayers. AI. https://partnershiponai.org/from-code-to-consumer-pais-value-chain-analysis-illuminates-\ngenerative-ais-key-players/ \nKirchenbauer, J. et al. (2023) A Watermark for Large Language Models. OpenReview. \nhttps://openreview.net/forum?id=aX8ig9X2a7 \nKleinberg, J. et al. (May 2021) Algorithmic monoculture and social welfare. PNAS. \nhttps://www.pnas.org/doi/10.1073/pnas.2018340118 \nLakatos, S. (2023) A Revealing Picture. Graphika. https://graphika.com/reports/a-revealing-picture']",I don't know.,The answer to given question is not present in context,0.0,0.0,1.0,0.0,0.19520361397610936 +"Which methods use expert feedback, group input, or anonymous surveys?","['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'whether they are genuinely helpful in solving an identified problem.
\nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3']",I don't know.,"Participatory engagement methods use expert feedback, group input, or anonymous surveys.",0.0,0.0,0.0,0.11111111109999999,0.18572621750226204 +Which metrics show AI reliability and failure response?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Priorities Related to Information Integrity Research and Development.' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al.
(2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Lakatos, S. (2023) A Revealing Picture. Graphika. https://graphika.com/reports/a-revealing-picture \nLee, H. et al. (2024) Deepfakes, Phrenology, Surveillance, and More! A Taxonomy of AI Privacy Risks. \narXiv. https://arxiv.org/pdf/2310.07879 \nLenaerts-Bergmans, B. (2024) Data Poisoning: The Exploitation of Generative AI. Crowdstrike. \nhttps://www.crowdstrike.com/cybersecurity-101/cyberattacks/data-poisoning/ \nLiang, W. et al. (2023) GPT detectors are biased against non-native English writers. arXiv. \nhttps://arxiv.org/abs/2304.02819 \nLuccioni, A. et al. (2023) Power Hungry Processing: Watts Driving the Cost of AI Deployment? arXiv. \nhttps://arxiv.org/pdf/2311.16863 \nMouton, C. et al. (2024) The Operational Risks of AI in Large-Scale Biological Attacks. RAND. \nhttps://www.rand.org/pubs/research_reports/RRA2977-2.html. \nNicoletti, L. et al. (2023) Humans Are Biased. Generative Ai Is Even Worse. Bloomberg. \nhttps://www.bloomberg.com/graphics/2023-generative-ai-bias/.' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV']",I don't know.,"Safety metrics reflect system reliability and robustness, real-time monitoring, and response times for AI system failures.",0.0,0.0,1.0,0.249999999975,0.18170358254246638 +How does watch list opacity impact error correction and public understanding?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections.
\n5' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'Priorities Related to Information Integrity Research and Development.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952']",I don't know.,Watch list opacity impacts error correction and public understanding by making it difficult for both police and the public to understand why and how the system is making determinations. This lack of transparency can prevent errors from being corrected in a timely manner.,0.0,0.0,0.0,0.0,0.18446736094669172 +"How do policies manage third-party AI risks, IP, and data privacy?","['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Priorities Related to Information Integrity Research and Development.'
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,"Policies manage third-party AI risks, IP, and data privacy by categorizing different types of GAI content with associated third-party rights (e.g., copyright, intellectual property, data privacy), conducting joint educational activities and events in collaboration with third parties to promote best practices for managing GAI risks, developing and validating approaches for measuring the success of content provenance management efforts with third parties (e.g., incidents detected and response times), and drafting and maintaining well-defined contracts and service level agreements (SLAs) that specify content ownership, usage rights, quality standards, security requirements, and content provenance expectations for GAI systems.",0.0,0.0,0.25,0.18253968253055555,0.18076821623726175 diff --git a/Tasks/Task 5/ft_chain_eval_results_df (1).csv b/Tasks/Task 5/ft_chain_eval_results_df (1).csv new file mode 100644 index 0000000000000000000000000000000000000000..e1611796f53684a1087056bd29d5307cc106f5e3 --- /dev/null +++ b/Tasks/Task 5/ft_chain_eval_results_df (1).csv @@ -0,0 +1,561 @@ +question,contexts,answer,ground_truth,faithfulness,answer_relevancy,context_recall,context_precision,answer_correctness +What techniques are suggested to assess and manage statistical biases related to GAI content provenance?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'Table of Contents \n1.
\nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Priorities Related to Information Integrity Research and Development.' + 'MP-2.3-001 \nAssess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of \nevaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs). \nInformation Integrity']",I don't know.,"Techniques such as re-sampling, re-weighting, or adversarial training are suggested to assess and manage statistical biases related to GAI content provenance.",0.0,0.0,0.0,0.249999999975,0.18342742304293966 +What actions are suggested to ensure information integrity in the context of AI systems?,"['Priorities Related to Information Integrity Research and Development.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination.
\n26\nAlgorithmic \nDiscrimination \nProtections' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']",I don't know.,"The suggested actions to ensure information integrity in the context of AI systems include: employing methods to trace the origin and modifications of digital content (MS-1.1-001), integrating tools designed to analyze content provenance and detect data anomalies, verify the authenticity of digital signatures, and identify patterns associated with misinformation or manipulation (MS-1.1-002), and disaggregating evaluation metrics by demographic factors to identify any discrepancies in how content provenance mechanisms work across diverse populations (MS-1.1-003).",0.0,0.0,0.0,0.30952380950833336,0.18303681022057922 +What are the requirements for providing accessible notice and instructions for opting out of automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you.
Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid.
\n57' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,"Those impacted by an automated system should be given a brief, clear notice that they are entitled to opt-out, along with clear instructions for how to opt-out. Instructions should be provided in an accessible form and should be easily findable by those impacted by the automated system. The brevity, clarity, and accessibility of the notice and instructions should be assessed (e.g., via user experience research).",0.0,0.0,1.0,0.34999999999125,0.18188111253680794 +What issues arose from the system awarding benefits changing its criteria invisibly?,"['results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'Priorities Related to Information Integrity Research and Development.' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology.
Reporting should be provided in a clear and \nmachine-readable manner. \n51']",The issues that arose from the system awarding benefits changing its criteria invisibly included errors and other system flaws. These flaws were only revealed when an explanation of the system was demanded and produced. The lack of an explanation made it harder for errors to be corrected in a timely manner.,Individuals were denied benefits due to data entry errors and other system flaws. These flaws were only revealed when an explanation of the system was demanded and produced. The lack of an explanation made it harder for errors to be corrected in a timely manner.,0.2,0.9999999999999992,1.0,0.3333333333,0.698692344736443 +How is the federal government working to combat discrimination in mortgage lending?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'HOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe federal government is working to combat discrimination in mortgage lending. The Depart\xad\nment of Justice has launched a nationwide initiative to combat redlining, which includes reviewing how \nlenders who may be avoiding serving communities of color are conducting targeted marketing and advertising.51 \nThis initiative will draw upon strong partnerships across federal agencies, including the Consumer Financial \nProtection Bureau and prudential regulators. The Action Plan to Advance Property Appraisal and Valuation \nEquity includes a commitment from the agencies that oversee mortgage lending to include a \nnondiscrimination standard in the proposed rules for Automated Valuation Models.52' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew America’s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation' + '2018. https://ainowinstitute.org/aiareport2018.pdf\n51. Department of Justice.
Justice Department Announces New Initiative to Combat Redlining. Oct. 22,\n2021. https://www.justice.gov/opa/pr/justice-department-announces-new-initiative-combat-redlining\n52. PAVE Interagency Task Force on Property Appraisal and Valuation Equity. Action Plan to Advance\nProperty Appraisal and Valuation Equity: Closing the Racial Wealth Gap by Addressing Mis-valuations for\nFamilies and Communities of Color. March 2022. https://pave.hud.gov/sites/pave.hud.gov/files/\ndocuments/PAVEActionPlan.pdf\n53. U.S. Equal Employment Opportunity Commission. The Americans with Disabilities Act and the Use of\nSoftware, Algorithms, and Artificial Intelligence to Assess Job Applicants and Employees. EEOC\xad\nNVTA-2022-2. May 12, 2022. https://www.eeoc.gov/laws/guidance/americans-disabilities-act-and-use\xad\nsoftware-algorithms-and-artificial-intelligence; U.S. Department of Justice. Algorithms, Artificial' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32']","The federal government is working to combat discrimination in mortgage lending through a nationwide initiative launched by the Department of Justice to combat redlining. This initiative includes reviewing how lenders who may be avoiding serving communities of color are conducting targeted marketing and advertising. It involves strong partnerships across federal agencies, including the Consumer Financial Protection Bureau and prudential regulators. Additionally, the Action Plan to Advance Property Appraisal and Valuation Equity includes a commitment from the agencies that oversee mortgage lending to include a nondiscrimination standard in the proposed rules for Automated Valuation Models.","The federal government is working to combat discrimination in mortgage lending through a nationwide initiative launched by the Department of Justice to combat redlining. This includes reviewing how lenders who may be avoiding serving communities of color are conducting targeted marketing and advertising. The initiative involves strong partnerships across federal agencies, including the Consumer Financial Protection Bureau and prudential regulators. 
Additionally, the Action Plan to Advance Property Appraisal and Valuation Equity includes a commitment from the agencies that oversee mortgage lending to include a nondiscrimination standard in the proposed rules for Automated Valuation Models.",1.0,0.959568948414356,1.0,0.2916666666520833,0.9999366085182388 +"What are the characteristics of trustworthy AI that should be integrated into organizational policies, processes, procedures, and practices?","['organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'harm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems’ connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation.
Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']",I don't know.,"The characteristics of trustworthy AI that should be integrated into organizational policies, processes, procedures, and practices include establishing transparency policies and processes for documenting the origin and history of training data and generated data, and establishing policies to evaluate risk-relevant capabilities of GAI and robustness of safety measures.",0.0,0.0,1.0,0.9999999999,0.1791991601217512 +What are the conditions under which individuals should be able to opt out from automated systems in favor of a human alternative?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation).
\nData Privacy; Intellectual \nProperty' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.']",I don't know.,"Individuals should be able to opt out from automated systems in favor of a human alternative where appropriate. Appropriateness should be determined based on reasonable expectations in a given context and with a focus on ensuring broad accessibility and protecting the public from especially harmful impacts. In some cases, a human or other alternative may be required by law.",0.0,0.0,0.6666666666666666,0.32499999998375,0.18517566892878545 +What is data poisoning and how can it affect GAI system outputs?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata.
\nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'Priorities Related to Information Integrity Research and Development.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,Data poisoning is a cybersecurity risk where an adversary compromises a training dataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts of the model could exacerbate risks associated with GAI system outputs.,0.0,0.0,0.0,0.0,0.17944988259517342 +How do opaque decision-making processes in automated systems impact individuals' ability to contest decisions?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized.
In some circumstances, application of these principles' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"Opaque decision-making processes in automated systems impact individuals' ability to contest decisions by denying them the knowledge they need to address the impact of these systems on their lives. Without clear explanations, it becomes harder to understand and contest decisions, as illustrated by examples such as a lawyer unable to determine why a client was cut off from Medicaid-funded assistance and a parent not being notified about data collection for a child maltreatment risk assessment.",0.0,0.0,0.6666666666666666,0.22499999998875,0.18758492202920485 +Who participated in the OSTP meetings focused on the development of the Blueprint for an AI Bill of Rights?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments.
\n4' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"Participants in the OSTP meetings included Adobe, American Civil Liberties Union (ACLU), The Aspen Commission on Information Disorder, The Awood Center, The Australian Human Rights Commission, Biometrics Institute, The Brookings Institute, BSA | The Software Alliance, Cantellus Group, Center for American Progress, Center for Democracy and Technology, Center on Privacy and Technology at Georgetown Law, Christiana Care, Color of Change, Coworker, Data Robot, Data Trust Alliance, Data and Society Research Institute, Deepmind, EdSAFE AI Alliance, Electronic Privacy Information Center (EPIC), Encode Justice, Equal AI, Google, Hitachi's AI Policy Committee, The Innocence Project, Institute of Electrical and Electronics Engineers (IEEE), Intuit, Lawyers Committee for Civil Rights Under Law, Legal Aid Society, The Leadership Conference on Civil and Human Rights, Meta, Microsoft, The MIT AI Policy Forum, Movement Alliance Project, The National Association of Criminal Defense Lawyers, O’Neil Risk Consulting & Algorithmic Auditing, The Partnership on AI, Pinterest, The Plaintext Group, pymetrics, SAP, The Security Industry Association, Software and Information Industry Association (SIIA), Special Competitive Studies Project, Thorn, United for Respect, University of California at Berkeley Citris Policy Lab, University of California at Berkeley Labor Center, Unfinished/Project Liberty, Upturn, US Chamber of Commerce, US Chamber of Commerce Technology Engagement Center A.I. Working Group, Vibrent Health, Warehouse Worker Resource Center, and Waymap.",0.0,0.0,0.0,0.5656084655990388,0.18263698692094232 +What actions are suggested for explaining and validating an AI model to ensure responsible use and governance?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation).
\nData Privacy; Intellectual \nProperty' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'and management. One possible way to further categorize these risks, derived in part from the UKā€™s International \nScientiļ¬c Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Conļ¬guration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.']",I don't know.,"Suggested actions for explaining and validating an AI model to ensure responsible use and governance include: applying and documenting ML explanation results such as analysis of embeddings, counterfactual prompts, gradient-based attributions, model compression/surrogate models, and occlusion/term reduction. Additionally, documenting GAI model details including proposed use and organizational value, assumptions and limitations, data collection methodologies, data provenance, data quality, model architecture, optimization objectives, training algorithms, RLHF approaches, fine-tuning or retrieval-augmented generation approaches, evaluation data, ethical considerations, and legal and regulatory requirements.",0.0,0.0,0.0,0.249999999975,0.17786667628969524 +What provisions are included in the Biometric Information Privacy Act enacted by the state of Illinois?,"['for any resulting algorithmic discrimination. 
\n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'NOTICE & \nEXPLANATION \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\xad\xad\xad\nPeople in Illinois are given written notice by the private sector if their biometric informa-\ntion is used. The Biometric Information Privacy Act enacted by the state contains a number of provisions \nconcerning the use of individual biometric data and identifiers. Included among them is a provision that no private \nentity may ""collect, capture, purchase, receive through trade, or otherwise obtain"" such information about an \nindividual, unless written notice is provided to that individual or their legally appointed representative. 87\nMajor technology companies are piloting new ways to communicate with the public about' + 'ENDNOTES\n85. Mick Dumke and Frank Main. A look inside the watch list Chicago police fought to keep secret. The\nChicago Sun Times. May 18, 2017.\nhttps://chicago.suntimes.com/2017/5/18/18386116/a-look-inside-the-watch-list-chicago-police-fought\xad\nto-keep-secret\n86. Jay Stanley. Pitfalls of Artificial Intelligence Decisionmaking Highlighted In Idaho ACLU Case.\nACLU. Jun. 2, 2017.\nhttps://www.aclu.org/blog/privacy-technology/pitfalls-artificial-intelligence-decisionmaking\xad\nhighlighted-idaho-aclu-case\n87. Illinois General Assembly. Biometric Information Privacy Act. Effective Oct. 3, 2008.\nhttps://www.ilga.gov/legislation/ilcs/ilcs3.asp?ActID=3004&ChapterID=57\n88. Partnership on AI. ABOUT ML Reference Document. Accessed May 2, 2022.\nhttps://partnershiponai.org/paper/about-ml-reference-document/1/\n89. See, e.g., the model cards framework: Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker\nBarnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'for individuals to be able to access any of their individual information stored in a federal system of records, if not \nincluded under one of the systems of records exempted pursuant to the Privacy Act. In these cases, federal agen\xad\ncies must provide a method for an individual to determine if their personal information is stored in a particular \nsystem of records, and must provide procedures for an individual to contest the contents of a record about them. \nFurther, the Privacy Act allows for a cause of action for an individual to seek legal relief if a federal agency does not \ncomply with the Privacy Actā€™s requirements. Among other things, a court may order a federal agency to amend or \ncorrect an individualā€™s information in its records or award monetary damages if an inaccurate, irrelevant, untimely, \nor incomplete record results in an adverse determination about an individualā€™s ā€œqualifications, character, rights, ā€¦ \nopportunitiesā€¦, or benefits.ā€' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']","The Biometric Information Privacy Act enacted by the state of Illinois includes provisions that no private entity may ""collect, capture, purchase, receive through trade, or otherwise obtain"" biometric information about an individual unless written notice is provided to that individual or their legally appointed representative.","The Biometric Information Privacy Act enacted by the state of Illinois contains a number of provisions concerning the use of individual biometric data and identifiers. Included among them is a provision that no private entity may 'collect, capture, purchase, receive through trade, or otherwise obtain' such information about an individual, unless written notice is provided to that individual or their legally appointed representative.",1.0,0.961132426888971,1.0,0.3333333333,0.8904419601823885 +"How should designers, developers, and deployers of automated systems handle data collection and user consent to ensure data privacy?","['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, de\xad\nvelopers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and de\xad\nletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfus\xad\ncate user choice or burden users with defaults that are privacy invasive. Con\xad\nsent should only be used to justify collection of data in cases where it can be' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance']","Designers, developers, and deployers of automated systems should handle data collection and user consent by seeking the user's permission and respecting their decisions regarding the collection, use, access, transfer, and deletion of their data to the greatest extent possible. 
Where it is not possible to obtain consent, alternative privacy by design safeguards should be used. They should ensure that data collection conforms to reasonable expectations and that only data strictly necessary for the specific context is collected. Additionally, systems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. Consent should only be used to justify the collection of data in cases where it can be appropriately obtained.","Designers, developers, and deployers of automated systems should seek your permission and respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; where not possible, alternative privacy by design safeguards should be used. Systems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases where it can be appropriately and meaningfully given. Any consent requests should be brief, be understandable in plain language, and give you agency over data collection and the specific context of use.",1.0,0.9772696325581887,0.75,0.7253968253823174,0.7599917764750947 +How do systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, elec \n-ctrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'detailed steps toward actualizing these principles in the technological design process. 
These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'Priorities Related to Information Integrity Research and Development.' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952']","Based on the provided context, systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties by collating or analyzing information, matching records, and assisting in the adjudication process. These systems may also include fraud detection algorithms and access control algorithms, and they can make decisions on a fully or partially autonomous basis, such as determining to revoke benefits. + +","Systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties by collating or analyzing information or matching records, and by assisting in the adjudication process.",1.0,0.9576964399573923,1.0,0.49999999995,0.6473644558792709 +What should be the focus of risk identification and mitigation for automated systems before deployment?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. 
\nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40' + 'and mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. 
\n18' + 'Priorities Related to Information Integrity Research and Development.']","The focus of risk identification and mitigation for automated systems before deployment should include: + +- A description of any data used to train machine learning models or for other purposes, including how data sources were processed and interpreted. +- A summary of what data might be missing, incomplete, or erroneous, and data relevancy justifications. +- The results of public consultation such as concerns raised and any decisions made due to these concerns. +- Risk identification and management assessments and any steps taken to mitigate potential harms. +- The results of performance testing including, but not limited to, accuracy, differential demographic impact, resulting error rates (overall and per demographic group), and comparisons to previously deployed systems. +- Ongoing monitoring procedures and regular performance testing reports, including monitoring frequency.","Identified risks should focus on the potential for meaningful impact on peopleā€™s rights, opportunities, or access and include those to impacted communities that may not be direct users of the automated system, risks resulting from purposeful misuse of the system, and other concerns identified via the consultation process.",1.0,0.9919162046375328,1.0,0.7027777777660649,0.22400833910263965 +What procedures should be established and maintained for escalating GAI system incidents to the organizational risk management authority?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51']",I don't know.,Establish and maintain procedures for escalating GAI system incidents to the organizational risk management authority when specific criteria for deactivation or disengagement is met for a particular context of use or for the GAI system as a whole.,0.0,0.0,0.0,0.1249999999875,0.1806177835407171 +How do GAI systems contribute to the creation of deepfakes and what are the potential impacts of these deepfakes?,"['and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspeciļ¬c expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. 
As a result, human experts may be unnecessarily ā€œaverseā€ to GAI systems, and thus \ndeprive themselves or others of GAIā€™s beneļ¬cial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustiļ¬ably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.' + '10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards speciļ¬c demographics. Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic ā€œdeepfakesā€ ā€“ that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'warrant additional human review, tracking and documentation, and greater management oversight. \nAI technology can produce varied outputs in multiple modalities and present many classes of user \ninterfaces. This leads to a broader set of AI Actors interacting with GAI systems for widely diļ¬€ering \napplications and contexts of use. These can include data labeling and preparation, development of GAI \nmodels, content moderation, code generation and review, text generation and editing, image and video \ngeneration, summarization, search, and chat. 
These activities can take place within organizational \nsettings or in the public domain. \nOrganizations can restrict AI applications that cause harm, exceed stated risk tolerances, or that conļ¬‚ict \nwith their tolerances or values. Governance tools and protocols that are applied to other types of AI \nsystems can be applied to GAI systems. These plans and actions include: \nā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","GAI systems contribute to the creation of deepfakes by enabling a higher degree of sophistication for malicious actors to produce disinformation that is targeted towards specific demographics. Current and emerging multimodal models make it possible to generate both text-based disinformation and highly realistic ""deepfakes"" ā€“ synthetic audiovisual content and photorealistic images. The potential impacts of these deepfakes include the manipulation of human and machine perception, which can lead to the spread of false or misleading information at scale, causing harm or deception.","GAI systems contribute to the creation of deepfakes by enabling the generation of highly realistic synthetic audiovisual content and photorealistic images. The potential impacts of these deepfakes include eroding public trust in true or valid evidence and information, causing downstream effects such as a drop in the stock market, and assisting malicious actors in creating compelling imagery and propaganda to support disinformation campaigns.",1.0,0.9712528193454298,0.5,0.99999999995,0.7210203821328999 +How can synthetic content detection help manage and mitigate risks associated with AI-generated content?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'arXiv. https://arxiv.org/pdf/2308.14752 \nPartnership on AI (2023) Building a Glossary for Synthetic Media Transparency Methods, Part 1: Indirect \nDisclosure. https://partnershiponai.org/glossary-for-synthetic-media-transparency-methods-part-1-\nindirect-disclosure/ \nQu, Y. et al. 
(2023) Unsafe Diļ¬€usion: On the Generation of Unsafe Images and Hateful Memes From Text-\nTo-Image Models. arXiv. https://arxiv.org/pdf/2305.13873 \nRafat, K. et al. (2023) Mitigating carbon footprint for knowledge distillation based deep learning model \ncompression. PLOS One. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0285668 \nSaid, I. et al. (2022) Nonconsensual Distribution of Intimate Images: Exploring the Role of Legal Attitudes \nin Victimization and Perpetration. Sage. \nhttps://journals.sagepub.com/doi/full/10.1177/08862605221122834#bibr47-08862605221122834 \nSandbrink, J. (2023) Artiļ¬cial intelligence and biological misuse: Diļ¬€erentiating risks of language models' + 'and management. One possible way to further categorize these risks, derived in part from the UKā€™s International \nScientiļ¬c Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Conļ¬guration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Priorities Related to Information Integrity Research and Development.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,"Synthetic content detection can help manage and mitigate risks associated with AI-generated content by facilitating greater information access about both authentic and synthetic content to users, enabling better knowledge of trustworthiness in AI systems. It can also enable processes to trace negative outcomes back to their source, improve information integrity, and uphold public trust.",0.0,0.0,1.0,0.14285714284285714,0.177284727830764 +What organizational practices are in place to enable AI testing and incident identification?,"['ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']","Based on the provided context, the organizational practices in place to enable AI testing and incident identification include: + +- Documentation practices such as logging, recording, and analyzing GAI (Generative Artificial Intelligence) incidents. +- Regular information sharing. +- Maintaining change management records. +- Keeping version history and metadata. + +These practices facilitate smoother sharing of information with relevant AI Actors and empower them to respond to and manage AI incidents. + +(Source: Document from https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf, page 56)","Organizational practices are in place to enable AI testing, identification of incidents, and information sharing. Specifically, policies are established for measuring the effectiveness of employed content provenance methodologies (e.g., cryptography, watermarking, steganography, etc.) 
and identifying the minimum set of criteria necessary for GAI system incident reporting such as System ID, Title, Reporter, System/Source, Data Reported, Date of Incident, Description, Impact(s), and Stakeholder(s) Impacted.",0.8888888888888888,1.0000000000000002,0.5,0.874999999978125,0.796351841472367 +What techniques are suggested to minimize risks associated with data privacy in evaluations involving human subjects?,"['external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. 
\n5']","Based on the provided context, the suggested techniques to minimize risks associated with data privacy in evaluations involving human subjects include following human subjects research requirements and best practices such as informed consent and subject compensation.","Techniques such as anonymization, differential privacy, or other privacy-enhancing technologies are suggested to minimize the risks associated with linking AI-generated content back to individual human subjects.",0.75,0.9979106986583114,0.0,0.0,0.8205509815659303 +Who were some of the participants from the private sector and civil society in the OSTP meetings?,"['Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-proļ¬t, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'APPENDIX\nLisa Feldman Barrett \nMadeline Owens \nMarsha Tudor \nMicrosoft Corporation \nMITRE Corporation \nNational Association for the \nAdvancement of Colored People \nLegal Defense and Educational \nFund \nNational Association of Criminal \nDefense Lawyers \nNational Center for Missing & \nExploited Children \nNational Fair Housing Alliance \nNational Immigration Law Center \nNEC Corporation of America \nNew Americaā€™s Open Technology \nInstitute \nNew York Civil Liberties Union \nNo Name Provided \nNotre Dame Technology Ethics \nCenter \nOffice of the Ohio Public Defender \nOnfido \nOosto \nOrissa Rose \nPalantir \nPangiam \nParity Technologies \nPatrick A. Stewart, Jeffrey K. \nMullins, and Thomas J. 
Greitens \nPel Abbott \nPhiladelphia Unemployment \nProject \nProject On Government Oversight \nRecording Industry Association of \nAmerica \nRobert Wilkens \nRon Hedges \nScience, Technology, and Public \nPolicy Program at University of \nMichigan Ann Arbor \nSecurity Industry Association \nSheila Dean \nSoftware & Information Industry \nAssociation' + 'Priorities Related to Information Integrity Research and Development.' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance']","Based on the provided context, some of the participants from the private sector and civil society in the OSTP meetings included: + +- Microsoft Corporation +- MITRE Corporation +- National Association for the Advancement of Colored People Legal Defense and Educational Fund +- National Association of Criminal Defense Lawyers +- National Center for Missing & Exploited Children +- National Fair Housing Alliance +- National Immigration Law Center +- NEC Corporation of America +- New Americaā€™s Open Technology Institute +- New York Civil Liberties Union +- Notre Dame Technology Ethics Center +- Office of the Ohio Public Defender +- Onfido +- Oosto +- Palantir +- Pangiam +- Parity Technologies +- Philadelphia Unemployment Project +- Project On Government Oversight +- Recording Industry Association of America +- Science, Technology, and Public Policy Program at University of Michigan Ann Arbor +- Security Industry Association +- Software & Information Industry Association","Participants in the OSTP meetings from the private sector and civil society included: Adobe, American Civil Liberties Union (ACLU), The Aspen Commission on Information Disorder, The Awood Center, The Australian Human Rights Commission, Biometrics Institute, The Brookings Institute, BSA | The Software Alliance, Cantellus Group, Center for American Progress, Center for Democracy and Technology, Center on Privacy and Technology at Georgetown Law, Christiana Care, Color of Change, Coworker, Data Robot, Data Trust Alliance, Data and Society Research Institute, Deepmind, EdSAFE AI Alliance, Electronic Privacy Information Center (EPIC), Encode Justice, Equal AI, Google, Hitachi's AI Policy Committee, The Innocence Project, Institute of Electrical and Electronics Engineers (IEEE), Intuit, Lawyers Committee for Civil Rights Under Law, Legal Aid Society, The Leadership Conference on Civil and Human Rights, Meta, Microsoft, The MIT AI Policy Forum, Movement Alliance Project, The National Association of Criminal Defense Lawyers, Oā€™Neil Risk Consulting & Algorithmic Auditing, The Partnership on AI, Pinterest, The Plaintext Group, pymetrics, SAP, The Security Industry Association, Software and Information Industry Association (SIIA), Special Competitive Studies Project, Thorn, United for Respect, University of California at Berkeley Citris Policy Lab, University of California at Berkeley Labor Center, Unfinished/Project Liberty, Upturn, US Chamber of Commerce, US Chamber of Commerce Technology Engagement Center A.I. 
Working Group, Vibrent Health, Warehouse Worker Resource Center, Waymap.",,0.9999999999999994,1.0,0.7361111110927084,0.2339517594233942 +What role does the National Institute of Standards and Technology (NIST) play in advancing artificial intelligence (AI)?,"['About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulļ¬ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staļ¬€ and guest' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Diļ¬€er from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artiļ¬cial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artiļ¬cial-intelligence' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Proļ¬les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'accuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging \ngeneration of artificially intelligent partners.95 The National Science Foundationā€™s program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45' + 'Descriptions of AI Actor Tasks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product' + 'from the community, including the NIST Generative AI Public Working Group, and NIST staļ¬€ and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identiļ¬ed in this document in \norder to adequately describe an experimental procedure or concept. Such identiļ¬cation is not intended to' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-proļ¬t, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.']","The National Institute of Standards and Technology (NIST) develops measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without harm to people or the planet. NIST has conducted both fundamental and applied work on AI for more than a decade and is helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI. Additionally, NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to build the science necessary for the safe, secure, and trustworthy development and use of AI.","The National Institute of Standards and Technology (NIST) develops measurements, technology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, and fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without harm to people or the planet. NIST is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy AI and has established the U.S. 
AI Safety Institute and the companion AI Safety Institute Consortium to build the science necessary for safe, secure, and trustworthy development and use of AI.",1.0,0.978036094470781,1.0,0.8928571428348213,0.9993211622307265 +What should entities responsible for the development or use of automated systems include in their governance structures and procedures to ensure clear organizational oversight?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include information about training and governance procedures for these technologies. Reporting should also include documentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.'
+ 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'monitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguarding individuals' rights. These reporting expectations are important for transparency, so the American people can have' + 'Priorities Related to Information Integrity Research and Development.' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible.
Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality changes. You should know how and why an outcome impacting you was determined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who']","Based on the provided context, entities responsible for the development or use of automated systems should include the following in their governance structures and procedures to ensure clear organizational oversight: + +1. **Monitoring**: Regular monitoring of the automated systems. +2. **Governance Structures**: Establishing clear governance structures for oversight. +3. **Reporting**: Expectations about reporting, which can be provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review. Reports should be made public as much as possible, consistent with law, regulation, and policy, while considering intellectual property, law enforcement, or national security considerations. +4. **Transparency**: Ensuring transparency so that the American people can have confidence in the systems. + +These elements are important for maintaining accountability and ensuring that the systems are used responsibly and ethically.","Entities responsible for the development or use of automated systems should include clearly-stated governance procedures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing assessment and mitigation. Organizational stakeholders, including those with oversight of the business process or operation being automated, as well as other organizational divisions that may be affected due to the use of the system, should be involved in establishing governance procedures. Responsibility should rest high enough in the organization that decisions about resources, mitigation, incident response, and potential rollback can be made promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those holding this responsibility should be made aware of any use cases with the potential for meaningful impact on people's rights, opportunities, or access as determined based on risk identification procedures. In some cases, it may be appropriate for an independent ethics review to be conducted before deployment.",1.0,0.9683597137848928,1.0,0.737755102030277,0.46051581295352406 +How has the customer service industry successfully integrated automated services with human support teams?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument.
It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9']",I don't know.,The customer service industry has successfully integrated automated services such as chat-bots and AI-driven call response systems with escalation to a human support team. Many businesses now use partially automated customer service platforms that help answer customer questions and compile common problems for human agents to review. These integrated human-AI systems allow companies to provide faster customer care while maintaining human agents to answer calls or otherwise respond to complicated requests.
Using both AI and human agents is viewed as key to successful customer service.,0.0,0.0,0.0,0.0,0.18032948270080051 +What steps should be taken to inform AI stakeholders during the deactivation or disengagement of AI systems?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"Establish and maintain communication plans to inform AI stakeholders as part of the deactivation or disengagement process of a specific GAI system (including for open-source models) or context of use, including reasons, workarounds, user access removal, alternative processes, contact information, etc.",0.0,0.0,0.0,0.2916666666520833,0.17901288680321104 +How do the U.S.
AI Safety Institute and AI Risk Management Framework support the 2023 AI Exec Order?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institute of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institute of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.'
+ 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']",I don't know.,"The U.S. AI Safety Institute and the AI Risk Management Framework support the 2023 Executive Order on Safe, Secure, and Trustworthy AI by continuing efforts to build the science necessary for the safe, secure, and trustworthy development and use of AI.",0.0,0.0,0.0,0.4166666666458333,0.17786617457007506 +How to balance synthetic vs. non-synthetic data and document model details to avoid bias?,"['and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'and biological design tools. arXiv.
https://arxiv.org/pdf/2306.13952' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,"To balance synthetic vs. non-synthetic data and document model details to avoid bias, assess the proportion of synthetic to non-synthetic training data and verify that the training data is not overly homogenous or GAI-produced. Additionally, document GAI model details including proposed use and organizational value, assumptions and limitations, data collection methodologies, data provenance, data quality, model architecture, optimization objectives, training algorithms, RLHF approaches, fine-tuning or retrieval-augmented generation approaches, evaluation data, ethical considerations, and legal and regulatory requirements.",0.0,0.0,0.0,0.249999999975,0.18112171206326816 +How to address data privacy and ensure AI integrity in human evaluations?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation).
\nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.']",I don't know.,"To address data privacy and ensure AI integrity in human evaluations, the following actions are suggested: 1. Document how content provenance data is tracked and how that data interacts with privacy and security, including anonymizing data to protect privacy, leveraging privacy output filters, and removing any personally identifiable information (PII). 2. Provide human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications. 3. Use techniques such as anonymization, differential privacy, or other privacy-enhancing technologies to minimize the risks associated with linking AI-generated content back to individual human subjects.",0.0,0.0,0.0,0.5317460317283068,0.18061559566984886 +How can public feedback and incident reporting improve GAI traceability and risk management?,"['Table of Contents \n1. \nIntroduction ........ 1 \n2.
\nOverview of Risks Unique to or Exacerbated by GAI ........ 2 \n3. \nSuggested Actions to Manage GAI Risks ........ 12 \nAppendix A. Primary GAI Considerations ........ 47 \nAppendix B. References ........ 54' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'additionally wish to allocate risk management resources relative to the severity and likelihood of \nnegative impacts, including where and how these risks manifest, and their direct and material impacts \nharms in the context of GAI use. Mitigations for model or system level risks may differ from mitigations \nfor use-case or ecosystem level risks. \nImportantly, some GAI risks are unknown, and are therefore difficult to properly scope or evaluate given \nthe uncertainty about potential GAI scale, complexity, and capabilities. Other risks may be known but \ndifficult to estimate given the wide range of GAI stakeholders, uses, inputs, and outputs.
Challenges with \nrisk estimation are aggravated by a lack of visibility into GAI training data, and the generally immature \nstate of the science of AI measurement and safety today. This document focuses on risks for which there \nis an existing empirical evidence base at the time this profile was written; for example, speculative risks' + 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40']","Public feedback and incident reporting can improve GAI traceability and risk management by facilitating smoother sharing of information with relevant AI Actors. Regular information sharing, change management records, version history, and metadata can empower AI Actors in responding to and managing AI incidents. Documentation practices, including logging, recording, and analyzing GAI incidents, are crucial for this process.","Public feedback and incident reporting can improve GAI traceability and risk management by promoting transparency and enhancing awareness of performance changes. Documentation and sharing of information about GAI incidents help relevant AI Actors trace impacts to their source, while structured public feedback provides context and depth that automated systems may lack. This feedback can be integrated into monitoring processes to mitigate potential risks and harms from outputs.",0.8181818181818182,0.9999999999999999,0.5,0.39166666665687494,0.3857288288642894 +"How could automation bias worsen misinformation risks from GAI, given model collapse and trust erosion?","['and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily "averse" to GAI systems, and thus \ndeprive themselves or others of GAI's beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'guide the design of provenance data-tracking techniques.
\nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insuļ¬ƒcient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'even if the inferences are not accurate (e.g., confabulations), and especially if they reveal information \nthat the individual considers sensitive or that is used to disadvantage or harm them. \nBeyond harms from information exposure (such as extortion or dignitary harm), wrong or inappropriate \ninferences of PII can contribute to downstream or secondary harmful impacts. For example, predictive \ninferences made by GAI models based on PII or protected attributes can contribute to adverse decisions, \nleading to representational or allocative harms to individuals or groups (see Harmful Bias and \nHomogenization below).']","Automation bias can worsen misinformation risks from Generative Artificial Intelligence (GAI) by leading humans to over-rely on GAI systems or unjustifiably perceive GAI content to be of higher quality than that produced by other sources. This excessive deference to automated systems can exacerbate risks such as confabulation (the generation of false information) and bias or homogenization. As a result, misinformation can spread more easily because people might trust and disseminate incorrect or biased information generated by GAI without sufficient scrutiny. This can contribute to model collapse and trust erosion, as the reliability and credibility of GAI systems are undermined by the propagation of false or misleading information.","Automation bias can exacerbate other risks of GAI, such as risks of confabulation or risks of bias or homogenization.",0.6363636363636364,0.9414505849154833,1.0,0.59999999997,0.9820155202529883 +How do consultations and monitoring ensure automated systems' safety and effectiveness?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. 
Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that are needed to protect against the harms of"" + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, development, and deployment of automated systems, and from the \ncompounded harm of its reuse. Independent evaluation and reporting that confirms that the system is safe and effective, including reporting of steps taken to mitigate potential harms, should be performed and the results made public whenever possible. \n15' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public.
Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consultation processes that may be applied when considering the use of new automated systems, and existing product development and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno-' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automated systems should provide generally accessible plain language documentation including clear descriptions of the overall system functioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and explanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality changes. You should know how and why an outcome impacting you was determined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who']","Consultations and monitoring ensure the safety and effectiveness of automated systems by involving public consultation processes to address concerns and make decisions based on those concerns, conducting risk identification and management assessments, and performing pre-deployment testing and ongoing monitoring. These steps help identify and mitigate potential harms, ensure key development decisions are vetted by an ethics review, and maintain regular performance testing to monitor accuracy, demographic impact, and error rates. This proactive approach builds confidence in the use of automated systems and protects the public from potential harms.","Consultations ensure automated systems' safety and effectiveness by engaging diverse impacted communities and experts to consider concerns and risks unique to those communities. This feedback is documented and used to reconsider the development of the system. Monitoring ensures ongoing performance by including recalibration procedures, continuous evaluation of performance metrics and harm assessments, updates of systems, retraining of machine learning models, and fallback mechanisms.
Both technical system components and human operators are monitored, with manual human-led monitoring as a check for shortcomings in automated monitoring systems.",1.0,0.9888831953529978,1.0,0.4442857142768286,0.7488249743191668 +How do EO 13960 and NIST AI RMF ensure AI transparency and accountability for federal agencies?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institute of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institute of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Priorities Related to Information Integrity Research and Development.' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation.
Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'Descriptions of AI Actor Tasks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product']",I don't know.,"Executive Order 13960 requires that AI used by federal agencies is transparent and accountable, among other principles. The NIST AI Risk Management Framework, which is being developed through a consensus-driven, open, transparent, and collaborative process, will also consider and encompass principles such as transparency and accountability during the pre-design, design and development, deployment, use, and testing and evaluation of AI technologies and systems.",0.0,0.0,0.0,0.499999999975,0.17810188855970693 +How does surveillance software for monitoring union talks intersect with data privacy and regulatory gaps?,"['Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Priorities Related to Information Integrity Research and Development.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + ""records and education-related data in order to do so. The overarching concerns of surveillance in these \ndomains included concerns about the chilling effects of surveillance on student expression, inappropriate \ncontrol of tenants via surveillance, and the way that surveillance of workers blurs the boundary between work \nand life and exerts extreme and potentially damaging control over workers' lives. Additionally, some panelists \npointed out ways that data from one situation was misapplied in another in a way that limited people's \nopportunities, for example data from criminal justice settings or previous evictions being used to block further \naccess to housing.
Throughout, various panelists emphasized that these technologies are being used to shift the \nburden of oversight and efficiency from employers to workers, schools to students, and landlords to tenants, in \nways that diminish and encroach on equality of opportunity; assessment of these technologies should include"" + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Alliance for Automotive Innovation \nAmelia Winger-Bearskin \nAmerican Civil Liberties Union \nAmerican Civil Liberties Union of \nMassachusetts \nAmerican Medical Association \nARTICLE19 \nAttorneys General of the District of \nColumbia, Illinois, Maryland, \nMichigan, Minnesota, New York, \nNorth Carolina, Oregon, Vermont, \nand Washington \nAvanade \nAware \nBarbara Evans \nBetter Identity Coalition \nBipartisan Policy Center \nBrandon L. Garrett and Cynthia \nRudin \nBrian Krupp \nBrooklyn Defender Services \nBSA | The Software Alliance \nCarnegie Mellon University \nCenter for Democracy & \nTechnology \nCenter for New Democratic \nProcesses \nCenter for Research and Education \non Accessible Technology and \nExperiences at University of \nWashington, Devva Kasnitz, L Jean \nCamp, Jonathan Lazar, Harry \nHochheiser \nCenter on Privacy & Technology at \nGeorgetown Law \nCisco Systems \nCity of Portland Smart City PDX \nProgram \nCLEAR \nClearview AI \nCognoa \nColor of Change \nCommon Sense Media \nComputing Community Consortium']",I don't know.,"Companies use surveillance software to track employee discussions about union activity and use the resulting data to surveil individual employees and surreptitiously intervene in discussions. The United States lacks a comprehensive statutory or regulatory framework governing the rights of the public when it comes to personal data, which includes the use of surveillance software in workplaces.",0.0,0.0,1.0,0.8095238094968253,0.18098271214915762 +How can provenance tracking and public feedback improve AI content risk management?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and management. One possible way to further categorize these risks, derived in part from the UK's International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.'
+ 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + '2 \nThis work was informed by public feedback and consultations with diverse stakeholder groups as part of NIST's \nGenerative AI Public Working Group (GAI PWG). The GAI PWG was an open, transparent, and collaborative \nprocess, facilitated via a virtual workspace, to obtain multistakeholder input on GAI risk management and to \ninform NIST's approach. \nThe focus of the GAI PWG was limited to four primary considerations relevant to GAI: Governance, Content \nProvenance, Pre-deployment Testing, and Incident Disclosure (further described in Appendix A). As such, the \nsuggested actions in this document primarily address these considerations. \nFuture revisions of this profile will include additional AI RMF subcategories, risks, and suggested actions based \non additional considerations of GAI as the space evolves and empirical evidence indicates additional risks. A \nglossary of terms pertinent to GAI risk management will be developed and hosted on NIST's Trustworthy &' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","Provenance tracking and public feedback can improve AI content risk management by ensuring transparency and accountability in the AI development process. Provenance tracking helps in monitoring the origin and history of data used in AI systems, which can identify and mitigate risks related to data quality, bias, and integrity. Public feedback allows for diverse stakeholder input, which can highlight potential issues and concerns that may not be evident to developers alone. This collaborative approach can lead to more robust risk identification and management, ultimately enhancing the trustworthiness and reliability of AI systems.","Provenance tracking and public feedback can improve AI content risk management by tracing the origin and history of content, enabling better knowledge of trustworthiness in AI systems, and facilitating greater information access about both authentic and synthetic content.
Integrating pre- and post-deployment external feedback into the monitoring process for GAI models can enhance awareness of performance changes and mitigate potential risks and harms from outputs.",0.75,1.0000000000000002,0.75,0.5654761904620536,0.6835148770401782 +How can designers ensure user understanding and privacy in automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, de\xad\nvelopers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and de\xad\nletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfus\xad\ncate user choice or burden users with defaults that are privacy invasive. Con\xad\nsent should only be used to justify collection of data in cases where it can be' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse.
Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']","Designers can ensure user understanding and privacy in automated systems by: + +1. **Providing Clear Documentation**: Designers, developers, and deployers should offer generally accessible plain language documentation that includes clear descriptions of the overall system functioning, the role of automation, notice of system use, the responsible individual or organization, and explanations of outcomes that are clear, timely, and accessible. This documentation should be kept up-to-date, and users should be notified of significant changes in use cases or key functionalities. + +2. **Protecting from Abusive Data Practices**: Designers should build in protections to safeguard users from abusive data practices. This includes ensuring that data collection conforms to reasonable expectations and only collecting data strictly necessary for the specific context. They should seek user permission and respect decisions regarding data collection, use, access, transfer, and deletion to the greatest extent possible. Where obtaining consent is not feasible, alternative privacy by design safeguards should be used. + +3. **Avoiding Privacy-Invasive Defaults**: Systems should not employ user experience and design decisions that obfuscate user choice or burden users with privacy-invasive defaults. Consent should only be used to justify data collection in cases where it can be meaningfully obtained. + +4. **Providing Explanations of Outcomes**: Users should know how and why an outcome impacting them was determined by an automated system, including when the automated system is not the sole input determining the outcome. Explanations provided should be technically valid, meaningful, and useful to users and any operators or others who interact with the system.
+ +By following these steps, designers can help ensure that users understand how automated systems work and that their privacy is protected.","Designers can ensure user understanding and privacy in automated systems by providing plain language documentation, clear descriptions of system functioning, notice of system use, and explanations of outcomes. They should seek user permission and respect decisions regarding data collection, use, access, transfer, and deletion. Consent requests should be brief, understandable, and give users agency over data collection. Enhanced protections and restrictions should be applied to sensitive domains, and surveillance technologies should be subject to heightened oversight.",1.0,1.0000000000000007,0.5,0.32063492062423277,0.5060580889609759 +How do equity and disparity assessments help prevent algorithmic bias?,"['ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'an equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + 'protections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. 
Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'people’s legal problem stories. SSRN. https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4696936 \nHaran, R. (2023) Securing LLM Systems Against Prompt Injection. NVIDIA. \nhttps://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection/ \nInformation Technology Industry Council (2024) Authenticating AI-Generated Content. \nhttps://www.itic.org/policy/ITI_AIContentAuthorizationPolicy_122123.pdf \nJain, S. et al. (2023) Algorithmic Pluralism: A Structural Approach To Equal Opportunity. arXiv. \nhttps://arxiv.org/pdf/2305.08157 \nJi, Z. et al (2023) Survey of Hallucination in Natural Language Generation. ACM Comput. Surv. 55, 12, \nArticle 248. https://doi.org/10.1145/3571730 \nJones-Jang, S. et al. (2022) How do people react to AI failure? Automation bias, algorithmic aversion, and \nperceived controllability. Oxford. https://academic.oup.com/jcmc/article/28/1/zmac029/6827859] \nJussupow, E. et al. (2020) Why Are We Averse Towards Algorithms? A Comprehensive Literature Review']","Equity and disparity assessments help prevent algorithmic bias by including proactive equity assessments as part of the system design, using representative data, protecting against proxies for demographic features, ensuring accessibility for people with disabilities, conducting pre-deployment and ongoing disparity testing and mitigation, and maintaining clear organizational oversight. Independent evaluation and plain language reporting in the form of an algorithmic impact assessment, including disparity testing results and mitigation information, should be performed and made public whenever possible to confirm these protections.","Equity and disparity assessments help prevent algorithmic bias by conducting proactive equity assessments in the design phase to review potential input data, associated historical context, accessibility for people with disabilities, and societal goals to identify potential discrimination and effects on equity. Disparity assessments test automated systems using a broad set of measures to assess whether the system components produce disparities, and demographic data collected for disparity assessment should be separated from data used for the automated system with privacy protections instituted.
When a disparity is identified, steps may be taken to mitigate or eliminate it, and ongoing monitoring and mitigation are performed to assess algorithmic discrimination that might arise from unforeseen interactions or changes.",1.0,1.0000000000000002,1.0,0.5216269841182903,0.490422651311127 +"How do school audio surveillance systems for stress indicators intersect with data protections, especially for students with disabilities?","[""records and education-related data in order to do so. The overarching concerns of surveillance in these \ndomains included concerns about the chilling effects of surveillance on student expression, inappropriate \ncontrol of tenants via surveillance, and the way that surveillance of workers blurs the boundary between work \nand life and exerts extreme and potentially damaging control over workers' lives. Additionally, some panelists \npointed out ways that data from one situation was misapplied in another in a way that limited people's \nopportunities, for example data from criminal justice settings or previous evictions being used to block further \naccess to housing. Throughout, various panelists emphasized that these technologies are being used to shift the \nburden of oversight and efficiency from employers to workers, schools to students, and landlords to tenants, in \nways that diminish and encroach on equality of opportunity; assessment of these technologies should include"" + 'policing, automated license plate readers, real-time facial recognition systems (especially \n those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. \n• Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + ""technology may or may not be part of an effective set of mechanisms to achieve safety.
Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of"" + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3']",I don't know.,School audio surveillance systems monitor student conversations to detect potential 'stress indicators' as a warning of potential violence. These systems have the potential to limit student freedom to express a range of emotions at school and may inappropriately flag students with disabilities who need accommodations or use screen readers or dictation software as cheating.,0.0,0.0,0.6666666666666666,0.6111111110805556,0.185658045881626 +How does the tech companion help implement AI Bill of Rights to protect Americans from automated harms?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'TABLE OF CONTENTS\nFROM PRINCIPLES TO PRACTICE: A TECHNICAL COMPANION TO THE BLUEPRINT \nFOR AN AI BILL OF RIGHTS \n \nUSING THIS TECHNICAL COMPANION\n \nSAFE AND EFFECTIVE SYSTEMS\n \nALGORITHMIC DISCRIMINATION PROTECTIONS\n \nDATA PRIVACY\n \nNOTICE AND EXPLANATION\n \nHUMAN ALTERNATIVES, CONSIDERATION, AND FALLBACK\nAPPENDIX\n \nEXAMPLES OF AUTOMATED SYSTEMS\n \nLISTENING TO THE AMERICAN PEOPLE\nENDNOTES \n12\n14\n15\n23\n30\n40\n46\n53\n53\n55\n63\n13' + 'ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'detailed steps toward actualizing these principles in the technological design process.
These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'moving principles into practice. \nThe expectations given in the Technical Companion are meant to serve as a blueprint for the development of \nadditional technical standards and practices that should be tailored for particular sectors and contexts. While \nexisting laws informed the development of the Blueprint for an AI Bill of Rights, this framework does not detail \nthose laws beyond providing them as examples, where appropriate, of existing protective measures. This \nframework instead shares a broad, forward-leaning vision of recommended principles for automated system \ndevelopment and use to inform private and public involvement with these systems where they have the poten\xad\ntial to meaningfully impact rights, opportunities, or access. Additionally, this framework does not analyze or \ntake a position on legislative and regulatory proposals in municipal, state, and federal government, or those in \nother countries.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'technical companion is intended to be used as a reference by people across many circumstances – anyone \nimpacted by automated systems, and anyone developing, designing, deploying, evaluating, or making policy to \ngovern the use of an automated system. \nEach principle is accompanied by three supplemental sections: \n1\n2\nWHY THIS PRINCIPLE IS IMPORTANT: \nThis section provides a brief summary of the problems that the principle seeks to address and protect against, including \nillustrative examples. \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS: \n• The expectations for automated systems are meant to serve as a blueprint for the development of additional technical\nstandards and practices that should be tailored for particular sectors and contexts.\n• This section outlines practical steps that can be implemented to realize the vision of the Blueprint for an AI Bill of Rights. The \nexpectations laid out often mirror existing practices for technology development, including pre-deployment testing, ongoing' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad']","The technical companion to the Blueprint for an AI Bill of Rights helps implement the AI Bill of Rights by providing practical next steps to move the principles into practice. It promotes common approaches that allow technological innovation to flourish while protecting people from harm.
The companion serves as a reference for anyone impacted by automated systems or involved in their development, design, deployment, evaluation, or policy-making. It outlines expectations for automated systems, which serve as a blueprint for developing additional technical standards and practices tailored for specific sectors and contexts. These expectations include pre-deployment testing, ongoing monitoring, and other practices to prevent harms and ensure key development decisions are vetted by an ethics review.","The technical companion provides examples and concrete steps for communities, industry, governments, and others to take in order to build protections into policy, practice, or the technological design process. It outlines practical steps for the development of additional technical standards and practices, including pre-deployment testing, ongoing monitoring, and governance structures for automated systems. It also includes expectations about reporting for transparency, so the American people can have confidence that their rights, opportunities, and access are respected.",1.0,0.9153379955282014,1.0,0.9765432098656928,0.48468616981097395 +How to mitigate Human-AI risks in evaluations and pre-deployment?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv.
https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5']",I don't know.,"To mitigate Human-AI risks in evaluations and pre-deployment, several actions can be taken: 1. Assess and manage statistical biases related to GAI content provenance through techniques such as re-sampling, re-weighting, or adversarial training. 2. Document how content provenance data is tracked and how that data interacts with privacy and security, including anonymizing data, leveraging privacy output filters, and removing any personally identifiable information (PII). 3. Provide human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications. 4. Use techniques such as anonymization, differential privacy, or other privacy-enhancing technologies to minimize the risks associated with linking AI-generated content back to individual human subjects. 5. Evaluate claims of model capabilities using empirically validated methods. 6. Share results of pre-deployment testing with relevant GAI Actors. 7. Utilize a purpose-built testing environment such as NIST Dioptra to empirically evaluate GAI trustworthy characteristics. 8. Avoid extrapolating GAI system performance or capabilities from narrow, non-systematic, and anecdotal assessments. 9. Document the extent to which human domain knowledge is employed to improve GAI system performance. 10. Review and verify sources and citations in GAI system outputs during pre-deployment risk measurement and ongoing monitoring activities. 11. Track and document instances of anthropomorphization in GAI system interfaces. 12. Verify GAI system training data and TEVV data provenance, and ensure that fine-tuning or retrieval-augmented generation data is grounded. 13. Regularly review security and safety guardrails, especially if the GAI system is being operated in novel circumstances.",0.0,0.0,0.0,0.5259259259171605,0.2429127996658477 +How to design explanatory mechanisms in high-risk automated systems for pre-decision transparency and post-decision accessibility?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs.
\n3' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'meaningful and useful to you and to any operators or others who \nneed to understand the system, and calibrated to the level of risk \nbased on the context. Reporting that includes summary information \nabout these automated systems in plain language and assessments of \nthe clarity and quality of the notice and explanations should be made \npublic whenever possible. \nNOTICE AND EXPLANATION\n40' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"In high-risk automated systems, explanatory mechanisms should be built into the system design so that the system’s full behavior can be explained in advance (i.e., only fully transparent models should be used), rather than as an after-the-decision interpretation.
The explanation provided by a system should accurately reflect the factors and the influences that led to a particular decision, and should be meaningful for the particular customization based on purpose, target, and level of risk.",0.0,0.0,1.0,0.22619047617916663,0.1800906539558884 +How do biometrics affect access and decisions for benefits or penalties?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, elec \n-ctrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32']","Based on the provided context, biometric systems are used as access control and in systems that make benefits or services related decisions on a fully or partially autonomous basis, such as determining to revoke benefits. + +",The answer to given question is not present in context,1.0,0.8784186265172481,1.0,0.16666666665,0.1840286620616858 +How do equity assessments and disparity tests reduce algorithmic bias?,"['evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'an equitable way.
This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'protections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination.
Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design']","Equity assessments and disparity tests reduce algorithmic bias by ensuring that protections are built into the design, deployment, and ongoing use of automated systems. These assessments involve proactive measures such as using representative data, protecting against proxies for demographic features, ensuring accessibility for people with disabilities, and conducting pre-deployment and ongoing disparity testing and mitigation. Additionally, independent evaluation and plain language reporting in the form of an algorithmic impact assessment, including disparity testing results and mitigation information, should be performed and made public whenever possible to confirm these protections. This comprehensive approach helps identify and address potential biases, thereby reducing algorithmic discrimination.","Equity assessments and disparity tests reduce algorithmic bias by ensuring that systems are designed and used in an equitable way. This includes proactive equity assessments as part of the system design, use of representative data, protection against proxies for demographic features, ensuring accessibility for people with disabilities, pre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight.",1.0,0.996360101002045,1.0,0.599999999988,0.9535181021091632 +How does synthetic data proportion help prevent AI model collapse?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database.
https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'arXiv. https://arxiv.org/pdf/2308.14752 \nPartnership on AI (2023) Building a Glossary for Synthetic Media Transparency Methods, Part 1: Indirect \nDisclosure. https://partnershiponai.org/glossary-for-synthetic-media-transparency-methods-part-1-\nindirect-disclosure/ \nQu, Y. et al. (2023) Unsafe Diffusion: On the Generation of Unsafe Images and Hateful Memes From Text-\nTo-Image Models. arXiv. https://arxiv.org/pdf/2305.13873 \nRafat, K. et al. (2023) Mitigating carbon footprint for knowledge distillation based deep learning model \ncompression. PLOS One. https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0285668 \nSaid, I. et al. (2022) Nonconsensual Distribution of Intimate Images: Exploring the Role of Legal Attitudes \nin Victimization and Perpetration. Sage. \nhttps://journals.sagepub.com/doi/full/10.1177/08862605221122834#bibr47-08862605221122834 \nSandbrink, J. (2023) Artificial intelligence and biological misuse: Differentiating risks of language models' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952']",I don't know.,Assessing the proportion of synthetic to non-synthetic training data and verifying that the training data is not overly homogenous or GAI-produced helps mitigate concerns of model collapse.,0.0,0.0,0.0,0.19999999998,0.18136181947853552 +How do hiring algorithms and social media data impact democracy?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'SECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented.
In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent. \nThese outcomes are deeply harmful—but they are not inevitable. Automated systems have brought about extraor-' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","Based on the provided context, hiring algorithms and social media data impact democracy by limiting opportunities and preventing access to critical resources or services. Specifically, algorithms used in hiring decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. Additionally, unchecked social media data collection has been used to threaten people’s opportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or consent. These outcomes are deeply harmful to democracy. + +","Algorithms used in hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. Unchecked social media data collection has been used to threaten people’s opportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or consent.",1.0,0.9999999999999997,1.0,0.249999999975,0.8318017737037736 +How can unproven tech and data misuse cause harm?,"['Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and data that are considered sensitive are understood to change over time based on societal norms and context.
\n36' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of"" + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']","Based on the provided context, unproven technology and data misuse can cause harm in several ways: + +1. **Replication of Unjust Outcomes**: Adverse or irrelevant data can lead to the replication of unjust outcomes. +2. **Confirmation Bias and Deference to Inaccurate Systems**: There is a tendency for people to defer to potentially inaccurate automated systems, which can lead to confirmation bias. +3. **Severe Impact on Individuals and Communities**: The impact of these systems on individuals and communities can be severe, including loss of jobs, custody of children, and surveillance leading to chilling effects. +4. **Negative Signals to Community Members**: Surveillance and the use of these systems can send negative signals to community members about how they are viewed. + +These points highlight the potential for significant harm due to the misuse of data and unproven technology.","Unproven technology and data misuse can cause harm in several ways. Technologies that have not been proven to work within an acceptable range of error can lead to substantial and unjustified harm. Automated systems relying on historical data can allow irrelevant information from past decisions to affect decision-making in unrelated situations. Technologies can also be designed to violate safety, such as those facilitating stalking, or lead to unintended harms through their use. Examples include a proprietary model for predicting sepsis that underperformed and caused 'alert fatigue,' social media moderation systems silencing counter speech, tracking devices being misused by stalkers, and police deployment algorithms sending police to incorrect neighborhoods due to feedback loops from reused data.",0.9,0.965342453464578,0.8,0.6111111110805556,0.6848456683081984 +Why compare system performance with human methods after extensive tests?,"['and biological design tools. arXiv.
https://arxiv.org/pdf/2306.13952' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,"System performance should be compared with the in-place, potentially human-driven, status quo procedures, with existing human performance considered as a performance baseline for the algorithm to meet pre-deployment, and as a lifecycle minimum performance standard.",0.0,0.0,0.0,0.49999999995,0.1832016020730998 +How do confident errors mislead users?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.'
+ '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'purpose specification. For example, most model developers do not disclose specific data sources on \nwhich models were trained, limiting user awareness of whether personally identifiably information (PII) \nwas trained on and, if so, how it was collected. \nModels may leak, generate, or correctly infer sensitive information about individuals. For example, \nduring adversarial attacks, LLMs have revealed sensitive information (from the public domain) that was \nincluded in their training data. This problem has been referred to as data memorization, and may pose \nexacerbated privacy risks even for data present only in a small number of training samples. \nIn addition to revealing sensitive information in GAI training data, GAI models may be able to correctly \ninfer PII or sensitive data that was not in their training data nor disclosed by the user by stitching \ntogether information from disparate sources. These inferences can have negative impact on an individual' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']",I don't know.,"Confident errors, also known as confabulations or fabrications, mislead users by producing confidently stated but erroneous or false content.",0.0,0.0,0.0,0.0,0.18321323393366046 +How do data biases impact loans, hiring, and education?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,"Data biases impact loans by causing applicants who attended Historically Black Colleges or Universities (HBCUs) to be charged higher loan prices for refinancing student loans compared to those who did not attend HBCUs, even when controlling for other credit-related factors. In hiring, a tool that learned features from a company's predominantly male employees rejected women applicants for discriminatory reasons, penalizing resumes with the word 'women's.' In education, a predictive model used by universities to predict student dropout rates used race directly as a predictor, resulting in Black students being deemed at higher risk of dropping out compared to their white peers, which could guide them away from certain majors.",0.0,0.0,0.0,0.19642857141875,0.18290020728597275 +How to design systems to avoid algorithmic bias and ensure safety?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. 
\n36' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,"To design systems to avoid algorithmic bias and ensure safety, systems should be developed with consultation from diverse communities, stakeholders, and domain experts to identify concerns, risks, and potential impacts. They should undergo pre-deployment testing, risk identification and mitigation, and ongoing monitoring to demonstrate they are safe and effective based on their intended use. Protective measures should include the possibility of not deploying the system or removing a system from use. Designers, developers, and deployers should take proactive and continuous measures to protect individuals and communities from algorithmic discrimination and to use and design systems in an equitable way. This includes proactive equity assessments, use of representative data, protection against proxies for demographic features, ensuring accessibility for people with disabilities, pre-deployment and ongoing disparity testing and mitigation, and clear organizational oversight. Independent evaluation and plain language reporting in the form of an algorithmic impact assessment should be performed and made public whenever possible.",0.0,0.0,0.6666666666666666,0.09999999999,0.17870744270437017 diff --git a/Tasks/Task 5/ft_chain_eval_results_df.csv b/Tasks/Task 5/ft_chain_eval_results_df.csv new file mode 100644 index 0000000000000000000000000000000000000000..1bb2e1397b65cd908c9aba67ab810ef2de966dc1 --- /dev/null +++ b/Tasks/Task 5/ft_chain_eval_results_df.csv @@ -0,0 +1,560 @@ +question,contexts,answer,ground_truth,faithfulness,answer_relevancy,context_recall,context_precision,answer_correctness +What procedures should be established and maintained for escalating GAI system incidents to the organizational risk management authority?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. 
Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Priorities Related to Information Integrity Research and Development.' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. 
\n51']",I don't know.,Establish and maintain procedures for escalating GAI system incidents to the organizational risk management authority when specific criteria for deactivation or disengagement is met for a particular context of use or for the GAI system as a whole.,0.0,0.0,0.0,0.162499999991875,0.1806177835407171 +How might sophisticated threat actors use GAI-powered security co-pilots in cybersecurity attacks?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Information security for computer systems and data is a mature field with widely accepted and \nstandardized practices for offensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of offensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. \nOffensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of' + 'harm, such as the ideation and design of novel harmful chemical or biological agents. \nWhile some of these described capabilities lie beyond the reach of existing GAI tools, ongoing \nassessments of this risk would be enhanced by monitoring both the ability of AI tools to facilitate CBRN \nweapons planning and GAI systems' connection or access to relevant data and tools. \nTrustworthy AI Characteristic: Safe, Explainable and Interpretable' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections']","Sophisticated threat actors might use GAI-powered security co-pilots to augment cybersecurity attacks such as hacking, malware, and phishing. 
These co-pilots could potentially discover vulnerabilities in systems (hardware, software, data) and write code to exploit them.",Sophisticated threat actors might use GAI-powered security co-pilots to inform attackers on how to proactively evade threat detection and escalate privileges after gaining system access.,1.0,1.0000000000000002,1.0,0.49999999995,0.4079478302316967 +"What considerations should organizations take into account to ensure accessibility during the design, development, and deployment of automated systems?","['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'to proactively protect you from harms stemming from unintended, \nyet foreseeable, uses or impacts of automated systems. You should be \nprotected from inappropriate or irrelevant data use in the design, de\xad\nvelopment, and deployment of automated systems, and from the \ncompounded harm of its reuse. 
Independent evaluation and report\xad\ning that confirms that the system is safe and effective, including re\xad\nporting of steps taken to mitigate potential harms, should be per\xad\nformed and the results made public whenever possible. \n15' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'times; and information about the procedures used to address requests for consideration along with the results \nof the evaluation of their accessibility. For systems used in sensitive domains, reporting should include infor\xad\nmation about training and governance procedures for these technologies. Reporting should also include docu\xad\nmentation of goals and assessment of meeting those goals, consideration of data included, and documentation \nof the governance of reasonable access to the technology. Reporting should be provided in a clear and \nmachine-readable manner. \n51' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad']","Organizations should consider the following to ensure accessibility during the design, development, and deployment of automated systems: + +1. **Accessibility and reasonable accommodations**: Ensuring that the systems are accessible to all users, including those with disabilities, and providing reasonable accommodations as needed. +2. 
**AI actor credentials and qualifications**: Ensuring that the individuals involved in the design, development, and deployment of the systems have the necessary credentials and qualifications. +3. **Alignment to organizational values**: Ensuring that the systems align with the organization's values. +4. **Auditing and assessment**: Implementing auditing and assessment processes to evaluate the systems. +5. **Change-management controls**: Establishing controls to manage changes in the systems. +6. **Commercial use**: Considering the implications of commercial use of the systems. +7. **Data provenance**: Ensuring the provenance of the data used in the systems. + +These considerations help ensure that automated systems are designed, developed, and deployed in a manner that is accessible and inclusive.","Organizations should ensure accessibility to people with disabilities during the design, development, and deployment of automated systems. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers to the use or effectiveness of the automated system.",0.5294117647058824,0.988234100068221,0.5,0.48214285713080357,0.8175418744012988 +What actions are suggested for examining and documenting the privacy risk of an AI system?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' 
+ 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']",I don't know.,"The suggested actions for examining and documenting the privacy risk of an AI system include: 1. Conducting AI red-teaming to assess issues such as outputting of training data samples, reverse engineering, model extraction, membership inference risks, revealing biometric, confidential, copyrighted, licensed, patented, personal, proprietary, sensitive, or trade-marked information, and tracking or revealing location information of users or members of training datasets. 2. Engaging directly with end-users and other stakeholders to understand their expectations and concerns regarding content provenance and using this feedback to guide the design of provenance data-tracking techniques. 3. Verifying deduplication of GAI training data samples, particularly regarding synthetic data.",0.0,0.0,0.0,0.499999999975,0.17977236772294186 +How can robust watermarking techniques and corresponding detectors be useful in GAI systems used for content creation?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. 
References ................................................................................................................................ 54']",I don't know.,Robust watermarking techniques and corresponding detectors can be useful in GAI systems used for content creation to identify the source of content.,0.0,0.0,0.0,0.0,0.18652238925722214 +What is the importance of public consultation in the development of automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'or unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. 
\nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public. Inno\xad' + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'monitoring, and governance structures for automated systems, but also go further to address unmet needs for change and offer \nconcrete directions for how those changes can be made. \n• Expectations about reporting are intended for the entity developing or using the automated system. The resulting reports can \nbe provided to the public, regulators, auditors, industry standards groups, or others engaged in independent review, and should \nbe made public as much as possible consistent with law, regulation, and policy, and noting that intellectual property, law \nenforcement, or national security considerations may prevent public release. Where public reports are not possible, the \ninformation should be provided to oversight bodies and privacy, civil liberties, or other ethics officers charged with safeguard \ning individuals' rights. These reporting expectations are important for transparency, so the American people can have' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'system is introduced or a large change implemented. 
This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation']","The importance of public consultation in the development of automated systems includes: + +1. **Identifying and Mitigating Harms**: Public consultation helps in identifying potential harms and concerns that may be unique to or disproportionately affect certain communities. This allows for the mitigation of these harms before the system is deployed. + +2. **Ensuring Diverse Input**: Engaging diverse impacted communities ensures that a wide range of perspectives and risks are considered, which may not be apparent to the developers alone. + +3. **Building Confidence**: Expanded, proactive protections and public consultation can increase confidence in the use of automated systems by ensuring that they are vetted and validated by a broader audience. + +4. **Transparency and Accountability**: Public consultation processes contribute to transparency and accountability, as they involve subject matter experts, sector-specific experts, and experts on potential impacts such as civil rights, civil liberties, and privacy. + +5. **Adapting to Context**: The extent and form of public consultation may differ depending on the specific automated system and its development phase, ensuring that the consultation is relevant and context-specific. + +6. **Confidentiality Considerations**: For private sector applications, consultations before product launch may need to be confidential. Government applications, particularly those related to law enforcement or national security, may also require confidential or limited engagement based on system sensitivities and preexisting oversight laws and structures. + +Overall, public consultation is crucial for ensuring that automated systems are developed and deployed in a manner that is ethical, transparent, and considerate of the diverse needs and potential impacts on different communities.","Public consultation is important in the development of automated systems because it ensures that the public is involved in the design, implementation, deployment, acquisition, and maintenance phases. This consultation emphasizes early-stage engagement before a system is introduced or a large change is implemented. It directly engages diverse impacted communities to consider concerns and risks unique to those communities or disproportionately prevalent or severe for them. The consultation should include subject matter, sector-specific, and context-specific experts, as well as experts on potential impacts such as civil rights, civil liberties, and privacy experts. 
Concerns raised in this consultation should be documented, and the automated system developers should reconsider the system based on this feedback.",0.9473684210526315,0.9873676467824567,1.0,0.8644345237987182,0.8498287007987215 +What mechanisms should be created to provide protections for whistleblowers who report organizational violations or risks to public safety?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Priorities Related to Information Integrity Research and Development.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. 
\nIn discussion of technical and governance interventions that that are needed to protect against the harms of"" + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance']",I don't know.,"Create mechanisms to provide protections for whistleblowers who report, based on reasonable belief, when the organization violates relevant laws or poses a specific and empirically well-substantiated negative risk to public safety (or has already caused harm).",0.0,0.0,0.0,0.0,0.18231348244221784 +What approaches are suggested for mapping AI technology and legal risks of its components?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'and management. One possible way to further categorize these risks, derived in part from the UK's International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'detailed steps toward actualizing these principles in the technological design process. 
These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,"Approaches for mapping AI technology and legal risks of its components include periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, connecting new GAI policies to existing governance and risk management activities, documenting training data curation policies, establishing policies for data collection and retention, implementing policies for the use and protection of third-party intellectual property and training data, re-evaluating models fine-tuned on third-party models, re-evaluating risks when adapting GAI models to new domains, and leveraging approaches to detect the presence of PII or sensitive data in generated output.",0.0,0.0,0.0,0.4105555555473445,0.17865687211082493 +What is confabulation and how can it mislead or deceive users?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'it comes to open-ended prompts for long-form responses and in domains which require highly \ncontextual and/or domain expertise. \nRisks from confabulations may arise when users believe false content – often due to the confident nature \nof the response – leading users to act upon or promote the false information. 
This poses a challenge for \nmany real-world applications, such as in healthcare, where a confabulated summary of patient \ninformation reports could cause doctors to make incorrect diagnoses and/or recommend the wrong \ntreatments. Risks of confabulated content may be especially important to monitor when integrating GAI \ninto applications involving consequential decision making. \nGAI outputs may also include confabulated logic or citations that purport to justify or explain the \nsystem's answer, which may further mislead humans into inappropriately trusting the system's output. \nFor instance, LLMs sometimes provide logical steps for how they arrived at an answer even when the' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'Priorities Related to Information Integrity Research and Development.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']","Confabulation refers to the generation of false content by AI systems, often presented with a high degree of confidence. This can mislead or deceive users because they may believe the false information and act upon it. For example, in healthcare, a confabulated summary of patient information could lead doctors to make incorrect diagnoses or recommend the wrong treatments. Additionally, AI systems may provide confabulated logic or citations that appear to justify or explain their answers, further misleading users into inappropriately trusting the system's output.",Confabulation is the production of confidently stated but erroneous or false content (known colloquially as 'hallucinations' or 'fabrications') by which users may be misled or deceived.,1.0,0.9115600942510714,1.0,0.16666666665,0.9035312041570269 +"What is the purpose of incorporating trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems according to the AI Risk Management Framework (AI RMF) 1.0?","['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. 
\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'and management. 
One possible way to further categorize these risks, derived in part from the UK's International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"The purpose of incorporating trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems according to the AI Risk Management Framework (AI RMF) 1.0 is to improve the ability of organizations to manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory requirements and best practices, and reflects risk management priorities.",0.0,0.0,0.0,0.4499999999775,0.17933560378232244 +"What types of research does the National Science Foundation (NSF) fund to advance the safety, security, and effectiveness of AI systems?","['ENDNOTES\n23. National Science Foundation. National Artificial Intelligence Research Institutes. Accessed Sept. 12,\n2022. https://beta.nsf.gov/funding/opportunities/national-artificial-intelligence-research-institutes\n24. National Science Foundation. Cyber-Physical Systems. Accessed Sept. 12, 2022. https://beta.nsf.gov/\nfunding/opportunities/cyber-physical-systems-cps\n25. National Science Foundation. Secure and Trustworthy Cyberspace. Accessed Sept. 12, 2022. https://\nbeta.nsf.gov/funding/opportunities/secure-and-trustworthy-cyberspace-satc\n26. National Science Foundation. Formal Methods in the Field. Accessed Sept. 12, 2022. https://\nbeta.nsf.gov/funding/opportunities/formal-methods-field-fmitf\n27. National Science Foundation. Designing Accountable Software Systems. Accessed Sept. 12, 2022.\nhttps://beta.nsf.gov/funding/opportunities/designing-accountable-software-systems-dass\n28. The Leadership Conference Education Fund. The Use Of Pretrial "Risk Assessment" Instruments: A' + 'accuracy), and enable human users to understand, appropriately trust, and effectively manage the emerging \ngeneration of artificially intelligent partners.95 The National Science Foundation's program on Fairness in \nArtificial Intelligence also includes a specific interest in research foundations for explainable AI.96\n45' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Priorities Related to Information Integrity Research and Development.' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institute of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institute of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence']","Based on the provided context, the National Science Foundation (NSF) funds research in the following areas to advance the safety, security, and effectiveness of AI systems:

1. National Artificial Intelligence Research Institutes
2. Cyber-Physical Systems
3. Secure and Trustworthy Cyberspace
4. Formal Methods in the Field
5. Designing Accountable Software Systems
6. 
Fairness in Artificial Intelligence, with a specific interest in research foundations for explainable AI

These areas encompass various aspects of AI, including fairness, accountability, security, and trustworthiness.","The National Science Foundation (NSF) funds extensive research to help foster the development of automated systems that adhere to and advance their safety, security, and effectiveness. Multiple NSF programs support research that directly addresses many of these principles: the National AI Research Institutes support research on all aspects of safe, trustworthy, fair, and explainable AI algorithms and systems; the Cyber Physical Systems program supports research on developing safe autonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace program supports research on cybersecurity and privacy enhancing technologies in automated systems; the Formal Methods in the Field program supports research on rigorous formal verification and analysis of automated systems and machine learning, and the Designing Accountable Software Systems program supports research on rigorous and reproducible methodologies for developing software systems with legal and regulatory compliance in mind.",1.0,0.9489662422924591,1.0,0.7182539682360118,0.4707464689755526
How have synthetic NCII and CSAM moved from niche internet forums to mainstream online businesses?,"['and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.' + 'listed for only some subcategories. 
\n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.']",I don't know.,"Websites, mobile apps, and custom-built models that generate synthetic NCII have moved from niche internet forums to mainstream, automated, and scaled online businesses.",0.0,0.0,0.0,0.0,0.1851748030212246
What measures are suggested to mitigate concerns of harmful bias and homogenization in AI training data?,"['organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'the information presented. 
\nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exļ¬ltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exļ¬ltration-poc-and-ļ¬x/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.']","The measures suggested to mitigate concerns of harmful bias and homogenization in AI training data include verifying deduplication of GAI (Generative Artificial Intelligence) training data samples, particularly regarding synthetic data, and evaluating potential biases and stereotypes that could emerge from the AI-generated content using appropriate methodologies, including computational testing methods as well as evaluating structured feedback input.",Assess the proportion of synthetic to non-synthetic training data and verify training data is not overly homogenous or GAI-produced to mitigate concerns of model collapse.,1.0,0.9999992167293253,1.0,0.7048611110934896,0.34751340119336405 +How should organizational risk tolerances and controls be applied to third-party GAI resources?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Priorities Related to Information Integrity Research and Development.' + 'listed for only some subcategories. 
\n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'ethics, or risk management. The Technical Companion builds on this prior work to provide practical next \nsteps to move these principles into practice and promote common approaches that allow technological \ninnovation to flourish while protecting people from harm. \n9' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,"Organizational risk tolerances and controls should be applied to third-party GAI resources by incorporating them into acquisition and procurement processes, assessing personnel credentials and qualifications, performing background checks, filtering GAI input and outputs, grounding, fine-tuning, and retrieval-augmented generation. Additionally, organizational risk tolerance should be applied to the utilization of third-party datasets and other GAI resources, fine-tuned third-party models, and existing third-party models adapted to a new domain. Risk measurements should be reassessed after fine-tuning third-party GAI models.",0.0,0.0,0.0,0.8928571428348213,0.17859662721346473
What characteristics should data have to be considered representative and robust in the development or assessment of automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'or label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections']",I don't know.,Data used as part of system development or assessment should be representative of local communities based on the planned deployment setting and should be reviewed for bias based on the historical and societal context of the data. 
Such data should be sufficiently robust to identify and help to mitigate biases and potential harms.,0.0,0.0,1.0,0.3333333333,0.18190927667567255
What actions are suggested to ensure information integrity in the context of AI systems?,"['Priorities Related to Information Integrity Research and Development.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. 
In some circumstances, application of these principles']",I don't know.,"The suggested actions to ensure information integrity in the context of AI systems include: 1) Employing methods to trace the origin and modifications of digital content. 2) Integrating tools designed to analyze content provenance and detect data anomalies, verify the authenticity of digital signatures, and identify patterns associated with misinformation or manipulation. 3) Disaggregating evaluation metrics by demographic factors to identify any discrepancies in how content provenance mechanisms work across diverse populations. 4) Evaluating novel methods and technologies for the measurement of GAI-related risks including in content provenance, offensive cyber, and CBRN, while maintaining the models’ ability to produce valid, reliable, and factually accurate outputs.",0.0,0.0,0.0,0.6984126983894179,0.18396607167756987
"What are the principles that federal agencies must adhere to under Executive Order 13960 when designing, developing, acquiring, or using AI?","['does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'in whole or in part may not be appropriate given the intended use of automated systems to achieve government \nagency missions. Future sector-specific guidance will likely be necessary and important for guiding the use of \nautomated systems in certain settings such as AI systems used as part of school building security or automated \nhealth diagnostic systems. \nThe Blueprint for an AI Bill of Rights recognizes that law enforcement activities require a balancing of \nequities, for example, between the protection of sensitive law enforcement information and the principle of \nnotice; as such, notice may not be appropriate, or may need to be adjusted to protect sources, methods, and \nother law enforcement equities. Even in contexts where these principles may not apply in whole or in part, \nfederal departments and agencies remain subject to judicial, privacy, and civil liberties oversight as well as \nexisting policies and safeguards that govern automated systems, including, for example, Executive Order 13960,' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. 
\n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'other countries. \nWe have seen modest progress in recent years, with some state and local governments responding to these prob\xad\nlems with legislation, and some courts extending longstanding statutory protections to new and emerging tech\xad\nnologies. There are companies working to incorporate additional protections in their design and use of auto\xad\nmated systems, and researchers developing innovative guardrails. Advocates, researchers, and government \norganizations have proposed principles for the ethical use of AI and other automated systems. These include \nthe Organization for Economic Co-operation and Development’s (OECD’s) 2019 Recommendation on Artificial \nIntelligence, which includes principles for responsible stewardship of trustworthy AI and which the United \nStates adopted, and Executive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government, which sets out principles that govern the federal government’s use of AI. The Blueprint' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"The principles that federal agencies must adhere to under Executive Order 13960 when designing, developing, acquiring, or using AI are: (a) lawful and respectful of our Nation’s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) safe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transparent; and, (i) accountable.",0.0,0.0,1.0,0.38730158728867725,0.17863647133918925
How did the installation of a facial recognition system by a local public housing authority impact the community?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. 
Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that are needed to protect against the harms of"" + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'policing, automated license plate readers, real-time facial recognition systems (especially \n those used in public places or during protected activities like peaceful protests), social media \n monitoring, and ankle monitoring devices; \nVoting-related systems such as signature matching tools; \nSystems with a potential privacy impact such as smart home systems and associated data, \n systems that use or collect health-related data, systems that use or collect education-related \n data, criminal justice system data, ad-targeting systems, and systems that perform big data \n analytics in order to build profiles or infer personal information about individuals; and \nAny system that has the meaningful potential to lead to algorithmic discrimination. \n• Equal opportunities, including but not limited to:\nEducation-related systems such as algorithms that purport to detect student cheating or \n plagiarism, admissions algorithms, online or virtual reality student monitoring systems,' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. 
\n3' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections']",I don't know.,"The installation of a facial recognition system by a local public housing authority led the community, both those living in the housing complex and not, to have videos of them sent to the local police department and made available for scanning by its facial recognition software.",0.0,0.0,0.0,0.49999999995,0.18457611981510857
How have businesses successfully integrated automated customer service with human support teams?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. 
Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'health coverage.”107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. Some federal' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",Businesses have successfully integrated automated customer service with human support teams by using partially automated customer service platforms. These platforms help answer customer questions and compile common problems for human agents to review. This integration allows companies to provide faster customer care while maintaining human agents to answer calls or respond to complicated requests. Using both AI and human agents is viewed as key to successful customer service.,The customer service industry has successfully integrated automated services such as chat-bots and AI-driven call response systems with escalation to a human support team. Many businesses now use partially automated customer service platforms that help answer customer questions and compile common problems for human agents to review. These integrated human-AI systems allow companies to provide faster customer care while maintaining human agents to answer calls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to successful customer service.,1.0,0.9908724557828595,1.0,0.22499999998875,0.9948194187508337
Who were some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights?,"['ing sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. 
The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans’ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. \n4' + 'APPENDIX\n• OSTP conducted meetings with a variety of stakeholders in the private sector and civil society. Some of these\nmeetings were specifically focused on providing ideas related to the development of the Blueprint for an AI\nBill of Rights while others provided useful general context on the positive use cases, potential harms, and/or\noversight possibilities for these technologies. Participants in these conversations from the private sector and\ncivil society included:\nAdobe \nAmerican Civil Liberties Union \n(ACLU) \nThe Aspen Commission on \nInformation Disorder \nThe Awood Center \nThe Australian Human Rights \nCommission \nBiometrics Institute \nThe Brookings Institute \nBSA | The Software Alliance \nCantellus Group \nCenter for American Progress \nCenter for Democracy and \nTechnology \nCenter on Privacy and Technology \nat Georgetown Law \nChristiana Care \nColor of Change \nCoworker \nData Robot \nData Trust Alliance \nData and Society Research Institute \nDeepmind \nEdSAFE AI Alliance \nElectronic Privacy Information' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous']","Some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights included:

- Adobe
- American Civil Liberties Union (ACLU)
- The Aspen Commission on Information Disorder
- The Awood Center
- The Australian Human Rights Commission
- Biometrics Institute
- The Brookings Institute
- BSA | The Software Alliance
- Cantellus Group
- Center for American Progress
- Center for Democracy and Technology
- Center on Privacy and Technology at Georgetown Law
- Christiana Care
- Color of Change
- Coworker
- Data Robot
- Data Trust Alliance
- Data and Society Research Institute
- Deepmind
- EdSAFE AI Alliance
- Electronic Privacy Information","Some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights included Adobe, American Civil Liberties Union (ACLU), The Aspen Commission on Information Disorder, The Awood Center, The Australian Human Rights Commission, Biometrics Institute, The Brookings Institute, BSA | The Software Alliance, Cantellus Group, Center for American Progress, Center for Democracy and Technology, Center on Privacy and Technology at Georgetown Law, Christiana Care, Color of Change, Coworker, Data Robot, Data Trust Alliance, Data and Society Research Institute, Deepmind, EdSAFE AI Alliance, Electronic Privacy Information Center (EPIC), Encode Justice, Equal AI, Google, Hitachi's AI Policy Committee, The Innocence Project, Institute of Electrical and Electronics Engineers (IEEE), Intuit, Lawyers Committee for Civil Rights Under Law, Legal Aid Society, The Leadership Conference on Civil and Human Rights, Meta, Microsoft, The MIT AI Policy Forum, Movement Alliance Project, The National Association of Criminal Defense Lawyers, O’Neil Risk Consulting & Algorithmic Auditing, The Partnership on AI, Pinterest, The Plaintext Group, pymetrics, SAP, The Security Industry Association, Software and Information Industry Association (SIIA), Special Competitive Studies Project, Thorn, United for Respect, University of California at Berkeley Citris Policy Lab, University of California at Berkeley Labor Center, Unfinished/Project Liberty, Upturn, US Chamber of Commerce, US Chamber of Commerce Technology Engagement Center A.I. Working Group, Vibrent Health, Warehouse Worker Resource Center, and Waymap.",,0.973242051616895,1.0,0.49999999995,0.24589861625308068
What should be done if the residual negative risk of an AI system exceeds the organizational risk tolerance?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Priorities Related to Information Integrity Research and Development.' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,Re-evaluate safety features of fine-tuned models when the negative risk exceeds organizational risk tolerance.,0.0,0.0,0.0,0.0,0.18244349869327503
What factors should be assessed to determine and document the expected and acceptable GAI system context of use?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. 
Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'imply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency.' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. 
\nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV']",I don't know.,"To determine and document the expected and acceptable GAI system context of use, the following factors should be assessed: Assumptions and limitations; Direct value to the organization; Intended operational environment and observed usage patterns; Potential positive and negative impacts to individuals, public safety, groups, communities, organizations, democratic institutions, and the physical environment; Social norms and expectations.",0.0,0.0,0.0,0.36666666664833336,0.1836481343264991
What is model collapse and what are its potential consequences?,"['and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20']",I don't know.,"Model collapse can occur when model training over-relies on synthetic data, resulting in data points disappearing from the distribution of the new model's outputs. 
In addition to threatening the robustness of the model overall, model collapse could lead to homogenized outputs, including by amplifying any homogenization from the model used to generate the synthetic training data.",0.0,0.0,0.0,0.0,0.17883108386750768
What factors should be considered when decommissioning AI systems to ensure safety and maintain the organization's trustworthiness?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. \n3' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'Priorities Related to Information Integrity Research and Development.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. 
governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles']",I don't know.,"When decommissioning AI systems, the following factors should be considered to ensure safety and maintain the organization's trustworthiness: Data retention requirements; Data security, e.g., containment, protocols, Data leakage after decommissioning; Dependencies between upstream, downstream, or other data, internet of things (IOT) or AI systems; Use of open-source data or models; Usersā€™ emotional entanglement with GAI functions.",0.0,0.0,0.0,0.36111111109907407,0.17929004840532906 +What issues are associated with the tool meant to help low-risk federal prisoners win early release?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'Priorities Related to Information Integrity Research and Development.' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. 
\n20' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,']",I don't know.,The tool meant to help low-risk federal prisoners win early release is plagued by flaws.,0.0,0.0,0.0,0.0,0.1851984875221609 +How can GAI misuse cause emotional harm short and long-term?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'even if the inferences are not accurate (e.g., confabulations), and especially if they reveal information \nthat the individual considers sensitive or that is used to disadvantage or harm them. \nBeyond harms from information exposure (such as extortion or dignitary harm), wrong or inappropriate \ninferences of PII can contribute to downstream or secondary harmful impacts. For example, predictive \ninferences made by GAI models based on PII or protected attributes can contribute to adverse decisions, \nleading to representational or allocative harms to individuals or groups (see Harmful Bias and \nHomogenization below).' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily “averse” to GAI systems, and thus \ndeprive themselves or others of GAI’s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.']",I don't know.,"GAI misuse can cause emotional harm both immediately and over extended periods. Immediate emotional harm can result from the distribution of harmful deepfake images, while long-term emotional harm can stem from the effects of disinformation on societal trust in public institutions.",0.0,0.0,0.6666666666666666,0.3499999999825,0.18337395524353184 +How is diverse feedback used in GAI evaluation to ensure provenance and reduce bias?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' 
+ 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36']","The context does not provide specific details on how diverse feedback is used in GAI evaluation to ensure provenance and reduce bias. + +I don't know.","Diverse feedback is used in GAI evaluation to ensure provenance and reduce bias by recording and integrating structured feedback from operators, users, and potentially impacted communities through methods such as user research studies, focus groups, or community forums. This feedback is actively sought to assess content quality and potential biases, and the results are documented and incorporated into design, implementation, deployment approval, monitoring, and decommission decisions.",0.3333333333333333,0.0,0.0,0.0,0.3397933620174777 +How did a store's misuse of predictive analytics show the need for better data protection?,"['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'Priorities Related to Information Integrity Research and Development.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. 
\n36' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']",I don't know.,"A department store company used predictive analytics applied to collected consumer data to determine that a teenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her house, revealing to her father that she was pregnant.",0.0,0.0,0.0,0.249999999975,0.18508829933868393 +How do safety metrics and security measures help AI handle and fix errors from threats?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' 
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"Safety metrics reflect system reliability and robustness, real-time monitoring, and response times for AI system failures. Security measures help assess vulnerabilities and threats, benchmark system security, gather user feedback, identify effective security metrics, measure content authentication methods, and perform AI red-teaming to assess resilience against various attacks.",0.0,0.0,1.0,0.22619047617916663,0.1780008643616345 +How to ensure accessibility and equity in automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public’s rights, opportunities, \nor access to critical needs. 
\n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + ""have the potential to meaningfully impact individuals' or communities' exercise of: \nRIGHTS, OPPORTUNITIES, OR ACCESS\nCivil rights, civil liberties, and privacy, including freedom of speech, voting, and protections from discrimi\xad\nnation, excessive punishment, unlawful surveillance, and violations of privacy and other freedoms in both \npublic and private sector contexts; \nEqual opportunities, including equitable access to education, housing, credit, employment, and other \nprograms; or, \nAccess to critical resources or services, such as healthcare, financial services, safety, social services, \nnon-deceptive information about goods and services, and government benefits. \nA list of examples of automated systems for which these principles should be considered is provided in the \nAppendix. The Technical Companion, which follows, offers supportive guidance for any person or entity that \ncreates, deploys, or oversees automated systems."" + 'You should know that an automated system is being used, \nand understand how and why it contributes to outcomes \nthat impact you. Designers, developers, and deployers of automat\xad\ned systems should provide generally accessible plain language docu\xad\nmentation including clear descriptions of the overall system func\xad\ntioning and the role automation plays, notice that such systems are in \nuse, the individual or organization responsible for the system, and ex\xad\nplanations of outcomes that are clear, timely, and accessible. Such \nnotice should be kept up-to-date and people impacted by the system \nshould be notified of significant use case or key functionality chang\xad\nes. You should know how and why an outcome impacting you was de\xad\ntermined by an automated system, including when the automated \nsystem is not the sole input determining the outcome. 
Automated \nsystems should provide explanations that are technically valid, \nmeaningful and useful to you and to any operators or others who' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty']",I don't know.,"To ensure accessibility and equity in automated systems, organizations should design, develop, and deploy systems in ways that ensure accessibility to people with disabilities. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers. Additionally, automated systems should be tested using a broad set of measures to assess whether the system components produce disparities. Disparity assessments should include demographic performance measures, overall and subgroup parity assessment, and calibration. When disparities are identified, steps should be taken to mitigate or eliminate them, and ongoing monitoring should be conducted to assess algorithmic discrimination that might arise from unforeseen interactions or changes to the system or context of use.",0.0,0.0,1.0,0.5768253968138602,0.18070007468605156 +How can orgs use public feedback and incident reports to improve GAI content in decision-making?,"['Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.']","Organizations can use public feedback and incident reports to improve GAI content in decision-making by implementing feedback activities that follow human subjects research requirements and best practices such as informed consent and subject compensation. Additionally, documentation practices including logging, recording, and analyzing GAI incidents can facilitate smoother sharing of information with relevant AI Actors. 
Regular information sharing, change management records, version history, and metadata can also empower AI Actors responding to and managing AI incidents.","Organizations can use public feedback and incident reports to improve GAI content in decision-making by integrating pre- and post-deployment external feedback into the monitoring process for GAI models and corresponding applications. This helps enhance awareness of performance changes and mitigate potential risks and harms from outputs. Additionally, documenting, reporting, and sharing information about GAI incidents can help mitigate and prevent harmful outcomes by assisting relevant AI Actors in tracing impacts to their source. Greater awareness and standardization of GAI incident reporting could promote transparency and improve GAI risk management across the AI ecosystem.",0.9333333333333333,0.9365147977562281,1.0,0.31944444443379627,0.36052977156806987 +How do feedback and testing reveal GAI biases and societal impacts?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. 
\n5' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.']",I don't know.,"Feedback and testing reveal GAI biases and societal impacts through methods such as user research studies, focus groups, community forums, adversarial testing, real-world scenario evaluations, and structured public feedback exercises. These methods help identify potential biases, understand misuse scenarios, and assess the general awareness among end users and impacted communities.",0.0,0.0,0.0,0.11111111109999999,0.18169426637419137 +How do EO 13960 and NIST AI RMF ensure AI transparency and accountability for federal agencies?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Differ from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artificial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artificial-intelligence' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/final \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. 
\nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Profiles. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Profiles/6-sec-profile \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Priorities Related to Information Integrity Research and Development.' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government’s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'Descriptions of AI Actor Tasks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_A#:~:text=AI%20actors%\n20in%20this%20category,data%20providers%2C%20system%20funders%2C%20product']",I don't know.,"EO 13960 ensures AI transparency and accountability for federal agencies by requiring that AI is transparent and accountable among other principles. The NIST AI Risk Management Framework aims to foster the development of innovative approaches to address characteristics of trustworthiness, including transparency and accountability, during pre-design, design and development, deployment, use, and testing and evaluation of AI technologies and systems.",0.0,0.0,0.0,0.499999999975,0.17676853884076896 +How can human expertise and content provenance boost GAI performance and ensure data privacy?,"['guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'and a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily “averse” to GAI systems, and thus \ndeprive themselves or others of GAI’s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts.' + 'purpose specification. For example, most model developers do not disclose specific data sources on \nwhich models were trained, limiting user awareness of whether personally identifiably information (PII) \nwas trained on and, if so, how it was collected. \nModels may leak, generate, or correctly infer sensitive information about individuals. For example, \nduring adversarial attacks, LLMs have revealed sensitive information (from the public domain) that was \nincluded in their training data. This problem has been referred to as data memorization, and may pose \nexacerbated privacy risks even for data present only in a small number of training samples. \nIn addition to revealing sensitive information in GAI training data, GAI models may be able to correctly \ninfer PII or sensitive data that was not in their training data nor disclosed by the user by stitching \ntogether information from disparate sources. These inferences can have negative impact on an individual' + 'organization’s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). 
Until new and rigorous' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'MP-2.3-001 \nAssess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of \nevaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs). \nInformation Integrity']",I don't know.,"Human expertise and content provenance can boost GAI performance by employing techniques such as RLHF, fine-tuning, retrieval-augmented generation, content moderation, and business rules. To ensure data privacy, it is important to anonymize data, leverage privacy output filters, and remove any personally identifiable information (PII).",0.0,0.0,1.0,0.7961904761745524,0.18460049561184388 +"What's the best environment for testing GAI's trustworthiness, data privacy, and human subject protection?","['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. References ................................................................................................................................ 54' + 'MP-2.3-001 \nAssess the accuracy, quality, reliability, and authenticity of GAI output by \ncomparing it to a set of known ground truth data and by using a variety of \nevaluation methods (e.g., human oversight and automated evaluation, proven \ncryptographic techniques, review of content inputs). \nInformation Integrity' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. 
\n36']",I don't know.,"The best environment for testing GAI's trustworthiness, data privacy, and human subject protection is a purpose-built testing environment such as NIST Dioptra.",0.0,0.0,0.0,0.0,0.18495814642243757 +How can automated systems ensure accessibility and prevent bias?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. governmentā€™s position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. In some circumstances, application of these principles' + 'external use, narrow vs. broad application scope, ļ¬ne-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'The demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system.' 
+ 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + ""technology may or may not be part of an effective set of mechanisms to achieve safety. Various panelists raised \nconcerns about the validity of these systems, the tendency of adverse or irrelevant data to lead to a replication of \nunjust outcomes, and the confirmation bias and tendency of people to defer to potentially inaccurate automated \nsystems. Throughout, many of the panelists individually emphasized that the impact of these systems on \nindividuals and communities is potentially severe: the systems lack individualization and work against the \nbelief that people can change for the better, system use can lead to the loss of jobs and custody of children, and \nsurveillance can lead to chilling effects for communities and sends negative signals to community members \nabout how they're viewed. \nIn discussion of technical and governance interventions that that are needed to protect against the harms of""]",I don't know.,"Automated systems can ensure accessibility by being designed, developed, and deployed in ways that ensure accessibility to people with disabilities. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers. To prevent bias, automated systems should be tested using a broad set of measures to assess whether the system components produce disparities. This includes demographic performance measures, overall and subgroup parity assessment, and calibration. Disparity mitigation steps should be taken if a disparity is identified, and ongoing monitoring should be performed to assess algorithmic discrimination that might arise from unforeseen interactions or changes.",0.0,0.0,1.0,0.5638888888776111,0.1809630352708985 +How do the U.S. AI Safety Institute and AI Risk Management Framework support the 2023 AI Executive Order?,"['BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + '57 \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix B: \nHow AI Risks Diļ¬€er from Traditional Software Risks. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Appendices/Appendix_B \nNational Institute of Standards and Technology (2023) AI RMF Playbook. 
\nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/Playbook \nNational Institue of Standards and Technology (2023) Framing Risk \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/1-sec-risk \nNational Institute of Standards and Technology (2023) The Language of Trustworthy AI: An In-Depth \nGlossary of Terms https://airc.nist.gov/AI_RMF_Knowledge_Base/Glossary \nNational Institue of Standards and Technology (2022) Towards a Standard for Identifying and Managing \nBias in Artiļ¬cial Intelligence https://www.nist.gov/publications/towards-standard-identifying-and-\nmanaging-bias-artiļ¬cial-intelligence' + 'https://www.bloomberg.com/graphics/2023-generative-ai-bias/. \nNational Institute of Standards and Technology (2024) Adversarial Machine Learning: A Taxonomy and \nTerminology of Attacks and Mitigations https://csrc.nist.gov/pubs/ai/100/2/e2023/ļ¬nal \nNational Institute of Standards and Technology (2023) AI Risk Management Framework. \nhttps://www.nist.gov/itl/ai-risk-management-framework \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 3: AI \nRisks and Trustworthiness. \nhttps://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Foundational_Information/3-sec-characteristics \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Chapter 6: AI \nRMF Proļ¬les. https://airc.nist.gov/AI_RMF_Knowledge_Base/AI_RMF/Core_And_Proļ¬les/6-sec-proļ¬le \nNational Institute of Standards and Technology (2023) AI Risk Management Framework, Appendix A: \nDescriptions of AI Actor Tasks.' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHINCAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'ā€¢ Accessibility and reasonable \naccommodations \nā€¢ AI actor credentials and qualiļ¬cations \nā€¢ Alignment to organizational values \nā€¢ Auditing and assessment \nā€¢ Change-management controls \nā€¢ Commercial use \nā€¢ Data provenance' + 'Priorities Related to Information Integrity Research and Development.' + 'About AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artiļ¬cial intelligence (AI) so that its full commercial and societal beneļ¬ts can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulļ¬ll the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the eļ¬€orts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. 
\nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staļ¬€ and guest' + 'organizationā€™s business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exļ¬ltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exļ¬ltration-poc-and-ļ¬x/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. (2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.']","The U.S. AI Safety Institute and the AI Risk Management Framework (AI RMF) support the 2023 AI Executive Order by advancing the science necessary for the safe, secure, and trustworthy development and use of AI. The National Institute of Standards and Technology (NIST) established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to continue efforts set in motion by the Executive Order, focusing on building reliable, safe, transparent, explainable, privacy-enhanced, and fair AI technologies.",The answer to given question is not present in context,1.0,0.9245110032267959,1.0,0.0,0.7738008646166127 +How can we ensure effective oversight and risk mgmt of GAI systems with third-party input?,"['Table of Contents \n1. \nIntroduction ..............................................................................................................................................1 \n2. \nOverview of Risks Unique to or Exacerbated by GAI .....................................................................2 \n3. \nSuggested Actions to Manage GAI Risks ......................................................................................... 12 \nAppendix A. Primary GAI Considerations ............................................................................................... 47 \nAppendix B. 
References ................................................................................................................................ 54' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' + 'whether they are genuinely helpful in solving an identified problem. \nIn discussion of technical and governance interventions that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,"To ensure effective oversight and risk management of GAI systems with third-party input, organizations should establish policies and procedures that address AI risks associated with third-party entities. 
This includes categorizing different types of GAI content with associated third-party rights, conducting joint educational activities and events to promote best practices for managing GAI risks, developing and validating approaches for measuring the success of content provenance management efforts, and drafting and maintaining well-defined contracts and service level agreements (SLAs) that specify content ownership, usage rights, quality standards, security requirements, and content provenance expectations.",0.0,0.0,0.4,0.3555555555437037,0.18059689442589597 
+How is the integrity of third-party pre-trained models ensured in GAI?,"['lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'listed for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later.' 
+ 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"The integrity of third-party pre-trained models in GAI is ensured through several actions: reviewing transparency artifacts (e.g., system cards and model cards), applying explainable AI (XAI) techniques, documenting how pre-trained models have been adapted, documenting sources and types of training data and their origins, evaluating user-reported problematic content, implementing content filters, real-time monitoring processes, leveraging feedback from organizational boards or committees, using human moderation systems, and using organizational risk tolerance to evaluate acceptable risks and performance metrics.",0.0,0.0,0.0,0.2916666666520833,0.1823635468195095 
+How to ensure data accuracy and integrity in GAI systems while addressing impacts with AI Actors?,"['• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + 'warrant additional human review, tracking and documentation, and greater management oversight. \nAI technology can produce varied outputs in multiple modalities and present many classes of user \ninterfaces. 
This leads to a broader set of AI Actors interacting with GAI systems for widely differing \napplications and contexts of use. These can include data labeling and preparation, development of GAI \nmodels, content moderation, code generation and review, text generation and editing, image and video \ngeneration, summarization, search, and chat. These activities can take place within organizational \nsettings or in the public domain. \nOrganizations can restrict AI applications that cause harm, exceed stated risk tolerances, or that conflict \nwith their tolerances or values. Governance tools and protocols that are applied to other types of AI \nsystems can be applied to GAI systems. These plans and actions include: \n• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.']",I don't know.,"To ensure data accuracy and integrity in GAI systems while addressing impacts with AI Actors, the following actions are suggested: 1) Plan regular engagements with AI Actors responsible for inputs to GAI systems, including third-party data and algorithms, to review and evaluate unanticipated impacts. 2) Review and document accuracy, representativeness, relevance, and suitability of data used at different stages of the AI life cycle. 3) Deploy and document fact-checking techniques to verify the accuracy and veracity of information generated by GAI systems, especially when the information comes from multiple (or unknown) sources. 4) Develop and implement testing techniques to identify GAI-produced content that might be indistinguishable from human-generated content. 5) Implement plans for GAI systems to undergo regular adversarial testing to identify vulnerabilities and potential manipulation or misuse.",0.0,0.0,1.0,0.6726190476055952,0.18022231059059673 
+How do equity assessments and mitigation steps prevent algorithmic bias?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. \n5' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'an equitable way. This protection should include proactive equity \nassessments as part of the system design, use of representative data \nand protection against proxies for demographic features, ensuring \naccessibility for people with disabilities in design and development, \npre-deployment and ongoing disparity testing and mitigation, and \nclear organizational oversight. Independent evaluation and plain \nlanguage reporting in the form of an algorithmic impact assessment, \nincluding disparity testing results and mitigation information, \nshould be performed and made public whenever possible to confirm \nthese protections.\n23' + 'protections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. 
Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + ""demographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections"" + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022']","Equity assessments and mitigation steps prevent algorithmic bias by incorporating several proactive measures into the system design and deployment process. These measures include:
+
+1. **Proactive Equity Assessments**: Conducting equity assessments as part of the system design to identify and address potential biases early in the development process.
+2. **Use of Representative Data**: Ensuring that the data used to train and operate the system is representative of the diverse populations it will impact, thereby reducing the risk of biased outcomes.
+3. **Protection Against Proxies for Demographic Features**: Implementing safeguards to prevent the use of proxies that could indirectly introduce demographic biases.
+4. 
**Accessibility for People with Disabilities**: Ensuring that the system is accessible to people with disabilities during both design and development phases.
+5. **Pre-Deployment and Ongoing Disparity Testing and Mitigation**: Conducting disparity testing before deploying the system and continuing to test and mitigate disparities throughout its use.
+6. **Clear Organizational Oversight**: Establishing clear oversight mechanisms within the organization to monitor and address any issues related to algorithmic bias.
+7. **Independent Evaluation and Plain Language Reporting**: Performing independent evaluations and providing plain language reports, including disparity testing results and mitigation information, to confirm that protections against bias are effective and transparent.
+
+These steps collectively help to identify, address, and mitigate potential biases, ensuring that automated systems operate in a fair and equitable manner.","Equity assessments and mitigation steps prevent algorithmic bias by conducting proactive equity assessments in the design phase to review potential input data, associated historical context, accessibility for people with disabilities, and societal goals to identify potential discrimination and effects on equity. Additionally, when a disparity assessment identifies a disparity against an assessed group, steps may be taken to mitigate or eliminate the disparity. This includes evaluating multiple models to select the one with the least adverse impact, modifying data input choices, or identifying a system with fewer disparities. If adequate mitigation is not possible, the use of the automated system should be reconsidered.",0.6086956521739131,0.9100656929237209,0.5,0.5311111111004889,0.8454491647022642 
+How do time-critical systems affect human fallback timing?,"['and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'errors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42' + 'and critical services. The American public deserves the assurance that, when rights, opportunities, or access are \nmeaningfully at stake and there is a reasonable expectation of an alternative to an automated system, they can conveniently opt out of an automated system and will not be disadvantaged for that choice. In some cases, such a human or \nother alternative may be required by law, for example it could be required as "reasonable accommodations" for people \nwith disabilities. \nIn addition to being able to opt out and use a human alternative, the American public deserves a human fallback \nsystem in the event that an automated system fails or causes harm. No matter how rigorously an automated system is \ntested, there will always be situations for which the system fails. 
The American public deserves protection via human \nreview against these outlying or unexpected scenarios. In the case of time-critical systems, the public should not have' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,"In time-critical systems, the human consideration and fallback mechanism should be immediately available or, where possible, available before the harm occurs. Time-critical systems include, but are not limited to, voting-related systems, automated building access and other access systems, systems that form a critical component of healthcare, and systems that have the ability to withhold wages or otherwise cause immediate financial penalties.",0.0,0.0,0.5,0.14285714284285714,0.18285583298127153 
+How do hiring algorithms and social media data impact democracy?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'ethnicity, which can lead to race-based health inequities.47\n25\nAlgorithmic \nDiscrimination \nProtections' + 'SECTION TITLE\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten people's \nopportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or \nconsent. \nThese outcomes are deeply harmful—but they are not inevitable. Automated systems have brought about extraor-' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'Companies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Priorities Related to Information Integrity Research and Development.' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). 
\nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']","Based on the provided context, hiring algorithms and social media data impact democracy by limiting opportunities and preventing access to critical resources or services. Specifically, algorithms used in hiring decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. Additionally, unchecked social media data collection has been used to threaten people's opportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or consent. These outcomes are deeply harmful to democratic principles.
+
+","Algorithms used in hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. Unchecked social media data collection has been used to threaten people's opportunities, undermine their privacy, or pervasively track their activity—often without their knowledge or consent.",1.0,0.9999999999999997,1.0,0.4166666666458333,0.9814428119959655 
+Who oversees assessment in automated systems?,"['detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Priorities Related to Information Integrity Research and Development.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'does not supersede, modify, or direct an interpretation of any existing statute, regulation, policy, or \ninternational instrument. It does not constitute binding guidance for the public or Federal agencies and \ntherefore does not require compliance with the principles described herein. It also is not determinative of what \nthe U.S. government's position will be in any international negotiation. Adoption of these principles may not \nmeet the requirements of existing statutes, regulations, policies, or international instruments, or the \nrequirements of the Federal agencies that enforce them. These principles are not intended to, and do not, \nprohibit or limit any lawful activity of a government agency, including law enforcement, national security, or \nintelligence activities. \nThe appropriate application of the principles set forth in this white paper depends significantly on the \ncontext in which automated systems are being utilized. 
In some circumstances, application of these principles' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous']",I don't know.,Entities responsible for the development or use of automated systems should lay out clear governance structures and procedures. This includes responsibility of specific individuals or entities to oversee ongoing assessment and mitigation.,0.0,0.0,1.0,0.45833333331805554,0.181143320734443 
+How does diverse content in training materials impact societal AI?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'external use, narrow vs. 
broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + '56 \nKarasavva, V. et al. (2021) Personality, Attitudinal, and Demographic Predictors of Non-consensual \nDissemination of Intimate Images. NIH. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9554400/ \nKatzman, J., et al. (2023) Taxonomizing and measuring representational harms: a look at image tagging. \nAAAI. https://dl.acm.org/doi/10.1609/aaai.v37i12.26670 \nKhan, T. et al. (2024) From Code to Consumer: PAI's Value Chain Analysis Illuminates Generative AI's Key \nPlayers. AI. https://partnershiponai.org/from-code-to-consumer-pais-value-chain-analysis-illuminates-\ngenerative-ais-key-players/ \nKirchenbauer, J. et al. (2023) A Watermark for Large Language Models. OpenReview. \nhttps://openreview.net/forum?id=aX8ig9X2a7 \nKleinberg, J. et al. (May 2021) Algorithmic monoculture and social welfare. PNAS. \nhttps://www.pnas.org/doi/10.1073/pnas.2018340118 \nLakatos, S. (2023) A Revealing Picture. Graphika. https://graphika.com/reports/a-revealing-picture']",I don't know.,The answer to given question is not present in context,0.0,0.0,1.0,0.0,0.19520361397610936 
+Which methods use expert feedback, group input, or anonymous surveys?,"['external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'decision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation.' + 'results, and actions taken; and the procedures for and results from independent evaluations. Reporting \nshould be provided in a plain language and machine-readable manner. \n20' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'whether they are genuinely helpful in solving an identified problem. 
\nIn discussion of technical and governance interventions that are needed to protect against the harms of \nthese technologies, panelists individually described the importance of: receiving community input into the \ndesign and use of technologies, public reporting on crucial elements of these systems, better notice and consent \nprocedures that ensure privacy based on context and use case, ability to opt-out of using these systems and \nreceive a fallback to a human process, providing explanations of decisions and how these systems work, the \nneed for governance including training in using these systems, ensuring the technological use cases are \ngenuinely related to the goal task and are locally validated to work, and the need for institution and protection \nof third party audits to ensure systems continue to be accountable and valid. \n57' + 'detailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the public's rights, opportunities, \nor access to critical needs. \n3']",I don't know.,"Participatory engagement methods use expert feedback, group input, or anonymous surveys.",0.0,0.0,0.0,0.11111111109999999,0.18572621750226204 
+Which metrics show AI reliability and failure response?,"['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Priorities Related to Information Integrity Research and Development.' + '54 \nAppendix B. References \nAcemoglu, D. (2024) The Simple Macroeconomics of AI https://www.nber.org/papers/w32487 \nAI Incident Database. https://incidentdatabase.ai/ \nAtherton, D. (2024) Deepfakes and Child Safety: A Survey and Analysis of 2023 Incidents and Responses. \nAI Incident Database. https://incidentdatabase.ai/blog/deepfakes-and-child-safety/ \nBadyal, N. et al. (2023) Intentional Biases in LLM Responses. arXiv. https://arxiv.org/pdf/2311.07611 \nBing Chat: Data Exfiltration Exploit Explained. Embrace The Red. \nhttps://embracethered.com/blog/posts/2023/bing-chat-data-exfiltration-poc-and-fix/ \nBommasani, R. et al. (2022) Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome \nHomogenization? arXiv. https://arxiv.org/pdf/2211.13972 \nBoyarskaya, M. et al. 
(2020) Overcoming Failures of Imagination in AI Infused System Development and \nDeployment. arXiv. https://arxiv.org/pdf/2011.13416 \nBrowne, D. et al. (2023) Securing the AI Pipeline. Mandiant.' + 'inputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents.' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'guide the design of provenance data-tracking techniques. \nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV' + 'Lakatos, S. (2023) A Revealing Picture. Graphika. https://graphika.com/reports/a-revealing-picture \nLee, H. et al. (2024) Deepfakes, Phrenology, Surveillance, and More! A Taxonomy of AI Privacy Risks. \narXiv. https://arxiv.org/pdf/2310.07879 \nLenaerts-Bergmans, B. (2024) Data Poisoning: The Exploitation of Generative AI. Crowdstrike. \nhttps://www.crowdstrike.com/cybersecurity-101/cyberattacks/data-poisoning/ \nLiang, W. et al. (2023) GPT detectors are biased against non-native English writers. arXiv. \nhttps://arxiv.org/abs/2304.02819 \nLuccioni, A. et al. (2023) Power Hungry Processing: Watts Driving the Cost of AI Deployment? arXiv. \nhttps://arxiv.org/pdf/2311.16863 \nMouton, C. et al. (2024) The Operational Risks of AI in Large-Scale Biological Attacks. RAND. \nhttps://www.rand.org/pubs/research_reports/RRA2977-2.html. \nNicoletti, L. et al. (2023) Humans Are Biased. Generative Ai Is Even Worse. Bloomberg. \nhttps://www.bloomberg.com/graphics/2023-generative-ai-bias/.']",I don't know.,"Safety metrics reflect system reliability and robustness, real-time monitoring, and response times for AI system failures.",0.0,0.0,0.0,0.249999999975,0.18170358254246638 
+How does watch list opacity impact error correction and public understanding?,"['for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content' + 'and data that are considered sensitive are understood to change over time based on societal norms and context. \n36' + 'evaluation and plain language reporting in the form of an algorithmic impact assessment, including \ndisparity testing results and mitigation information, should be performed and made public whenever \npossible to confirm these protections. 
\n5' + 'Priorities Related to Information Integrity Research and Development.' + 'Technology Policy Institute produced a synopsis of both the RFI submissions and the feedback at the listening\nsessions.115\n61' + 'and biological design tools. arXiv. https://arxiv.org/pdf/2306.13952' + 'FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12']",I don't know.,Watch list opacity impacts error correction and public understanding by making it difficult for both police and the public to understand why and how the system is making determinations. This lack of transparency can prevent errors from being corrected in a timely manner.,0.0,0.0,0.0,0.0,0.18446736094669172 
+"How do policies manage third-party AI risks, IP, and data privacy?","['FROM \nPRINCIPLES \nTO PRACTICE \nA TECHNICAL COMPANION TO\nTHE Blueprint for an \nAI BILL OF RIGHTS\n12' + 'external use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty' + 'BLUEPRINT FOR AN \nAI BILL OF \nRIGHTS \nMAKING AUTOMATED \nSYSTEMS WORK FOR \nTHE AMERICAN PEOPLE \nOCTOBER 2022' + '• Accessibility and reasonable \naccommodations \n• AI actor credentials and qualifications \n• Alignment to organizational values \n• Auditing and assessment \n• Change-management controls \n• Commercial use \n• Data provenance' + 'lifecycle and informed by representative AI Actors (see Figure 3 of the AI RMF). Until new and rigorous' + 'for any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections' + 'and management. One possible way to further categorize these risks, derived in part from the UK's International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories.' + 'Priorities Related to Information Integrity Research and Development.' 
+ 'organization's business processes or other activities, system goals, any human-run procedures that form a \npart of the system, and specific performance expectations; a description of any data used to train machine \nlearning models or for other purposes, including how data sources were processed and interpreted, a \nsummary of what data might be missing, incomplete, or erroneous, and data relevancy justifications; the \nresults of public consultation such as concerns raised and any decisions made due to these concerns; risk \nidentification and management assessments and any steps taken to mitigate potential harms; the results of \nperformance testing including, but not limited to, accuracy, differential demographic impact, resulting \nerror rates (overall and per demographic group), and comparisons to previously deployed systems; \nongoing monitoring procedures and regular performance testing reports, including monitoring frequency,' + 'Information Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content']",I don't know.,"Policies manage third-party AI risks, IP, and data privacy by categorizing different types of GAI content with associated third-party rights (e.g., copyright, intellectual property, data privacy), conducting joint educational activities and events in collaboration with third parties to promote best practices for managing GAI risks, developing and validating approaches for measuring the success of content provenance management efforts with third parties (e.g., incidents detected and response times), and drafting and maintaining well-defined contracts and service level agreements (SLAs) that specify content ownership, usage rights, quality standards, security requirements, and content provenance expectations for GAI systems.",0.0,0.0,0.25,0.18253968253055555,0.18076821623726175 
diff --git a/Tasks/Task 5/task5-ai-safety-sdg.csv b/Tasks/Task 5/task5-ai-safety-sdg.csv
new file mode 100644
index 0000000000000000000000000000000000000000..5a96f6404fe8376e1df87d23215425b28074b7e6
--- /dev/null
+++ b/Tasks/Task 5/task5-ai-safety-sdg.csv
@@ -0,0 +1,50 @@
+question,contexts,ground_truth,evolution_type,metadata,episode_done
+What measures are suggested to mitigate concerns of harmful bias and homogenization in AI training data?,"[' \n37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities – as identified in the MAP \nfunction – are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. \nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, fine tuning, and deploying models: Verify tradeoffs \nbetween resources used at inference time versus additional resources required \nat training time. 
\nEnvironmental \nMS-2.12-004 Verify effectiveness of carbon capture or offset programs for GAI training and \napplications, and address green-washing concerns. \nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",Assess the proportion of synthetic to non-synthetic training data and verify training data is not overly homogenous or GAI-produced to mitigate concerns of model collapse.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 40, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What was the purpose of the Generative AI Public Working Group (GAI PWG) facilitated by NIST?,"[' \n2 \nThis work was informed by public feedback and consultations with diverse stakeholder groups as part of NIST's \nGenerative AI Public Working Group (GAI PWG). The GAI PWG was an open, transparent, and collaborative \nprocess, facilitated via a virtual workspace, to obtain multistakeholder input on GAI risk management and to \ninform NIST's approach. \nThe focus of the GAI PWG was limited to four primary considerations relevant to GAI: Governance, Content \nProvenance, Pre-deployment Testing, and Incident Disclosure (further described in Appendix A). As such, the \nsuggested actions in this document primarily address these considerations. \nFuture revisions of this profile will include additional AI RMF subcategories, risks, and suggested actions based \non additional considerations of GAI as the space evolves and empirical evidence indicates additional risks. A \nglossary of terms pertinent to GAI risk management will be developed and hosted on NIST's Trustworthy & \nResponsible AI Resource Center (AIRC), and added to The Language of Trustworthy AI: An In-Depth Glossary of \nTerms. \nThis document was also informed by public comments and consultations from several Requests for Information. \n \n2. \nOverview of Risks Unique to or Exacerbated by GAI \nIn the context of the AI RMF, risk refers to the composite measure of an event's probability (or \nlikelihood) of occurring and the magnitude or degree of the consequences of the corresponding event. \nSome risks can be assessed as likely to materialize in a given context, particularly those that have been \nempirically demonstrated in similar contexts. Other risks may be unlikely to materialize in a given \ncontext, or may be more speculative and therefore uncertain. \nAI risks can differ from or intensify traditional software risks. Likewise, GAI can exacerbate existing AI \nrisks, and creates unique risks. GAI risks can vary along many dimensions: \n• \nStage of the AI lifecycle: Risks can arise during design, development, deployment, operation, \nand/or decommissioning. \n• \nScope: Risks may exist at individual model or system levels, at the application or implementation \nlevels (i.e., for a specific use case), or at the ecosystem level – that is, beyond a single system or \norganizational context. 
Examples of the latter include the expansion of "algorithmic \nmonocultures,3" resulting from repeated use of the same model, or impacts on access to \nopportunity, labor markets, and the creative economies.4 \n• \nSource of risk: Risks may emerge from factors related to the design, training, or operation of the \nGAI model itself, stemming in some cases from GAI model or system inputs, and in other cases, \nfrom GAI system outputs. Many GAI risks, however, originate from human behavior, including \n \n \n3 "Algorithmic monocultures" refers to the phenomenon in which repeated use of the same model or algorithm in \nconsequential decision-making settings like employment and lending can result in increased susceptibility by \nsystems to correlated failures (like unexpected shocks), due to multiple actors relying on the same algorithm. \n4 Many studies have projected the impact of AI on the workforce and labor markets. Fewer studies have examined \nthe impact of GAI on the labor market, though some industry surveys indicate that both employees and \nemployers are pondering this disruption. \n']",The purpose of the Generative AI Public Working Group (GAI PWG) facilitated by NIST was to obtain multistakeholder input on GAI risk management and to inform NIST's approach.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 5, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+Why is transparency important when individuals are placed on a watch list by a predictive policing system?,"[' \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nA predictive policing system claimed to identify individuals at greatest risk to commit or become the victim of\ngun violence (based on automated analysis of social ties to gang members, criminal histories, previous experi-\nences of gun violence, and other factors) and led to individuals being placed on a watch list with no\nexplanation or public transparency regarding how the system came to its conclusions.85 Both police and\nthe public deserve to understand why and how such a system is making these determinations.\n•\nA system awarding benefits changed its criteria invisibly. Individuals were denied benefits due to data entry\nerrors and other system flaws. 
These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42\n']",Transparency is important when individuals are placed on a watch list by a predictive policing system because both police and the public deserve to understand why and how such a system is making these determinations.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 41, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What procedures should be established and maintained for the remediation of issues that trigger incident response processes for the use of a GAI system?,"[' \n42 \nMG-2.4-002 \nEstablish and maintain procedures for escalating GAI system incidents to the \norganizational risk management authority when specific criteria for deactivation \nor disengagement is met for a particular context of use or for the GAI system as a \nwhole. \nInformation Security \nMG-2.4-003 \nEstablish and maintain procedures for the remediation of issues which trigger \nincident response processes for the use of a GAI system, and provide stakeholders \ntimelines associated with the remediation plan. \nInformation Security \n \nMG-2.4-004 Establish and regularly review specific criteria that warrants the deactivation of \nGAI systems in accordance with set risk tolerances and appetites. \nInformation Security \n \nAI Actor Tasks: AI Deployment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 3.1: AI risks and benefits from third-party resources are regularly monitored, and risk controls are applied and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMG-3.1-001 \nApply organizational risk tolerances and controls (e.g., acquisition and \nprocurement processes; assessing personnel credentials and qualifications, \nperforming background checks; filtering GAI input and outputs, grounding, fine \ntuning, retrieval-augmented generation) to third-party GAI resources: Apply \norganizational risk tolerance to the utilization of third-party datasets and other \nGAI resources; Apply organizational risk tolerances to fine-tuned third-party \nmodels; Apply organizational risk tolerance to existing third-party models \nadapted to a new domain; Reassess risk measurements after fine-tuning third-\nparty GAI models. \nValue Chain and Component \nIntegration; Intellectual Property \nMG-3.1-002 \nTest GAI system value chain risks (e.g., data poisoning, malware, other software \nand hardware vulnerabilities; labor practices; data privacy and localization \ncompliance; geopolitical alignment). \nData Privacy; Information Security; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nMG-3.1-003 \nRe-assess model risks after fine-tuning or retrieval-augmented generation \nimplementation and for any third-party GAI models deployed for applications \nand/or use cases that were not evaluated in initial testing. 
\nValue Chain and Component \nIntegration \nMG-3.1-004 \nTake reasonable measures to review training data for CBRN information, and \nintellectual property, and where appropriate, remove it. Implement reasonable \nmeasures to prevent, flag, or take other action in response to outputs that \nreproduce particular training data (e.g., plagiarized, trademarked, patented, \nlicensed content or trade secret material). \nIntellectual Property; CBRN \nInformation or Capabilities \n']","Establish and maintain procedures for the remediation of issues which trigger incident response processes for the use of a GAI system, and provide stakeholders timelines associated with the remediation plan.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 45, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What is the purpose of utilizing a purpose-built testing environment such as NIST Dioptra in evaluating GAI trustworthy characteristics?,"[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Configuration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, fine-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Configuration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Configuration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that fine-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. 
\nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n']",The purpose of utilizing a purpose-built testing environment such as NIST Dioptra is to empirically evaluate GAI trustworthy characteristics.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What input did the White House Office of Science and Technology Policy seek regarding algorithmic and data-driven harms?,"[' \n \n \nABOUT THIS FRAMEWORK\xad\xad\xad\xad\xad\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the \ndesign, use, and deployment of automated systems to protect the rights of the American public in the age of \nartificial intel-ligence. Developed through extensive consultation with the American public, these principles are \na blueprint for building and deploying automated systems that are aligned with democratic values and protect \ncivil rights, civil liberties, and privacy. The Blueprint for an AI Bill of Rights includes this Foreword, the five \nprinciples, notes on Applying the The Blueprint for an AI Bill of Rights, and a Technical Companion that gives \nconcrete steps that can be taken by many kinds of organizations—from governments at all levels to companies of \nall sizes—to uphold these values. Experts from across the private sector, governments, and international \nconsortia have published principles and frameworks to guide the responsible use of automated systems; this \nframework provides a national values statement and toolkit that is sector-agnostic to inform building these \nprotections into policy, practice, or the technological design process. Where existing law or policy—such as \nsector-specific privacy laws and oversight requirements—do not already provide guidance, the Blueprint for an \nAI Bill of Rights should be used to inform policy decisions.\nLISTENING TO THE AMERICAN PUBLIC\nThe White House Office of Science and Technology Policy has led a year-long process to seek and distill input \nfrom people across the country—from impacted communities and industry stakeholders to technology develop-\ners and other experts across fields and sectors, as well as policymakers throughout the Federal government—on \nthe issue of algorithmic and data-driven harms and potential remedies. Through panel discussions, public listen-\ning sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americans' lives, and that preventing the \nharms of these technologies is both necessary and achievable. 
The Appendix includes a full list of public engage-\nments. \n4\n']","The White House Office of Science and Technology Policy sought input from people across the country, including impacted communities, industry stakeholders, technology developers, other experts across fields and sectors, and policymakers throughout the Federal government. This input was gathered through panel discussions, public listening sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized email address.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 3, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What resource does the National Artificial Intelligence Initiative Office provide regarding AI use cases?,"[' \nENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public\nAdministration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. The US Department of Transportation has publicly described the health and other benefits of these\n"traffic calming" measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow\xad\nVehicle-Speeds\n17. Karen Hao. Worried about your firm's AI ethics? These startups are here to help.\nA growing ecosystem of "responsible AI" ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan 15., 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://\nwww.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for\xad\nin-2021/ https://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top\nProgressive Companies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021.\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. 
https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985\xad\nImplementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. Department of Energy Artificial Intelligence and Technology Office. April 18, 2022. https://\nwww.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S Department of Defense Responsible Artificial Intelligence Strategy and\nImplementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation\xad\nPathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for\xad\nthe-intelligence-community\n64\n']",The National Artificial Intelligence Initiative Office provides Agency Inventories of AI Use Cases.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 63, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What are the documented roles and responsibilities related to mapping, measuring, and managing AI risks within an organization?,"[' \n17 \nGOVERN 1.7: Processes and procedures are in place for decommissioning and phasing out AI systems safely and in a manner that \ndoes not increase risks or decrease the organization's trustworthiness. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.7-001 Protocols are put in place to ensure GAI systems are able to be deactivated when \nnecessary. \nInformation Security; Value Chain \nand Component Integration \nGV-1.7-002 \nConsider the following factors when decommissioning GAI systems: Data \nretention requirements; Data security, e.g., containment, protocols, Data leakage \nafter decommissioning; Dependencies between upstream, downstream, or other \ndata, internet of things (IOT) or AI systems; Use of open-source data or models; \nUsers' emotional entanglement with GAI functions. \nHuman-AI Configuration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nGOVERN 2.1: Roles and responsibilities and lines of communication related to mapping, measuring, and managing AI risks are \ndocumented and are clear to individuals and teams throughout the organization. \nAction ID \nSuggested Action \nGAI Risks \nGV-2.1-001 \nEstablish organizational roles, policies, and procedures for communicating GAI \nincidents and performance to AI Actors and downstream stakeholders (including \nthose potentially impacted), via community or official resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor). 
\nHuman-AI Conļ¬guration; Value \nChain and Component Integration \nGV-2.1-002 Establish procedures to engage teams for GAI system incident response with \ndiverse composition and responsibilities based on the particular incident type. \nHarmful Bias and Homogenization \nGV-2.1-003 Establish processes to verify the AI Actors conducting GAI incident response tasks \ndemonstrate and maintain the appropriate skills and training. \nHuman-AI Conļ¬guration \nGV-2.1-004 When systems may raise national security risks, involve national security \nprofessionals in mapping, measuring, and managing those risks. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Information Security \nGV-2.1-005 \nCreate mechanisms to provide protections for whistleblowers who report, based \non reasonable belief, when the organization violates relevant laws or poses a \nspeciļ¬c and empirically well-substantiated negative risk to public safety (or has \nalready caused harm). \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent \nAI Actor Tasks: Governance and Oversight \n \n']","Roles and responsibilities and lines of communication related to mapping, measuring, and managing AI risks are documented and are clear to individuals and teams throughout the organization.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 20, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What steps should organizations take to ensure accessibility during the design, development, and deployment of automated systems?","["" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. 
Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. \nDisparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. 
\n27\nAlgorithmic \nDiscrimination \nProtections \n""]","Organizations should ensure accessibility during the design, development, and deployment of automated systems by considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers to the use or effectiveness of the automated system.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+How do digital transparency mechanisms help manage and mitigate risks associated with AI-generated content?,"[' \n51 \ngeneral public participants. For example, expert AI red-teamers could modify or verify the \nprompts written by general public AI red-teamers. These approaches may also expand coverage \nof the AI risk attack surface. \n• \nHuman / AI: Performed by GAI in combination with specialist or non-specialist human teams. \nGAI-led red-teaming can be more cost effective than human red-teamers alone. Human or GAI-\nled AI red-teaming may be better suited for eliciting different types of harms. \n \nA.1.6. Content Provenance \nOverview \nGAI technologies can be leveraged for many applications such as content generation and synthetic data. \nSome aspects of GAI outputs, such as the production of deepfake content, can challenge our ability to \ndistinguish human-generated content from AI-generated synthetic content. To help manage and mitigate \nthese risks, digital transparency mechanisms like provenance data tracking can trace the origin and \nhistory of content. Provenance data tracking and synthetic content detection can help facilitate greater \ninformation access about both authentic and synthetic content to users, enabling better knowledge of \ntrustworthiness in AI systems. When combined with other organizational accountability mechanisms, \ndigital content transparency approaches can enable processes to trace negative outcomes back to their \nsource, improve information integrity, and uphold public trust. Provenance data tracking and synthetic \ncontent detection mechanisms provide information about the origin and history of content to assist in \nGAI risk management efforts. \nProvenance metadata can include information about GAI model developers or creators of GAI content, \ndate/time of creation, location, modifications, and sources. Metadata can be tracked for text, images, \nvideos, audio, and underlying datasets. The implementation of provenance data tracking techniques can \nhelp assess the authenticity, integrity, intellectual property rights, and potential manipulations in digital \ncontent. Some well-known techniques for provenance data tracking include digital watermarking, \nmetadata recording, digital fingerprinting, and human authentication, among others. \nProvenance Data Tracking Approaches \nProvenance data tracking techniques for GAI systems can be used to track the history and origin of data \ninputs, metadata, and synthetic content. 
Provenance data tracking records the origin and history for \ndigital content, allowing its authenticity to be determined. It consists of techniques to record metadata \nas well as overt and covert digital watermarks on content. Data provenance refers to tracking the origin \nand history of input data through metadata and digital watermarking techniques. Provenance data \ntracking processes can include and assist AI Actors across the lifecycle who may not have full visibility or \ncontrol over the various trade-offs and cascading impacts of early-stage model decisions on downstream \nperformance and synthetic outputs. For example, by selecting a watermarking model to prioritize \nrobustness (the durability of a watermark), an AI actor may inadvertently diminish computational \ncomplexity (the resources required to implement watermarking). Organizational risk management \nefforts for enhancing content provenance include: \n• \nTracking provenance of training data and metadata for GAI systems; \n• \nDocumenting provenance data limitations within GAI systems; \n']","Digital transparency mechanisms like provenance data tracking can trace the origin and history of content. Provenance data tracking and synthetic content detection can help facilitate greater information access about both authentic and synthetic content to users, enabling better knowledge of trustworthiness in AI systems. When combined with other organizational accountability mechanisms, digital content transparency approaches can enable processes to trace negative outcomes back to their source, improve information integrity, and uphold public trust.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 54, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What is the suggested action for compiling statistics on actual policy violations in organizational GAI systems?,"[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain effective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability – as identified in the MAP function – are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers. \nHuman-AI Configuration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modified, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. 
\nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. \nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","Compile statistics on actual policy violations, take-down requests, and intellectual property infringement for organizational GAI systems: Analyze transparency reports across demographic groups, languages groups.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What is data poisoning and how can it affect GAI systems?,"[' \n11 \nvalue chain (e.g., data inputs, processing, GAI training, or deployment environments), conventional \ncybersecurity practices may need to adapt or evolve. \nFor instance, prompt injection involves modifying what input is provided to a GAI system so that it \nbehaves in unintended ways. In direct prompt injections, attackers might craft malicious prompts and \ninput them directly to a GAI system, with a variety of downstream negative consequences to \ninterconnected systems. Indirect prompt injection attacks occur when adversaries remotely (i.e., without \na direct interface) exploit LLM-integrated applications by injecting prompts into data likely to be \nretrieved. Security researchers have already demonstrated how indirect prompt injections can exploit \nvulnerabilities by stealing proprietary data or running malicious code remotely on a machine. Merely \nquerying a closed production model can elicit previously undisclosed information about that model. \nAnother cybersecurity risk to GAI is data poisoning, in which an adversary compromises a training \ndataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts \nof the model could exacerbate risks associated with GAI system outputs. \nTrustworthy AI Characteristics: Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n2.10. \nIntellectual Property \nIntellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair \nuse under the fair use doctrine. If a GAI system's training data included copyrighted material, GAI \noutputs displaying instances of training data memorization (see Data Privacy above) could infringe on \ncopyright. \nHow GAI relates to copyright, including the status of generated content that is similar to but does not \nstrictly copy work protected by copyright, is currently being debated in legal fora. Similar discussions are \ntaking place regarding the use or emulation of personal identity, likeness, or voice without permission. \nTrustworthy AI Characteristics: Accountable and Transparent, Fair with Harmful Bias Managed, Privacy \nEnhanced \n2.11. \nObscene, Degrading, and/or Abusive Content \nGAI can ease the production of and access to illegal non-consensual intimate imagery (NCII) of adults, \nand/or child sexual abuse material (CSAM). 
GAI-generated obscene, abusive or degrading content can \ncreate privacy, psychological and emotional, and even physical harms, and in some cases may be illegal. \nGenerated explicit or obscene AI content may include highly realistic "deepfakes" of real individuals, \nincluding children. The spread of this kind of material can have downstream negative consequences: in \nthe context of CSAM, even if the generated images do not resemble specific individuals, the prevalence \nof such images can divert time and resources from efforts to find real-world victims. Outside of CSAM, \nthe creation and spread of NCII disproportionately impacts women and sexual minorities, and can have \nsubsequent negative consequences including decline in overall mental health, substance abuse, and \neven suicidal thoughts. \nData used for training GAI models may unintentionally include CSAM and NCII. A recent report noted \nthat several commonly used GAI training datasets were found to contain hundreds of known images of \n']",Data poisoning is a cybersecurity risk where an adversary compromises a training dataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts of the model could exacerbate risks associated with GAI system outputs.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 14, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+How do systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties?,"['APPENDIX\nSystems that impact the safety of communities such as automated traffic control systems, elec \n-ctrical grid controls, smart city technologies, and industrial emissions and environmental\nimpact control algorithms; and\nSystems related to access to benefits or services or assignment of penalties such as systems that\nsupport decision-makers who adjudicate benefits such as collating or analyzing information or\nmatching records, systems which similarly assist in the adjudication of administrative or criminal\npenalties, fraud detection algorithms, services or benefits access control algorithms, biometric\nsystems used as access control, and systems which make benefits or services related decisions on a\nfully or partially autonomous basis (such as a determination to revoke benefits).\n54\n']",Systems related to the assignment of penalties assist decision-makers in adjudicating administrative or criminal penalties by collating or analyzing information or matching records.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 53, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True 
+What measures are suggested to mitigate concerns of model collapse related to the proportion of synthetic to non-synthetic training data?,"[' \n37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities – as identified in the MAP \nfunction – are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. \nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, fine tuning, and deploying models: Verify tradeoffs \nbetween resources used at inference time versus additional resources required \nat training time. \nEnvironmental \nMS-2.12-004 Verify effectiveness of carbon capture or offset programs for GAI training and \napplications, and address green-washing concerns. \nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",Assess the proportion of synthetic to non-synthetic training data and verify training data is not overly homogenous or GAI-produced.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 40, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What measures are suggested to identify and quantify new contexts of unanticipated impacts of GAI systems?,"[' \n28 \nMAP 5.2: Practices and personnel for supporting regular engagement with relevant AI Actors and integrating feedback about \npositive, negative, and unanticipated impacts are in place and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.2-001 \nDetermine context-based measures to identify if new impacts are present due to \nthe GAI system, including regular engagements with downstream AI Actors to \nidentify and quantify new contexts of unanticipated impacts of GAI systems. \nHuman-AI Configuration; Value \nChain and Component Integration \nMP-5.2-002 \nPlan regular engagements with AI Actors responsible for inputs to GAI systems, \nincluding third-party data and algorithms, to review and evaluate unanticipated \nimpacts. 
\nHuman-AI Conļ¬guration; Value \nChain and Component Integration \nAI Actor Tasks: AI Deployment, AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Domain Experts, End-\nUsers, Human Factors, Operation and Monitoring \n \nMEASURE 1.1: Approaches and metrics for measurement of AI risks enumerated during the MAP function are selected for \nimplementation starting with the most signiļ¬cant AI risks. The risks or trustworthiness characteristics that will not ā€“ or cannot ā€“ be \nmeasured are properly documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.1-001 Employ methods to trace the origin and modiļ¬cations of digital content. \nInformation Integrity \nMS-1.1-002 \nIntegrate tools designed to analyze content provenance and detect data \nanomalies, verify the authenticity of digital signatures, and identify patterns \nassociated with misinformation or manipulation. \nInformation Integrity \nMS-1.1-003 \nDisaggregate evaluation metrics by demographic factors to identify any \ndiscrepancies in how content provenance mechanisms work across diverse \npopulations. \nInformation Integrity; Harmful \nBias and Homogenization \nMS-1.1-004 Develop a suite of metrics to evaluate structured public feedback exercises \ninformed by representative AI Actors. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.1-005 \nEvaluate novel methods and technologies for the measurement of GAI-related \nrisks including in content provenance, oļ¬€ensive cyber, and CBRN, while \nmaintaining the modelsā€™ ability to produce valid, reliable, and factually accurate \noutputs. \nInformation Integrity; CBRN \nInformation or Capabilities; \nObscene, Degrading, and/or \nAbusive Content \n']","Determine context-based measures to identify if new impacts are present due to the GAI system, including regular engagements with downstream AI Actors to identify and quantify new contexts of unanticipated impacts of GAI systems.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 31, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What are the expectations for automated systems in terms of risk assessment and explanation validity?,"["" \n \n \n \n \n \nNOTICE & \nEXPLANATION \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nTailored to the level of risk. An assessment should be done to determine the level of risk of the auto\xad\nmated system. In settings where the consequences are high as determined by a risk assessment, or extensive \noversight is expected (e.g., in criminal justice or some public sector settings), explanatory mechanisms should \nbe built into the system design so that the systemā€™s full behavior can be explained in advance (i.e., only fully \ntransparent models should be used), rather than as an after-the-decision interpretation. 
In other settings, the \nextent of explanation provided should be tailored to the risk level. \nValid. The explanation provided by a system should accurately reflect the factors and the influences that led \nto a particular decision, and should be meaningful for the particular customization based on purpose, target, \nand level of risk. While approximation and simplification may be necessary for the system to succeed based on \nthe explanatory purpose and target of the explanation, or to account for the risk of fraud or other concerns \nrelated to revealing decision-making information, such simplifications should be done in a scientifically \nsupportable way. Where appropriate based on the explanatory system, error ranges for the explanation should \nbe calculated and included in the explanation, with the choice of presentation of such information balanced \nwith usability and overall interface complexity concerns. \nDemonstrate protections for notice and explanation \nReporting. Summary reporting should document the determinations made based on the above consider\xad\nations, including: the responsible entities for accountability purposes; the goal and use cases for the system, \nidentified users, and impacted populations; the assessment of notice clarity and timeliness; the assessment of \nthe explanation's validity and accessibility; the assessment of the level of risk; and the account and assessment \nof how explanations are tailored, including to the purpose, the recipient of the explanation, and the level of \nrisk. Individualized profile information should be made readily available to the greatest extent possible that \nincludes explanations for any system impacts or inferences. Reporting should be provided in a clear plain \nlanguage and machine-readable manner. \n44\n""]","The expectations for automated systems in terms of risk assessment and explanation validity include conducting an assessment to determine the level of risk of the automated system. In high-risk settings, explanatory mechanisms should be built into the system design to ensure full transparency. The explanation provided by the system should accurately reflect the factors and influences that led to a particular decision and should be meaningful for the specific customization based on purpose, target, and level of risk.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 43, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What is the purpose of conducting proactive equity assessments in the design phase of automated systems?,"[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. 
Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. 
At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']","The purpose of conducting proactive equity assessments in the design phase of automated systems is to review potential input data, associated historical context, accessibility for people with disabilities, and societal goals to identify potential discrimination and effects on equity resulting from the introduction of the technology.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How can the equitable design of automated systems help prevent algorithmic discrimination?,"["" \n \n \n \n \n \n \n \nAlgorithmic \nDiscrimination \nProtections \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nThere is extensive evidence showing that automated systems can produce inequitable outcomes and amplify \nexisting inequity.30 Data that fails to account for existing systemic biases in American society can result in a range of \nconsequences. For example, facial recognition technology that can contribute to wrongful and discriminatory \narrests,31 hiring algorithms that inform discriminatory decisions, and healthcare algorithms that discount \nthe severity of certain diseases in Black Americans. Instances of discriminatory practices built into and \nresulting from AI and other automated systems exist across many industries, areas, and contexts. While automated \nsystems have the capacity to drive extraordinary advances and innovations, algorithmic discrimination \nprotections should be built into their design, deployment, and ongoing use. \nMany companies, non-profits, and federal government agencies are already taking steps to ensure the public \nis protected from algorithmic discrimination. Some companies have instituted bias testing as part of their product \nquality assessment and launch procedures, and in some cases this testing has led products to be changed or not \nlaunched, preventing harm to the public. Federal government agencies have been developing standards and guidance \nfor the use of automated systems in order to help prevent bias. Non-profits and companies have developed best \npractices for audits and impact assessments to help identify potential algorithmic discrimination and provide \ntransparency to the public in the mitigation of such biases. \nBut there is much more work to do to protect the public from algorithmic discrimination to use and design \nautomated systems in an equitable way. The guardrails protecting the public from discrimination in their daily \nlives should include their digital lives and impactsā€”basic safeguards against abuse, bias, and discrimination to \nensure that all people are treated fairly when automated systems are used. 
This includes all dimensions of their \nlives, from hiring to loan approvals, from medical treatment and payment to encounters with the criminal \njustice system. Ensuring equity should also go beyond existing guardrails to consider the holistic impact that \nautomated systems make on underserved communities and to institute proactive protections that support these \ncommunities. \n•\nAn automated system using nontraditional factors such as educational attainment and employment history as\npart of its loan underwriting and pricing model was found to be much more likely to charge an applicant who\nattended a Historically Black College or University (HBCU) higher loan prices for refinancing a student loan\nthan an applicant who did not attend an HBCU. This was found to be true even when controlling for\nother credit-related factors.32\n•\nA hiring tool that learned the features of a company's employees (predominantly men) rejected women appli\xad\ncants for spurious and discriminatory reasons; resumes with the word "women's," such as "women's\nchess club captain," were penalized in the candidate ranking.33\n•\nA predictive model marketed as being able to predict whether students are likely to drop out of school was\nused by more than 500 universities across the country. The model was found to use race directly as a predictor,\nand also shown to have large disparities by race; Black students were as many as four times as likely as their\notherwise similar white peers to be deemed at high risk of dropping out. These risk scores are used by advisors \nto guide students towards or away from majors, and some worry that they are being used to guide\nBlack students away from math and science subjects.34\n•\nA risk assessment tool designed to predict the risk of recidivism for individuals in federal custody showed\nevidence of disparity in prediction. The tool overpredicts the risk of recidivism for some groups of color on the\ngeneral recidivism tools, and underpredicts the risk of recidivism for some groups of color on some of the\nviolent recidivism tools. The Department of Justice is working to reduce these disparities and has\npublicly released a report detailing its review of the tool.35 \n24\n""]","Equitable design of automated systems can help prevent algorithmic discrimination by incorporating protections against abuse, bias, and discrimination, ensuring that all people are treated fairly when automated systems are used. This includes considering the holistic impact on underserved communities and instituting proactive protections that support these communities.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 23, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
+What is the role of relevant AI Actors in the GAI system risk identification process?,"[' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. 
\nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identification process. \nHuman-AI Configuration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identification of incidents, and information sharing. \nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the effectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. \nInformation Security \n']",The role of relevant AI Actors in the GAI system risk identification process is to be included in the process.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+What policies and procedures should be established for the continuous monitoring of third-party GAI systems in deployment?,"[' \n22 \nGV-6.2-003 \nEstablish incident response plans for third-party GAI technologies: Align incident \nresponse plans with impacts enumerated in MAP 5.1; Communicate third-party \nGAI incident response plans to all relevant AI Actors; Define ownership of GAI \nincident response functions; Rehearse third-party GAI incident response plans at \na regular cadence; Improve incident response plans based on retrospective \nlearning; Review incident response plans for alignment with relevant breach \nreporting, data protection, data privacy, or other laws. \nData Privacy; Human-AI \nConfiguration; Information \nSecurity; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization \nGV-6.2-004 \nEstablish policies and procedures for continuous monitoring of third-party GAI \nsystems in deployment. \nValue Chain and Component \nIntegration \nGV-6.2-005 \nEstablish policies and procedures that address GAI data redundancy, including \nmodel weights and other system artifacts. 
\nHarmful Bias and Homogenization \nGV-6.2-006 \nEstablish policies and procedures to test and manage risks related to rollover and \nfallback technologies for GAI systems, acknowledging that rollover and fallback \nmay include manual processing. \nInformation Integrity \nGV-6.2-007 \nReview vendor contracts and avoid arbitrary or capricious termination of critical \nGAI technologies or vendor services and non-standard terms that may amplify or \ndefer liability in unexpected ways and/or contribute to unauthorized data \ncollection by vendors or third-parties (e.g., secondary data use). Consider: Clear \nassignment of liability and responsibility for incidents, GAI system changes over \ntime (e.g., fine-tuning, drift, decay); Request: Notification and disclosure for \nserious incidents arising from third-party data and systems; Service Level \nAgreements (SLAs) in vendor contracts that address incident response, response \ntimes, and availability of critical support. \nHuman-AI Configuration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV, Third-party entities \n \nMAP 1.1: Intended purposes, potentially beneficial uses, context specific laws, norms and expectations, and prospective settings in \nwhich the AI system will be deployed are understood and documented. Considerations include: the specific set or types of users \nalong with their expectations; potential positive and negative impacts of system uses to individuals, communities, organizations, \nsociety, and the planet; assumptions and related limitations about AI system purposes, uses, and risks across the development or \nproduct AI lifecycle; and related TEVV and system metrics. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.1-001 \nWhen identifying intended purposes, consider factors such as internal vs. \nexternal use, narrow vs. broad application scope, fine-tuning, and varieties of \ndata sources (e.g., grounding, retrieval-augmented generation). \nData Privacy; Intellectual \nProperty \n']",Policies and procedures for continuous monitoring of third-party GAI systems in deployment should be established.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 25, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
+How did the installation of a facial recognition system by a local public housing authority impact the community?,"["" \n \n \n \nDATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nAn insurer might collect data from a person's social media presence as part of deciding what life\ninsurance rates they should be offered.64\n•\nA data broker harvested large amounts of personal data and then suffered a breach, exposing hundreds of\nthousands of people to potential identity theft. 
65\n•\nA local public housing authority installed a facial recognition system at the entrance to housing complexes to\nassist law enforcement with identifying individuals viewed via camera when police reports are filed, leading\nthe community, both those living in the housing complex and not, to have videos of them sent to the local\npolice department and made available for scanning by its facial recognition software.66\n•\nCompanies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32\n""]","The installation of a facial recognition system by a local public housing authority led the community, both those living in the housing complex and not, to have videos of them sent to the local police department and made available for scanning by its facial recognition software.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 31, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How can over-reliance on synthetic data lead to model collapse?,"[' \n9 \nand reduced content diversity). Overly homogenized outputs can themselves be incorrect, or they may \nlead to unreliable decision-making or amplify harmful biases. These phenomena can flow from \nfoundation models to downstream models and systems, with the foundation models acting as \n""bottlenecks,"" or single points of failure. \nOverly homogenized content can contribute to ""model collapse."" Model collapse can occur when model \ntraining over-relies on synthetic data, resulting in data points disappearing from the distribution of the \nnew model\'s outputs. In addition to threatening the robustness of the model overall, model collapse \ncould lead to homogenized outputs, including by amplifying any homogenization from the model used to \ngenerate the synthetic training data. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Valid and Reliable \n2.7. Human-AI Configuration \nGAI system use can involve varying risks of misconfigurations and poor interactions between a system \nand a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspecific expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily ""averse"" to GAI systems, and thus \ndeprive themselves or others of GAI\'s beneficial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustifiably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts. 
\nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Valid and Reliable \n2.8. Information Integrity \nInformation integrity describes the ""spectrum of information and associated patterns of its creation, \nexchange, and consumption in society."" High-integrity information can be trusted; ""distinguishes fact \nfrom fiction, opinion, and inference; acknowledges uncertainties; and is transparent about its level of \nvetting. This information can be linked to the original source(s) with appropriate evidence. High-integrity \ninformation is also accurate and reliable, can be verified and authenticated, has a clear chain of custody, \nand creates reasonable expectations about when its validity may expire.""11 \n \n \n11 This definition of information integrity is derived from the 2022 White House Roadmap for Researchers on \nPriorities Related to Information Integrity Research and Development. \n']","Over-reliance on synthetic data can lead to model collapse when model training over-relies on synthetic data, resulting in data points disappearing from the distribution of the new model's outputs. This threatens the robustness of the model overall and could lead to homogenized outputs, including by amplifying any homogenization from the model used to generate the synthetic training data.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 12, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What actions are suggested to address Human-AI configuration risks in evaluations involving human subjects?,"[' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output filters; Removing any personally \nidentifiable information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConfiguration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConfiguration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, differential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. 
\nData Privacy; Human-AI \nConfiguration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for fine tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Configuration \n']","The suggested actions to address Human-AI configuration risks in evaluations involving human subjects include: 1) Documenting how content provenance data is tracked and how that data interacts with privacy and security, considering anonymizing data to protect the privacy of human subjects, leveraging privacy output filters, and removing any personally identifiable information (PII) to prevent potential harm or misuse. 2) Providing human subjects with options to withdraw participation or revoke their consent for present or future use of their data in GAI applications. 3) Using techniques such as anonymization, differential privacy, or other privacy-enhancing technologies to minimize the risks associated with linking AI-generated content back to individual human subjects.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What protections should be in place to safeguard against abusive data practices?,"['You should be protected from abusive data practices via built-in \nprotections and you should have agency over how data about \nyou is used. You should be protected from violations of privacy through \ndesign choices that ensure such protections are included by default, including \nensuring that data collection conforms to reasonable expectations and that \nonly data strictly necessary for the specific context is collected. Designers, developers, and deployers of automated systems should seek your permission \nand respect your decisions regarding collection, use, access, transfer, and deletion of your data in appropriate ways and to the greatest extent possible; \nwhere not possible, alternative privacy by design safeguards should be used. \nSystems should not employ user experience and design decisions that obfuscate user choice or burden users with defaults that are privacy invasive. Consent should only be used to justify collection of data in cases where it can be \nappropriately and meaningfully given. 
Any consent requests should be brief, \nbe understandable in plain language, and give you agency over data collection \nand the specific context of use; current hard-to-understand notice-and-choice practices for broad uses of data should be changed. Enhanced \nprotections and restrictions for data and inferences related to sensitive domains, including health, work, education, criminal justice, and finance, and \nfor data pertaining to youth should put you first. In sensitive domains, your \ndata and related inferences should only be used for necessary functions, and \nyou should be protected by ethical review and use prohibitions. You and your \ncommunities should be free from unchecked surveillance; surveillance technologies should be subject to heightened oversight that includes at least \npre-deployment assessment of their potential harms and scope limits to protect privacy and civil liberties. Continuous surveillance and monitoring \nshould not be used in education, work, housing, or in other contexts where the \nuse of such surveillance technologies is likely to limit rights, opportunities, or \naccess. Whenever possible, you should have access to reporting that confirms \nyour data decisions have been respected and provides an assessment of the \npotential impact of surveillance technologies on your rights, opportunities, or \naccess. \nDATA PRIVACY\n30\n']","Protections against abusive data practices should include built-in protections, ensuring data collection conforms to reasonable expectations, collecting only necessary data, seeking user permission, respecting user decisions regarding data, using alternative privacy safeguards when necessary, avoiding privacy-invasive defaults, and providing brief and understandable consent requests. Enhanced protections and restrictions should be in place for sensitive domains, and surveillance technologies should be subject to heightened oversight.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 29, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What contributions did the Generative AI Public Working Group make to the NIST report?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. 
\nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']",The Generative AI Public Working Group provided many helpful comments and contributions to the NIST report.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What strategies use AI engagement and real-time audits to find new GAI impacts?,"[' \n28 \nMAP 5.2: Practices and personnel for supporting regular engagement with relevant AI Actors and integrating feedback about \npositive, negative, and unanticipated impacts are in place and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.2-001 \nDetermine context-based measures to identify if new impacts are present due to \nthe GAI system, including regular engagements with downstream AI Actors to \nidentify and quantify new contexts of unanticipated impacts of GAI systems. \nHuman-AI Configuration; Value \nChain and Component Integration \nMP-5.2-002 \nPlan regular engagements with AI Actors responsible for inputs to GAI systems, \nincluding third-party data and algorithms, to review and evaluate unanticipated \nimpacts. \nHuman-AI Configuration; Value \nChain and Component Integration \nAI Actor Tasks: AI Deployment, AI Design, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-\nUsers, Human Factors, Operation and Monitoring \n \nMEASURE 1.1: Approaches and metrics for measurement of AI risks enumerated during the MAP function are selected for \nimplementation starting with the most significant AI risks. 
The risks or trustworthiness characteristics that will not – or cannot – be \nmeasured are properly documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.1-001 Employ methods to trace the origin and modifications of digital content. \nInformation Integrity \nMS-1.1-002 \nIntegrate tools designed to analyze content provenance and detect data \nanomalies, verify the authenticity of digital signatures, and identify patterns \nassociated with misinformation or manipulation. \nInformation Integrity \nMS-1.1-003 \nDisaggregate evaluation metrics by demographic factors to identify any \ndiscrepancies in how content provenance mechanisms work across diverse \npopulations. \nInformation Integrity; Harmful \nBias and Homogenization \nMS-1.1-004 Develop a suite of metrics to evaluate structured public feedback exercises \ninformed by representative AI Actors. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.1-005 \nEvaluate novel methods and technologies for the measurement of GAI-related \nrisks including in content provenance, offensive cyber, and CBRN, while \nmaintaining the models\' ability to produce valid, reliable, and factually accurate \noutputs. \nInformation Integrity; CBRN \nInformation or Capabilities; \nObscene, Degrading, and/or \nAbusive Content \n', ' \n41 \nMG-2.2-006 \nUse feedback from internal and external AI Actors, users, individuals, and \ncommunities, to assess impact of AI-generated content. \nHuman-AI Configuration \nMG-2.2-007 \nUse real-time auditing tools where they can be demonstrated to aid in the \ntracking and validation of the lineage and authenticity of AI-generated data. \nInformation Integrity \nMG-2.2-008 \nUse structured feedback mechanisms to solicit and capture user input about AI-\ngenerated content to detect subtle shifts in quality or alignment with \ncommunity and societal values. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nMG-2.2-009 \nConsider opportunities to responsibly use synthetic data and other privacy \nenhancing techniques in GAI development, where appropriate and applicable, \nmatch the statistical properties of real-world data without disclosing personally \nidentifiable information or contributing to homogenization. \nData Privacy; Intellectual Property; \nInformation Integrity; \nConfabulation; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 2.3: Procedures are followed to respond to and recover from a previously unknown risk when it is identified. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.3-001 \nDevelop and update GAI system incident response and recovery plans and \nprocedures to address the following: Review and maintenance of policies and \nprocedures to account for newly encountered uses; Review and maintenance of \npolicies and procedures for detection of unanticipated uses; Verify response \nand recovery plans account for the GAI system value chain; Verify response and \nrecovery plans are updated for and include necessary details to communicate \nwith downstream GAI system Actors: Points-of-Contact (POC), Contact \ninformation, notification format. 
\nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nMANAGE 2.4: Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or \ndeactivate AI systems that demonstrate performance or outcomes inconsistent with intended use. \nAction ID \nSuggested Action \nGAI Risks \nMG-2.4-001 \nEstablish and maintain communication plans to inform AI stakeholders as part of \nthe deactivation or disengagement process of a specific GAI system (including for \nopen-source models) or context of use, including reasons, workarounds, user \naccess removal, alternative processes, contact information, etc. \nHuman-AI Configuration \n']","Plan regular engagements with AI Actors responsible for inputs to GAI systems, including third-party data and algorithms, to review and evaluate unanticipated impacts. Use real-time auditing tools where they can be demonstrated to aid in the tracking and validation of the lineage and authenticity of AI-generated data.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 31, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 44, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
How do NSF programs align with federal AI principles for safety, security, and effectiveness?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nSome U.S. government agencies have developed specific frameworks for ethical use of AI \nsystems. The Department of Energy (DOE) has activated the AI Advancement Council that oversees coordina-\ntion and advises on implementation of the DOE AI Strategy and addresses issues and/or escalations on the \nethical use and development of AI systems.20 The Department of Defense has adopted Artificial Intelligence \nEthical Principles, and tenets for Responsible Artificial Intelligence specifically tailored to its national \nsecurity and defense activities.21 Similarly, the U.S. 
Intelligence Community (IC) has developed the Principles \nof Artificial Intelligence Ethics for the Intelligence Community to guide personnel on whether and how to \ndevelop and use AI in furtherance of the IC\'s mission, as well as an AI Ethics Framework to help implement \nthese principles.22\nThe National Science Foundation (NSF) funds extensive research to help foster the \ndevelopment of automated systems that adhere to and advance their safety, security and \neffectiveness. Multiple NSF programs support research that directly addresses many of these principles: \nthe National AI Research Institutes23 support research on all aspects of safe, trustworthy, fair, and explainable \nAI algorithms and systems; the Cyber Physical Systems24 program supports research on developing safe \nautonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace25 \nprogram supports research on cybersecurity and privacy enhancing technologies in automated systems; the \nFormal Methods in the Field26 program supports research on rigorous formal verification and analysis of \nautomated systems and machine learning, and the Designing Accountable Software Systems27 program supports \nresearch on rigorous and reproducible methodologies for developing software systems with legal and regulatory \ncompliance in mind. \nSome state legislatures have placed strong transparency and validity requirements on \nthe use of pretrial risk assessments. The use of algorithmic pretrial risk assessments has been a \ncause of concern for civil rights groups.28 Idaho Code Section 19-1910, enacted in 2019,29 requires that any \npretrial risk assessment, before use in the state, first be ""shown to be free of bias against any class of \nindividuals protected from discrimination by state or federal law"", that any locality using a pretrial risk \nassessment must first formally validate the claim of its being free of bias, that ""all documents, records, and \ninformation used to build or validate the risk assessment shall be open to public inspection,"" and that assertions \nof trade secrets cannot be used ""to quash discovery in a criminal matter by a party to a criminal case."" \n22\n']","The National Science Foundation (NSF) funds extensive research to help foster the development of automated systems that adhere to and advance their safety, security, and effectiveness. 
Multiple NSF programs support research that directly addresses many of these principles: the National AI Research Institutes support research on all aspects of safe, trustworthy, fair, and explainable AI algorithms and systems; the Cyber Physical Systems program supports research on developing safe autonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace program supports research on cybersecurity and privacy enhancing technologies in automated systems; the Formal Methods in the Field program supports research on rigorous formal verification and analysis of automated systems and machine learning, and the Designing Accountable Software Systems program supports research on rigorous and reproducible methodologies for developing software systems with legal and regulatory compliance in mind.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 21, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How do testing and monitoring ensure automated systems' safety and effectiveness?,"[' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impacted communities to consider concerns and risks that may be unique to those communities, or disproportionately prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakeholders may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. 
Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, potential risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on people\'s rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, measurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unintended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessitate rollback or significant modification to a launched automated system. \n18\n', ' \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nOngoing monitoring. Automated systems should have ongoing monitoring procedures, including recalibration procedures, in place to ensure that their performance does not fall below an acceptable level over time, \nbased on changing real-world conditions or deployment contexts, post-deployment modification, or unexpected conditions. This ongoing monitoring should include continuous evaluation of performance metrics and \nharm assessments, updates of any systems, and retraining of any machine learning models as necessary, as well \nas ensuring that fallback mechanisms are in place to allow reversion to a previously working system. Monitoring should take into account the performance of both technical system components (the algorithm as well as \nany hardware components, data inputs, etc.) and human operators. 
It should include mechanisms for testing \nthe actual accuracy of any predictions or recommendations generated by a system, not just a human operator\'s \ndetermination of their accuracy. Ongoing monitoring procedures should include manual, human-led monitoring as a check in the event there are shortcomings in automated monitoring systems. These monitoring procedures should be in place for the lifespan of the deployed automated system. \nClear organizational oversight. Entities responsible for the development or use of automated systems \nshould lay out clear governance structures and procedures. This includes clearly-stated governance procedures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing \nassessment and mitigation. Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeople\'s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse \nRelevant and high-quality data. Data used as part of any automated system\'s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19\n']","Testing ensures automated systems' safety and effectiveness by following domain-specific best practices to ensure the technology works in its real-world context. It includes both automated and human-led testing, mirroring deployment conditions, and comparing system performance with existing human-driven procedures. 
Monitoring involves ongoing procedures to ensure performance does not fall below acceptable levels over time, including continuous evaluation of performance metrics, harm assessments, system updates, and retraining of machine learning models as necessary.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 18, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What AI inventories does the NAIIO offer, and how do they aid transparency and ethics per NIST?,"[' \nENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public\nAdministration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. The US Department of Transportation has publicly described the health and other benefits of these\n""traffic calming"" measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow-Vehicle-Speeds\n17. Karen Hao. Worried about your firm\'s AI ethics? These startups are here to help.\nA growing ecosystem of ""responsible AI"" ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan. 15, 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://\nwww.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for-in-2021/
\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985-Implementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. Department of Energy Artificial Intelligence and Technology Office. April 18, 2022. https://\nwww.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S. Department of Defense Responsible Artificial Intelligence Strategy and\nImplementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation-Pathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for-the-intelligence-community\n64\n', ' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. \nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. 
Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n']","The National Artificial Intelligence Initiative Office (NAIIO) offers Agency Inventories of AI Use Cases. These inventories aid transparency and ethics by providing information to the public, regulators, auditors, industry standards groups, or others engaged in independent review. This transparency helps ensure that the American people's rights, opportunities, and access, as well as their expectations around technologies, are respected.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 63, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
How do equity assessments and evaluations help reduce algorithmic bias?,"[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. 
This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n', ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. 
In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']","Equity assessments and evaluations help reduce algorithmic bias by conducting proactive equity assessments in the design phase of the technology research and development or during its acquisition. This includes reviewing potential input data, associated historical context, accessibility for people with disabilities, and societal goals to identify potential discrimination and effects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive as possible of underserved communities. Assessment could include both qualitative and quantitative evaluations of the system.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How do org policies and risk mgmt integrate stakeholder input for trustworthy AI?,"[' \n15 \nGV-1.3-004 Obtain input from stakeholder communities to identify unacceptable use, in \naccordance with activities in the AI RMF Map function. \nCBRN Information or Capabilities; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nGV-1.3-005 \nMaintain an updated hierarchy of identified and expected GAI risks connected to \ncontexts of GAI model advancement and use, potentially including specialized risk \nlevels for GAI systems that address issues such as model collapse and algorithmic \nmonoculture. 
\nHarmful Bias and Homogenization \nGV-1.3-006 \nReevaluate organizational risk tolerances to account for unacceptable negative risk \n(such as where significant negative impacts are imminent, severe harms are \nactually occurring, or large-scale risks could occur); and broad GAI negative risks, \nincluding: Immature safety or risk cultures related to AI and GAI design, \ndevelopment and deployment, public information integrity risks, including impacts \non democratic processes, unknown long-term performance characteristics of GAI. \nInformation Integrity; Dangerous, \nViolent, or Hateful Content; CBRN \nInformation or Capabilities \nGV-1.3-007 Devise a plan to halt development or deployment of a GAI system that poses \nunacceptable negative risk. \nCBRN Information and Capability; \nInformation Security; Information \nIntegrity \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.4: The risk management process and its outcomes are established through transparent policies, procedures, and other \ncontrols based on organizational risk priorities. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.4-001 \nEstablish policies and mechanisms to prevent GAI systems from generating \nCSAM, NCII or content that violates the law. \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias \nand Homogenization; \nDangerous, Violent, or Hateful \nContent \nGV-1.4-002 \nEstablish transparent acceptable use policies for GAI that address illegal use or \napplications of GAI. \nCBRN Information or \nCapabilities; Obscene, \nDegrading, and/or Abusive \nContent; Data Privacy; Civil \nRights violations \nAI Actor Tasks: AI Development, AI Deployment, Governance and Oversight \n \n', ' \n14 \nGOVERN 1.2: The characteristics of trustworthy AI are integrated into organizational policies, processes, procedures, and practices. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.2-001 \nEstablish transparency policies and processes for documenting the origin and \nhistory of training data and generated data for GAI applications to advance digital \ncontent transparency, while balancing the proprietary nature of training \napproaches. \nData Privacy; Information \nIntegrity; Intellectual Property \nGV-1.2-002 \nEstablish policies to evaluate risk-relevant capabilities of GAI and robustness of \nsafety measures, both prior to deployment and on an ongoing basis, through \ninternal and external evaluations. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: Governance and Oversight \n \nGOVERN 1.3: Processes, procedures, and practices are in place to determine the needed level of risk management activities based \non the organization\'s risk tolerance. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.3-001 \nConsider the following factors when updating or defining risk tiers for GAI: Abuses \nand impacts to information integrity; Dependencies between GAI and other IT or \ndata systems; Harm to fundamental rights or public safety; Presentation of \nobscene, objectionable, offensive, discriminatory, invalid or untruthful output; \nPsychological impacts to humans (e.g., anthropomorphization, algorithmic \naversion, emotional entanglement); Possibility for malicious use; Whether the \nsystem introduces significant new security vulnerabilities; Anticipated system \nimpact on some groups compared to others; Unreliable decision making \ncapabilities, validity, adaptability, and variability of GAI system performance over \ntime. 
\nInformation Integrity; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent; CBRN Information or \nCapabilities \nGV-1.3-002 \nEstablish minimum thresholds for performance or assurance criteria and review as \npart of deployment approval (ā€œgo/ā€no-goā€) policies, procedures, and processes, \nwith reviewed processes and approval thresholds reļ¬‚ecting measurement of GAI \ncapabilities and risks. \nCBRN Information or Capabilities; \nConfabulation; Dangerous, \nViolent, or Hateful Content \nGV-1.3-003 \nEstablish a test plan and response policy, before developing highly capable models, \nto periodically evaluate whether the model may misuse CBRN information or \ncapabilities and/or oļ¬€ensive cyber capabilities. \nCBRN Information or Capabilities; \nInformation Security \n']",The context does not provide specific information on how organizational policies and risk management integrate stakeholder input for trustworthy AI.,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 18, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 17, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How does the supplier risk framework ensure compliance with provenance standards and address GAI risks and feedback?,"[' \n21 \nGV-6.1-005 \nImplement a use-cased based supplier risk assessment framework to evaluate and \nmonitor third-party entitiesā€™ performance and adherence to content provenance \nstandards and technologies to detect anomalies and unauthorized changes; \nservices acquisition and value chain risk management; and legal compliance. \nData Privacy; Information \nIntegrity; Information Security; \nIntellectual Property; Value Chain \nand Component Integration \nGV-6.1-006 Include clauses in contracts which allow an organization to evaluate third-party \nGAI processes and standards. \nInformation Integrity \nGV-6.1-007 Inventory all third-party entities with access to organizational content and \nestablish approved GAI technology and service provider lists. \nValue Chain and Component \nIntegration \nGV-6.1-008 Maintain records of changes to content made by third parties to promote content \nprovenance, including sources, timestamps, metadata. 
\nInformation Integrity; Value Chain \nand Component Integration; \nIntellectual Property \nGV-6.1-009 \nUpdate and integrate due diligence processes for GAI acquisition and \nprocurement vendor assessments to include intellectual property, data privacy, \nsecurity, and other risks. For example, update processes to: Address solutions that \nmay rely on embedded GAI technologies; Address ongoing monitoring, \nassessments, and alerting, dynamic risk assessments, and real-time reporting \ntools for monitoring third-party GAI risks; Consider policy adjustments across GAI \nmodeling libraries, tools and APIs, ļ¬ne-tuned models, and embedded tools; \nAssess GAI vendors, open-source or proprietary GAI tools, or GAI service \nproviders against incident or vulnerability databases. \nData Privacy; Human-AI \nConļ¬guration; Information \nSecurity; Intellectual Property; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nGV-6.1-010 \nUpdate GAI acceptable use policies to address proprietary and open-source GAI \ntechnologies and data, and contractors, consultants, and other third-party \npersonnel. \nIntellectual Property; Value Chain \nand Component Integration \nAI Actor Tasks: Operation and Monitoring, Procurement, Third-party entities \n \nGOVERN 6.2: Contingency processes are in place to handle failures or incidents in third-party data or AI systems deemed to be \nhigh-risk. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.2-001 \nDocument GAI risks associated with system value chain to identify over-reliance \non third-party data and to identify fallbacks. \nValue Chain and Component \nIntegration \nGV-6.2-002 \nDocument incidents involving third-party GAI data and systems, including open-\ndata and open-source software. \nIntellectual Property; Value Chain \nand Component Integration \n', ' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. \nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more signiļ¬cant risks. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-partyā€™s intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize diļ¬€erent types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). 
\nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management eļ¬€orts with third parties (e.g., incidents detected and \nresponse times). \nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-deļ¬ned contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property \n']","The supplier risk framework ensures compliance with provenance standards and addresses GAI risks and feedback by implementing a use-case based supplier risk assessment framework to evaluate and monitor third-party entities' performance and adherence to content provenance standards and technologies to detect anomalies and unauthorized changes. It also includes clauses in contracts to allow an organization to evaluate third-party GAI processes and standards, maintains records of changes to content made by third parties, and updates due diligence processes for GAI acquisition and procurement vendor assessments to include intellectual property, data privacy, security, and other risks. Additionally, it allocates time and resources for outreach, feedback, and recourse processes in GAI system development.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 24, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"How does the AI Bill of Rights ensure fair access to education, housing, credit, and jobs while tackling algorithmic harms?","["" \n \n \nSECTION TITLE\nApplying The Blueprint for an AI Bill of Rights \nWhile many of the concerns addressed in this framework derive from the use of AI, the technical \ncapabilities and specific definitions of such systems change with the speed of innovation, and the potential \nharms of their use occur even with less technologically sophisticated tools. Thus, this framework uses a two-\npart test to determine what systems are in scope. 
This framework applies to (1) automated systems that (2) \nhave the potential to meaningfully impact the American publicā€™s rights, opportunities, or access to \ncritical resources or services. These rights, opportunities, and access to critical resources of services should \nbe enjoyed equally and be fully protected, regardless of the changing role that automated systems may play in \nour lives. \nThis framework describes protections that should be applied with respect to all automated systems that \nhave the potential to meaningfully impact individuals' or communities' exercise of: \nRIGHTS, OPPORTUNITIES, OR ACCESS\nCivil rights, civil liberties, and privacy, including freedom of speech, voting, and protections from discrimi\xad\nnation, excessive punishment, unlawful surveillance, and violations of privacy and other freedoms in both \npublic and private sector contexts; \nEqual opportunities, including equitable access to education, housing, credit, employment, and other \nprograms; or, \nAccess to critical resources or services, such as healthcare, financial services, safety, social services, \nnon-deceptive information about goods and services, and government benefits. \nA list of examples of automated systems for which these principles should be considered is provided in the \nAppendix. The Technical Companion, which follows, offers supportive guidance for any person or entity that \ncreates, deploys, or oversees automated systems. \nConsidered together, the five principles and associated practices of the Blueprint for an AI Bill of \nRights form an overlapping set of backstops against potential harms. This purposefully overlapping \nframework, when taken as a whole, forms a blueprint to help protect the public from harm. \nThe measures taken to realize the vision set forward in this framework should be proportionate \nwith the extent and nature of the harm, or risk of harm, to people's rights, opportunities, and \naccess. \nRELATIONSHIP TO EXISTING LAW AND POLICY\nThe Blueprint for an AI Bill of Rights is an exercise in envisioning a future where the American public is \nprotected from the potential harms, and can fully enjoy the benefits, of automated systems. It describes princi\xad\nples that can help ensure these protections. Some of these protections are already required by the U.S. Constitu\xad\ntion or implemented under existing U.S. laws. For example, government surveillance, and data search and \nseizure are subject to legal requirements and judicial oversight. There are Constitutional requirements for \nhuman review of criminal investigative matters and statutory requirements for judicial review. Civil rights laws \nprotect the American people against discrimination. \n8\n"", ' \n \n \nABOUT THIS FRAMEWORK\xad\xad\xad\xad\xad\nThe Blueprint for an AI Bill of Rights is a set of five principles and associated practices to help guide the \ndesign, use, and deployment of automated systems to protect the rights of the American public in the age of \nartificial intel-ligence. Developed through extensive consultation with the American public, these principles are \na blueprint for building and deploying automated systems that are aligned with democratic values and protect \ncivil rights, civil liberties, and privacy. 
The Blueprint for an AI Bill of Rights includes this Foreword, the five \nprinciples, notes on Applying the The Blueprint for an AI Bill of Rights, and a Technical Companion that gives \nconcrete steps that can be taken by many kinds of organizationsā€”from governments at all levels to companies of \nall sizesā€”to uphold these values. Experts from across the private sector, governments, and international \nconsortia have published principles and frameworks to guide the responsible use of automated systems; this \nframework provides a national values statement and toolkit that is sector-agnostic to inform building these \nprotections into policy, practice, or the technological design process. Where existing law or policyā€”such as \nsector-specific privacy laws and oversight requirementsā€”do not already provide guidance, the Blueprint for an \nAI Bill of Rights should be used to inform policy decisions.\nLISTENING TO THE AMERICAN PUBLIC\nThe White House Office of Science and Technology Policy has led a year-long process to seek and distill input \nfrom people across the countryā€”from impacted communities and industry stakeholders to technology develop-\ners and other experts across fields and sectors, as well as policymakers throughout the Federal governmentā€”on \nthe issue of algorithmic and data-driven harms and potential remedies. Through panel discussions, public listen-\ning sessions, meetings, a formal request for information, and input to a publicly accessible and widely-publicized \nemail address, people throughout the United States, public servants across Federal agencies, and members of the \ninternational community spoke up about both the promises and potential harms of these technologies, and \nplayed a central role in shaping the Blueprint for an AI Bill of Rights. The core messages gleaned from these \ndiscussions include that AI has transformative potential to improve Americansā€™ lives, and that preventing the \nharms of these technologies is both necessary and achievable. The Appendix includes a full list of public engage-\nments. 
\n4\n']","The AI Bill of Rights ensures fair access to education, housing, credit, and jobs by describing protections that should be applied with respect to all automated systems that have the potential to meaningfully impact individuals' or communities' exercise of equal opportunities, including equitable access to education, housing, credit, employment, and other programs.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 7, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 3, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How to ensure automated systems avoid algorithmic bias pre- and post-deployment?,"["" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. 
For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. \nDisparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections \n"", ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. 
The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. 
\n26\nAlgorithmic \nDiscrimination \nProtections \n']","To ensure automated systems avoid algorithmic bias pre- and post-deployment, the following steps should be taken: 1. Conduct proactive equity assessments in the design phase to review potential input data, historical context, accessibility for people with disabilities, and societal goals. 2. Use representative and robust data that is reviewed for bias based on historical and societal context. 3. Avoid using demographic information directly in the design, development, or deployment of the system to prevent algorithmic discrimination. 4. Perform proactive testing to identify and remove proxies that could lead to algorithmic discrimination. 5. Regularly monitor the system to assess algorithmic discrimination that might arise from unforeseen interactions, changes to the system, or changes to the context of use or associated data. 6. Implement disparity assessment and mitigation procedures to address any identified disparities. 7. Document any disparities and provide justification for continued use of the system if disparities are found. 8. Reconsider the use of the system if adequate mitigation of disparities is not possible.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do current TEVV limits and public feedback methods create measurement gaps in GAI systems?,"[' \n49 \nearly lifecycle TEVV approaches are developed and matured for GAI, organizations may use \nrecommended ā€œpre-deployment testingā€ practices to measure performance, capabilities, limits, risks, \nand impacts. This section describes risk measurement and estimation as part of pre-deployment TEVV, \nand examines the state of play for pre-deployment testing methodologies. \nLimitations of Current Pre-deployment Test Approaches \nCurrently available pre-deployment TEVV processes used for GAI applications may be inadequate, non-\nsystematically applied, or fail to reļ¬‚ect or mismatched to deployment contexts. For example, the \nanecdotal testing of GAI system capabilities through video games or standardized tests designed for \nhumans (e.g., intelligence tests, professional licensing exams) does not guarantee GAI system validity or \nreliability in those domains. Similarly, jailbreaking or prompt engineering tests may not systematically \nassess validity or reliability risks. \nMeasurement gaps can arise from mismatches between laboratory and real-world settings. 
Current \ntesting approaches often remain focused on laboratory conditions or restricted to benchmark test \ndatasets and in silico techniques that may not extrapolate well toā€”or directly assess GAI impacts in real-\nworld conditions. For example, current measurement gaps for GAI make it diļ¬ƒcult to precisely estimate \nits potential ecosystem-level or longitudinal risks and related political, social, and economic impacts. \nGaps between benchmarks and real-world use of GAI systems may likely be exacerbated due to prompt \nsensitivity and broad heterogeneity of contexts of use. \nA.1.5. Structured Public Feedback \nStructured public feedback can be used to evaluate whether GAI systems are performing as intended \nand to calibrate and verify traditional measurement methods. Examples of structured feedback include, \nbut are not limited to: \nā€¢ \nParticipatory Engagement Methods: Methods used to solicit feedback from civil society groups, \naļ¬€ected communities, and users, including focus groups, small user studies, and surveys. \nā€¢ \nField Testing: Methods used to determine how people interact with, consume, use, and make \nsense of AI-generated information, and subsequent actions and eļ¬€ects, including UX, usability, \nand other structured, randomized experiments. \nā€¢ \nAI Red-teaming: A structured testing exercise used to probe an AI system to ļ¬nd ļ¬‚aws and \nvulnerabilities such as inaccurate, harmful, or discriminatory outputs, often in a controlled \nenvironment and in collaboration with system developers. \nInformation gathered from structured public feedback can inform design, implementation, deployment \napproval, maintenance, or decommissioning decisions. Results and insights gleaned from these exercises \ncan serve multiple purposes, including improving data quality and preprocessing, bolstering governance \ndecision making, and enhancing system documentation and debugging practices. When implementing \nfeedback activities, organizations should follow human subjects research requirements and best \npractices such as informed consent and subject compensation. \n', ' \n50 \nParticipatory Engagement Methods \nOn an ad hoc or more structured basis, organizations can design and use a variety of channels to engage \nexternal stakeholders in product development or review. Focus groups with select experts can provide \nfeedback on a range of issues. Small user studies can provide feedback from representative groups or \npopulations. Anonymous surveys can be used to poll or gauge reactions to speciļ¬c features. Participatory \nengagement methods are often less structured than ļ¬eld testing or red teaming, and are more \ncommonly used in early stages of AI or product development. \nField Testing \nField testing involves structured settings to evaluate risks and impacts and to simulate the conditions \nunder which the GAI system will be deployed. Field style tests can be adapted from a focus on user \npreferences and experiences towards AI risks and impacts ā€“ both negative and positive. When carried \nout with large groups of users, these tests can provide estimations of the likelihood of risks and impacts \nin real world interactions. \nOrganizations may also collect feedback on outcomes, harms, and user experience directly from users in \nthe production environment after a model has been released, in accordance with human subject \nstandards such as informed consent and compensation. 
Organizations should follow applicable human \nsubjects research requirements, and best practices such as informed consent and subject compensation, \nwhen implementing feedback activities. \nAI Red-teaming \nAI red-teaming is an evolving practice that references exercises often conducted in a controlled \nenvironment and in collaboration with AI developers building AI models to identify potential adverse \nbehavior or outcomes of a GAI model or system, how they could occur, and stress test safeguardsā€. AI \nred-teaming can be performed before or after AI models or systems are made available to the broader \npublic; this section focuses on red-teaming in pre-deployment contexts. \nThe quality of AI red-teaming outputs is related to the background and expertise of the AI red team \nitself. Demographically and interdisciplinarily diverse AI red teams can be used to identify ļ¬‚aws in the \nvarying contexts where GAI will be used. For best results, AI red teams should demonstrate domain \nexpertise, and awareness of socio-cultural aspects within the deployment context. AI red-teaming results \nshould be given additional analysis before they are incorporated into organizational governance and \ndecision making, policy and procedural updates, and AI risk management eļ¬€orts. \nVarious types of AI red-teaming may be appropriate, depending on the use case: \nā€¢ \nGeneral Public: Performed by general users (not necessarily AI or technical experts) who are \nexpected to use the model or interact with its outputs, and who bring their own lived \nexperiences and perspectives to the task of AI red-teaming. These individuals may have been \nprovided instructions and material to complete tasks which may elicit harmful model behaviors. \nThis type of exercise can be more eļ¬€ective with large groups of AI red-teamers. \nā€¢ \nExpert: Performed by specialists with expertise in the domain or speciļ¬c AI red-teaming context \nof use (e.g., medicine, biotech, cybersecurity). \nā€¢ \nCombination: In scenarios when it is diļ¬ƒcult to identify and recruit specialists with suļ¬ƒcient \ndomain and contextual expertise, AI red-teaming exercises may leverage both expert and \n']","Current TEVV processes for GAI applications may be inadequate, non-systematically applied, or mismatched to deployment contexts. Measurement gaps can arise from mismatches between laboratory and real-world settings. Current testing approaches often remain focused on laboratory conditions or restricted to benchmark test datasets and in silico techniques that may not extrapolate well to real-world conditions. 
Structured public feedback can help evaluate whether GAI systems are performing as intended and inform design, implementation, deployment approval, maintenance, or decommissioning decisions.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 52, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 53, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"How do error models for pre-deployment metrics ensure AI validity and address biases, incorporating user/community feedback?","[' \n38 \nMEASURE 2.13: Eļ¬€ectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric eļ¬€ectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are diļ¬ƒcult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might aļ¬€ect \ndiļ¬€erent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. 
\nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. 
\nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","Error models for pre-deployment metrics ensure AI validity by demonstrating construct validity for each metric, measuring or estimating biases or statistical variance in applied metrics or structured human feedback processes, and leveraging domain expertise when modeling complex societal constructs such as hateful content.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What themes did the OSTP's initiative explore, and how did they shape the AI Bill of Rights?","[""APPENDIX\nā€¢ OSTP conducted meetings with a variety of stakeholders in the private sector and civil society. Some of these\nmeetings were specifically focused on providing ideas related to the development of the Blueprint for an AI\nBill of Rights while others provided useful general context on the positive use cases, potential harms, and/or\noversight possibilities for these technologies. 
Participants in these conversations from the private sector and\ncivil society included:\nAdobe \nAmerican Civil Liberties Union \n(ACLU) \nThe Aspen Commission on \nInformation Disorder \nThe Awood Center \nThe Australian Human Rights \nCommission \nBiometrics Institute \nThe Brookings Institute \nBSA | The Software Alliance \nCantellus Group \nCenter for American Progress \nCenter for Democracy and \nTechnology \nCenter on Privacy and Technology \nat Georgetown Law \nChristiana Care \nColor of Change \nCoworker \nData Robot \nData Trust Alliance \nData and Society Research Institute \nDeepmind \nEdSAFE AI Alliance \nElectronic Privacy Information \nCenter (EPIC) \nEncode Justice \nEqual AI \nGoogle \nHitachi's AI Policy Committee \nThe Innocence Project \nInstitute of Electrical and \nElectronics Engineers (IEEE) \nIntuit \nLawyers Committee for Civil Rights \nUnder Law \nLegal Aid Society \nThe Leadership Conference on \nCivil and Human Rights \nMeta \nMicrosoft \nThe MIT AI Policy Forum \nMovement Alliance Project \nThe National Association of \nCriminal Defense Lawyers \nOā€™Neil Risk Consulting & \nAlgorithmic Auditing \nThe Partnership on AI \nPinterest \nThe Plaintext Group \npymetrics \nSAP \nThe Security Industry Association \nSoftware and Information Industry \nAssociation (SIIA) \nSpecial Competitive Studies Project \nThorn \nUnited for Respect \nUniversity of California at Berkeley \nCitris Policy Lab \nUniversity of California at Berkeley \nLabor Center \nUnfinished/Project Liberty \nUpturn \nUS Chamber of Commerce \nUS Chamber of Commerce \nTechnology Engagement Center \nA.I. Working Group\nVibrent Health\nWarehouse Worker Resource\nCenter\nWaymap\n62\n"", ' \n \n \n \n \nSECTION TITLE\nAPPENDIX\nListening to the American People \nThe White House Office of Science and Technology Policy (OSTP) led a yearlong process to seek and distill \ninput from people across the country ā€“ from impacted communities to industry stakeholders to \ntechnology developers to other experts across fields and sectors, as well as policymakers across the Federal \ngovernment ā€“ on the issue of algorithmic and data-driven harms and potential remedies. Through panel \ndiscussions, public listening sessions, private meetings, a formal request for information, and input to a \npublicly accessible and widely-publicized email address, people across the United States spoke up about \nboth the promises and potential harms of these technologies, and played a central role in shaping the \nBlueprint for an AI Bill of Rights. \nPanel Discussions to Inform the Blueprint for An AI Bill of Rights \nOSTP co-hosted a series of six panel discussions in collaboration with the Center for American Progress, \nthe Joint Center for Political and Economic Studies, New America, the German Marshall Fund, the Electronic \nPrivacy Information Center, and the Mozilla Foundation. The purpose of these convenings ā€“ recordings of \nwhich are publicly available online112 ā€“ was to bring together a variety of experts, practitioners, advocates \nand federal government officials to offer insights and analysis on the risks, harms, benefits, and \npolicy opportunities of automated systems. Each panel discussion was organized around a wide-ranging \ntheme, exploring current challenges and concerns and considering what an automated society that \nrespects democratic values should look like. 
These discussions focused on the topics of consumer \nrights and protections, the criminal justice system, equal opportunities and civil justice, artificial \nintelligence and democratic values, social welfare and development, and the healthcare system. \nSummaries of Panel Discussions: \nPanel 1: Consumer Rights and Protections. This event explored the opportunities and challenges for \nindividual consumers and communities in the context of a growing ecosystem of AI-enabled consumer \nproducts, advanced platforms and services, ā€œInternet of Thingsā€ (IoT) devices, and smart city products and \nservices. \nWelcome:\nā€¢\nRashida Richardson, Senior Policy Advisor for Data and Democracy, White House Office of Science and\nTechnology Policy\nā€¢\nKaren Kornbluh, Senior Fellow and Director of the Digital Innovation and Democracy Initiative, German\nMarshall Fund\nModerator: \nDevin E. Willis, Attorney, Division of Privacy and Identity Protection, Bureau of Consumer Protection, Federal \nTrade Commission \nPanelists: \nā€¢\nTamika L. Butler, Principal, Tamika L. Butler Consulting\nā€¢\nJennifer Clark, Professor and Head of City and Regional Planning, Knowlton School of Engineering, Ohio\nState University\nā€¢\nCarl Holshouser, Senior Vice President for Operations and Strategic Initiatives, TechNet\nā€¢\nSurya Mattu, Senior Data Engineer and Investigative Data Journalist, The Markup\nā€¢\nMariah Montgomery, National Campaign Director, Partnership for Working Families\n55\n']","The OSTP's initiative explored themes such as consumer rights and protections, the criminal justice system, equal opportunities and civil justice, artificial intelligence and democratic values, social welfare and development, and the healthcare system. These discussions brought together experts, practitioners, advocates, and federal government officials to offer insights and analysis on the risks, harms, benefits, and policy opportunities of automated systems, shaping the Blueprint for an AI Bill of Rights.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 61, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 54, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How could prompt injection attacks on GAI systems worsen disinformation and harmful content?,"[' \n11 \nvalue chain (e.g., data inputs, processing, GAI training, or deployment environments), conventional \ncybersecurity practices may need to adapt or evolve. \nFor instance, prompt injection involves modifying what input is provided to a GAI system so that it \nbehaves in unintended ways. 
In direct prompt injections, attackers might craft malicious prompts and \ninput them directly to a GAI system, with a variety of downstream negative consequences to \ninterconnected systems. Indirect prompt injection attacks occur when adversaries remotely (i.e., without \na direct interface) exploit LLM-integrated applications by injecting prompts into data likely to be \nretrieved. Security researchers have already demonstrated how indirect prompt injections can exploit \nvulnerabilities by stealing proprietary data or running malicious code remotely on a machine. Merely \nquerying a closed production model can elicit previously undisclosed information about that model. \nAnother cybersecurity risk to GAI is data poisoning, in which an adversary compromises a training \ndataset used by a model to manipulate its outputs or operation. Malicious tampering with data or parts \nof the model could exacerbate risks associated with GAI system outputs. \nTrustworthy AI Characteristics: Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n2.10. \nIntellectual Property \nIntellectual property risks from GAI systems may arise where the use of copyrighted works is not a fair \nuse under the fair use doctrine. If a GAI systemā€™s training data included copyrighted material, GAI \noutputs displaying instances of training data memorization (see Data Privacy above) could infringe on \ncopyright. \nHow GAI relates to copyright, including the status of generated content that is similar to but does not \nstrictly copy work protected by copyright, is currently being debated in legal fora. Similar discussions are \ntaking place regarding the use or emulation of personal identity, likeness, or voice without permission. \nTrustworthy AI Characteristics: Accountable and Transparent, Fair with Harmful Bias Managed, Privacy \nEnhanced \n2.11. \nObscene, Degrading, and/or Abusive Content \nGAI can ease the production of and access to illegal non-consensual intimate imagery (NCII) of adults, \nand/or child sexual abuse material (CSAM). GAI-generated obscene, abusive or degrading content can \ncreate privacy, psychological and emotional, and even physical harms, and in some cases may be illegal. \nGenerated explicit or obscene AI content may include highly realistic ā€œdeepfakesā€ of real individuals, \nincluding children. The spread of this kind of material can have downstream negative consequences: in \nthe context of CSAM, even if the generated images do not resemble speciļ¬c individuals, the prevalence \nof such images can divert time and resources from eļ¬€orts to ļ¬nd real-world victims. Outside of CSAM, \nthe creation and spread of NCII disproportionately impacts women and sexual minorities, and can have \nsubsequent negative consequences including decline in overall mental health, substance abuse, and \neven suicidal thoughts. \nData used for training GAI models may unintentionally include CSAM and NCII. A recent report noted \nthat several commonly used GAI training datasets were found to contain hundreds of known images of \n', ' \n10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. 
Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards speciļ¬c demographics. Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic ā€œdeepfakesā€ ā€“ that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities. \nDisinformation and misinformation ā€“ both of which may be facilitated by GAI ā€“ may erode public trust in \ntrue or valid evidence and information, with downstream eļ¬€ects. For example, a synthetic image of a \nPentagon blast went viral and brieļ¬‚y caused a drop in the stock market. Generative AI models can also \nassist malicious actors in creating compelling imagery and propaganda to support disinformation \ncampaigns, which may not be photorealistic, but could enable these campaigns to gain more reach and \nengagement on social media platforms. Additionally, generative AI models can assist malicious actors in \ncreating fraudulent content intended to impersonate others. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe, Valid and Reliable, Interpretable and \nExplainable \n2.9. Information Security \nInformation security for computer systems and data is a mature ļ¬eld with widely accepted and \nstandardized practices for oļ¬€ensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of oļ¬€ensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. \nOļ¬€ensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of \nthe attack chain, including informing attackers on how to proactively evade threat detection and escalate \nprivileges after gaining system access. \nInformation security for GAI models and systems also includes maintaining availability of the GAI system \nand the integrity and (when applicable) the conļ¬dentiality of the GAI code, training data, and model \nweights. To identify and secure potential attack points in AI systems or speciļ¬c components of the AI \n \n \n12 See also https://doi.org/10.6028/NIST.AI.100-4, to be published. \n']","Prompt injection attacks on GAI systems can worsen disinformation and harmful content by allowing attackers to craft malicious prompts that manipulate the system's behavior in unintended ways. This can lead to the production and dissemination of false, inaccurate, or misleading content (misinformation) at scale, and enable the deliberate creation of disinformation with the intent to deceive or cause harm. 
Additionally, such attacks can facilitate the generation of highly realistic 'deepfakes' and other synthetic content that can erode public trust in true or valid information.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 14, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 13, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True "How does the Privacy Act of 1974 handle data risks, retention, and access?","[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nDATA PRIVACY \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \nThe Privacy Act of 1974 requires privacy protections for personal information in federal \nrecords systems, including limits on data retention, and also provides individuals a general \nright to access and correct their data. Among other things, the Privacy Act limits the storage of individual \ninformation in federal systems of records, illustrating the principle of limiting the scope of data retention. Under \nthe Privacy Act, federal agencies may only retain data about an individual that is "relevant and necessary" to \naccomplish an agency's statutory purpose or to comply with an Executive Order of the President. The law allows \nfor individuals to be able to access any of their individual information stored in a federal system of records, if not \nincluded under one of the systems of records exempted pursuant to the Privacy Act. In these cases, federal agen\xad\ncies must provide a method for an individual to determine if their personal information is stored in a particular \nsystem of records, and must provide procedures for an individual to contest the contents of a record about them. \nFurther, the Privacy Act allows for a cause of action for an individual to seek legal relief if a federal agency does not \ncomply with the Privacy Act's requirements.
Among other things, a court may order a federal agency to amend or \ncorrect an individual's information in its records or award monetary damages if an inaccurate, irrelevant, untimely, \nor incomplete record results in an adverse determination about an individual's "qualifications, character, rights, … \nopportunities…, or benefits." \nNIST's Privacy Framework provides a comprehensive, detailed and actionable approach for \norganizations to manage privacy risks. The NIST Framework gives organizations ways to identify and \ncommunicate their privacy risks and goals to support ethical decision-making in system, product, and service \ndesign or deployment, as well as the measures they are taking to demonstrate compliance with applicable laws \nor regulations. It has been voluntarily adopted by organizations across many different sectors around the world.78\nA school board's attempt to surveil public school students—undertaken without \nadequate community input—sparked a state-wide biometrics moratorium.79 Reacting to a plan in \nthe city of Lockport, New York, the state's legislature banned the use of facial recognition systems and other \n"biometric identifying technology" in schools until July 1, 2022.80 The law additionally requires that a report on \nthe privacy, civil rights, and civil liberties implications of the use of such technologies be issued before \nbiometric identification technologies can be used in New York schools. \nFederal law requires employers, and any consultants they may retain, to report the costs \nof surveilling employees in the context of a labor dispute, providing a transparency \nmechanism to help protect worker organizing. Employers engaging in workplace surveillance ""where \nan object there-of, directly or indirectly, is […] to obtain information concerning the activities of employees or a \nlabor organization in connection with a labor dispute"" must report expenditures relating to this surveillance to \nthe Department of Labor Office of Labor-Management Standards, and consultants who employers retain for \nthese purposes must also file reports regarding their activities.81\nPrivacy choices on smartphones show that when technologies are well designed, privacy \nand data agency can be meaningful and not overwhelming. These choices—such as contextual, timely \nalerts about location tracking—are brief, direct, and use-specific. Many of the expectations listed here for \nprivacy by design and use-specific consent mirror those distributed to developers as best practices when \ndeveloping for smart phone devices,82 such as being transparent about how user data will be used, asking for app \npermissions during their use so that the use-context will be clear to users, and ensuring that the app will still \nwork if users deny (or later revoke) some permissions. \n39\n']","The Privacy Act of 1974 requires privacy protections for personal information in federal records systems, including limits on data retention, and also provides individuals a general right to access and correct their data. The Act limits the storage of individual information in federal systems of records to data that is 'relevant and necessary' to accomplish an agency's statutory purpose or to comply with an Executive Order of the President. It allows individuals to access their information stored in federal systems, unless exempted, and provides procedures to contest the contents of a record.
Additionally, the Act allows individuals to seek legal relief if a federal agency does not comply with its requirements, including amending or correcting records or awarding monetary damages for adverse determinations based on inaccurate, irrelevant, untimely, or incomplete records.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 38, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True How can anonymization and public feedback improve privacy and security in GAI evaluations?,"[' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output filters; Removing any personally \nidentifiable information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConfiguration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConfiguration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, differential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConfiguration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for fine tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Configuration \n', "" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation.
\nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Configuration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval ("go"/"no-go" \ndecisions), monitoring, and decommission decisions. \nHuman-AI Configuration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]","Anonymization can improve privacy and security in GAI evaluations by protecting the privacy of human subjects and minimizing the risks associated with linking AI-generated content back to individual human subjects.
Public feedback can be recorded and integrated from operators, users, and potentially impacted communities to assess content quality and potential biases, thereby enhancing information integrity and reducing harmful bias and homogenization.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True "How do safeguards & pre-deployment tests boost system reliability?","[' \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nWhile technologies are being deployed to solve problems across a wide array of issues, our reliance on technology can \nalso lead to its use in situations where it has not yet been proven to work—either at all or within an acceptable range \nof error. In other cases, technologies do not work as intended or as promised, causing substantial and unjustified harm. \nAutomated systems sometimes rely on data from other systems, including historical data, allowing irrelevant informa\xad\ntion from past decisions to infect decision-making in unrelated situations. In some cases, technologies are purposeful\xad\nly designed to violate the safety of others, such as technologies designed to facilitate stalking; in other cases, intended \nor unintended uses lead to unintended harms. \nMany of the harms resulting from these technologies are preventable, and actions are already being taken to protect \nthe public. Some companies have put in place safeguards that have prevented harm from occurring by ensuring that \nkey development decisions are vetted by an ethics review; others have identified and mitigated harms found through \npre-deployment testing and ongoing monitoring processes. Governments at all levels have existing public consulta\xad\ntion processes that may be applied when considering the use of new automated systems, and existing product develop\xad\nment and testing practices already protect the American public from many potential harms. \nStill, these kinds of practices are deployed too rarely and unevenly. Expanded, proactive protections could build on \nthese existing practices, increase confidence in the use of automated systems, and protect the American public.
Inno\xad\nvators deserve clear rules of the road that allow new ideas to flourish, and the American public deserves protections \nfrom unsafe outcomes. All can benefit from assurances that automated systems will be designed, tested, and consis\xad\ntently confirmed to work as intended, and that they will be proactively protected from foreseeable unintended harm\xad\nful outcomes. \n•\nA proprietary model was developed to predict the likelihood of sepsis in hospitalized patients and was imple\xad\nmented at hundreds of hospitals around the country. An independent study showed that the model predictions\nunderperformed relative to the designer's claims while also causing 'alert fatigue' by falsely alerting\nlikelihood of sepsis.6\n•\nOn social media, Black people who quote and criticize racist messages have had their own speech silenced when\na platform's automated moderation system failed to distinguish this "counter speech" (or other critique\nand journalism) from the original hateful messages to which such speech responded.7\n•\nA device originally developed to help people track and find lost items has been used as a tool by stalkers to track\nvictims' locations in violation of their privacy and safety. The device manufacturer took steps after release to\nprotect people from unwanted tracking by alerting people on their phones when a device is found to be moving\nwith them over time and also by having the device make an occasional noise, but not all phones are able\nto receive the notification and the devices remain a safety concern due to their misuse.8 \n•\nAn algorithm used to deploy police was found to repeatedly send police to neighborhoods they regularly visit,\neven if those neighborhoods were not the ones with the highest crime rates. These incorrect crime predictions\nwere the result of a feedback loop generated from the reuse of data from previous arrests and algorithm\npredictions.9\n16\n']","Safeguards and pre-deployment tests boost system reliability by ensuring that key development decisions are vetted by an ethics review, identifying and mitigating harms found through pre-deployment testing and ongoing monitoring processes, and protecting the public from many potential harms. These practices increase confidence in the use of automated systems and protect the American public from unsafe outcomes.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 15, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True "How do feedback mechanisms tie into AI impact and governance?","[' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems.
\nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Affected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more significant risks. \nHuman-AI Configuration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Affected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-party's intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize different types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management efforts with third parties (e.g., incidents detected and \nresponse times). \nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-defined contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property \n']","Feedback mechanisms are tied into AI impact and governance through organizational policies and practices that collect, consider, prioritize, and integrate feedback from those external to the team that developed or deployed the AI system. This feedback addresses the potential individual and societal impacts related to AI risks.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True "How does public reporting curb algorithmic bias?","[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation.
As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n']","Public reporting curbs algorithmic bias by providing transparency through an algorithmic impact assessment. This assessment includes consultation results, equity assessments, accessibility designs and testing, disparity testing, documentation of remaining disparities, and details of mitigation implementations. Making this information public allows for straightforward public accountability.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True "Which action reviews transparency reports for GAI policy breaches?","[' \n34 \nMS-2.7-009 Regularly assess and verify that security measures remain effective and have not \nbeen compromised. \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 2.8: Risks associated with transparency and accountability – as identified in the MAP function – are examined and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.8-001 \nCompile statistics on actual policy violations, take-down requests, and intellectual \nproperty infringement for organizational GAI systems: Analyze transparency \nreports across demographic groups, languages groups. \nIntellectual Property; Harmful Bias \nand Homogenization \nMS-2.8-002 Document the instructions given to data annotators or AI red-teamers.
\nHuman-AI Configuration \nMS-2.8-003 \nUse digital content transparency solutions to enable the documentation of each \ninstance where content is generated, modified, or shared to provide a tamper-\nproof history of the content, promote transparency, and enable traceability. \nRobust version control systems can also be applied to track changes across the AI \nlifecycle over time. \nInformation Integrity \nMS-2.8-004 Verify adequacy of GAI system user instructions through user testing. \nHuman-AI Configuration \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","MS-2.8-001 Compile statistics on actual policy violations, take-down requests, and intellectual property infringement for organizational GAI systems: Analyze transparency reports across demographic groups, languages groups.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 37, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True "Why involve diverse communities early in system dev?","[' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures.
Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\xad\ntial risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on people's rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, mea\xad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. \n18\n']","Diverse communities should be involved early in system development to consider concerns and risks that may be unique to those communities, or disproportionately prevalent or severe for them.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True "How to balance synthetic data and environmental impacts to avoid model collapse?","[' \n37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse.
\nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities – as identified in the MAP \nfunction – are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. \nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, fine tuning, and deploying models: Verify tradeoffs \nbetween resources used at inference time versus additional resources required \nat training time. \nEnvironmental \nMS-2.12-004 Verify effectiveness of carbon capture or offset programs for GAI training and \napplications, and address green-washing concerns. \nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",The answer to given question is not present in context,reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 40, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True "How to ethically and securely use sensitive data?","[' \n \n \n \n \n \n \nDATA PRIVACY \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \xad\xad\xad\xad\xad\xad\nIn addition to the privacy expectations above for general non-sensitive data, any system collecting, using, shar-\ning, or storing sensitive data should meet the expectations below. Depending on the technological use case and \nbased on an ethical assessment, consent for sensitive data may need to be acquired from a guardian and/or child. \nProvide enhanced protections for data related to sensitive domains \nNecessary functions only. Sensitive data should only be used for functions strictly necessary for that \ndomain or for functions that are required for administrative reasons (e.g., school attendance records), unless \nconsent is acquired, if appropriate, and the additional expectations in this section are met. Consent for non-\nnecessary functions should be optional, i.e., should not be required, incentivized, or coerced in order to \nreceive opportunities or access to services. In cases where data is provided to an entity (e.g., health insurance \ncompany) in order to facilitate payment for such a need, that data should only be used for that purpose. \nEthical review and use prohibitions.
Any use of sensitive data or decision process based in part on sensi-\ntive data that might limit rights, opportunities, or access, whether the decision is automated or not, should go \nthrough a thorough ethical review and monitoring, both in advance and by periodic review (e.g., via an indepen-\ndent ethics committee or similarly robust process). In some cases, this ethical review may determine that data \nshould not be used or shared for specific uses even with consent. Some novel uses of automated systems in this \ncontext, where the algorithm is dynamically developing and where the science behind the use case is not well \nestablished, may also count as human subject experimentation, and require special review under organizational \ncompliance bodies applying medical, scientific, and academic human subject experimentation ethics rules and \ngovernance procedures. \nData quality. In sensitive domains, entities should be especially careful to maintain the quality of data to \navoid adverse consequences arising from decision-making based on flawed or inaccurate data. Such care is \nnecessary in a fragmented, complex data ecosystem and for datasets that have limited access such as for fraud \nprevention and law enforcement. It should be not left solely to individuals to carry the burden of reviewing and \ncorrecting data. Entities should conduct regular, independent audits and take prompt corrective measures to \nmaintain accurate, timely, and complete data. \nLimit access to sensitive data and derived data. Sensitive data and derived data should not be sold, \nshared, or made public as part of data brokerage or other agreements. Sensitive data includes data that can be \nused to infer sensitive information; even systems that are not directly marketed as sensitive domain technologies \nare expected to keep sensitive data private. Access to such data should be limited based on necessity and based \non a principle of local control, such that those individuals closest to the data subject have more access while \nthose who are less proximate do not (e.g., a teacher has access to their students' daily progress data while a \nsuperintendent does not). \nReporting. In addition to the reporting on data privacy (as listed above for non-sensitive data), entities devel-\noping technologies related to a sensitive domain and those collecting, using, storing, or sharing sensitive data \nshould, whenever appropriate, regularly provide public reports describing: any data security lapses or breaches \nthat resulted in sensitive data leaks; the number, type, and outcomes of ethical pre-reviews undertaken; a \ndescription of any data sold, shared, or made public, and how that data was assessed to determine it did not pres-\nent a sensitive data risk; and ongoing risk identification and management procedures, and any mitigation added \nbased on these procedures. Reporting should be provided in a clear and machine-readable manner. \n38\n']","To ethically and securely use sensitive data, the following measures should be taken: 1. Use sensitive data only for necessary functions or with appropriate consent. 2. Conduct thorough ethical reviews and monitoring for any use of sensitive data that might limit rights, opportunities, or access. 3. Maintain high data quality to avoid adverse consequences from flawed or inaccurate data. 4. Limit access to sensitive data and derived data, ensuring it is not sold, shared, or made public. 5.
Provide regular public reports on data security lapses, ethical reviews, and risk management procedures.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 37, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True "How do reporting expectations boost trust in automated systems?","[' \nENDNOTES\n12. Expectations about reporting are intended for the entity developing or using the automated system. The\nresulting reports can be provided to the public, regulators, auditors, industry standards groups, or others\nengaged in independent review, and should be made public as much as possible consistent with law,\nregulation, and policy, and noting that intellectual property or law enforcement considerations may prevent\npublic release. These reporting expectations are important for transparency, so the American people can\nhave confidence that their rights, opportunities, and access as well as their expectations around\ntechnologies are respected.\n13. National Artificial Intelligence Initiative Office. Agency Inventories of AI Use Cases. Accessed Sept. 8,\n2022. https://www.ai.gov/ai-use-case-inventories/\n14. National Highway Traffic Safety Administration. https://www.nhtsa.gov/\n15. See, e.g., Charles Pruitt. People Doing What They Do Best: The Professional Engineers and NHTSA. Public\nAdministration Review. Vol. 39, No. 4. Jul.-Aug., 1979. https://www.jstor.org/stable/976213?seq=1\n16. The US Department of Transportation has publicly described the health and other benefits of these\n"traffic calming" measures. See, e.g.: U.S. Department of Transportation. Traffic Calming to Slow Vehicle\nSpeeds. Accessed Apr. 17, 2022. https://www.transportation.gov/mission/health/Traffic-Calming-to-Slow\xad\nVehicle-Speeds\n17. Karen Hao. Worried about your firm's AI ethics? These startups are here to help.\nA growing ecosystem of "responsible AI" ventures promise to help organizations monitor and fix their AI\nmodels. MIT Technology Review. Jan 15., 2021.\nhttps://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top Progressive\nCompanies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021. https://\nwww.analyticsinsight.net/top-progressive-companies-building-ethical-ai-to-look-out-for\xad\nin-2021/ https://www.technologyreview.com/2021/01/15/1016183/ai-ethics-startups/; Disha Sinha. Top\nProgressive Companies Building Ethical AI to Look Out for in 2021. Analytics Insight. June 30, 2021.\n18. Office of Management and Budget. Study to Identify Methods to Assess Equity: Report to the President.\nAug. 2021. https://www.whitehouse.gov/wp-content/uploads/2021/08/OMB-Report-on-E013985\xad\nImplementation_508-Compliant-Secure-v1.1.pdf\n19. National Institute of Standards and Technology. AI Risk Management Framework. Accessed May 23,\n2022. https://www.nist.gov/itl/ai-risk-management-framework\n20. U.S. Department of Energy. U.S. Department of Energy Establishes Artificial Intelligence Advancement\nCouncil. U.S. Department of Energy Artificial Intelligence and Technology Office. April 18, 2022.
https://\nwww.energy.gov/ai/articles/us-department-energy-establishes-artificial-intelligence-advancement-council\n21. Department of Defense. U.S Department of Defense Responsible Artificial Intelligence Strategy and\nImplementation Pathway. Jun. 2022. https://media.defense.gov/2022/Jun/22/2003022604/-1/-1/0/\nDepartment-of-Defense-Responsible-Artificial-Intelligence-Strategy-and-Implementation\xad\nPathway.PDF\n22. Director of National Intelligence. Principles of Artificial Intelligence Ethics for the Intelligence\nCommunity. https://www.dni.gov/index.php/features/2763-principles-of-artificial-intelligence-ethics-for\xad\nthe-intelligence-community\n64\n']","Reporting expectations are important for transparency, so the American people can have confidence that their rights, opportunities, and access as well as their expectations around technologies are respected.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 63, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True "Impact of facial recognition on housing residents and visitors?","["" \n \n \n \nDATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \n•\nAn insurer might collect data from a person's social media presence as part of deciding what life\ninsurance rates they should be offered.64\n•\nA data broker harvested large amounts of personal data and then suffered a breach, exposing hundreds of\nthousands of people to potential identity theft.
65\n•\nA local public housing authority installed a facial recognition system at the entrance to housing complexes to\nassist law enforcement with identifying individuals viewed via camera when police reports are filed, leading\nthe community, both those living in the housing complex and not, to have videos of them sent to the local\npolice department and made available for scanning by its facial recognition software.66\n•\nCompanies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32\n""]","A local public housing authority installed a facial recognition system at the entrance to housing complexes to assist law enforcement with identifying individuals viewed via camera when police reports are filed, leading the community, both those living in the housing complex and not, to have videos of them sent to the local police department and made available for scanning by its facial recognition software.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 31, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True diff --git a/Tasks/Task 5/task5-ai-safety-sdg2.csv b/Tasks/Task 5/task5-ai-safety-sdg2.csv new file mode 100644 index 0000000000000000000000000000000000000000..2c4b7a9c88c184da00b4483002b97cee1eba8ae3 --- /dev/null +++ b/Tasks/Task 5/task5-ai-safety-sdg2.csv @@ -0,0 +1,50 @@ +question,contexts,ground_truth,evolution_type,metadata,episode_done +What procedures should be established and maintained for escalating GAI system incidents to the organizational risk management authority?,"[' \n42 \nMG-2.4-002 \nEstablish and maintain procedures for escalating GAI system incidents to the \norganizational risk management authority when specific criteria for deactivation \nor disengagement is met for a particular context of use or for the GAI system as a \nwhole. \nInformation Security \nMG-2.4-003 \nEstablish and maintain procedures for the remediation of issues which trigger \nincident response processes for the use of a GAI system, and provide stakeholders \ntimelines associated with the remediation plan. \nInformation Security \n \nMG-2.4-004 Establish and regularly review specific criteria that warrants the deactivation of \nGAI systems in accordance with set risk tolerances and appetites. \nInformation Security \n \nAI Actor Tasks: AI Deployment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 3.1: AI risks and benefits from third-party resources are regularly monitored, and risk controls are applied and \ndocumented.
\nAction ID \nSuggested Action \nGAI Risks \nMG-3.1-001 \nApply organizational risk tolerances and controls (e.g., acquisition and \nprocurement processes; assessing personnel credentials and qualifications, \nperforming background checks; filtering GAI input and outputs, grounding, fine \ntuning, retrieval-augmented generation) to third-party GAI resources: Apply \norganizational risk tolerance to the utilization of third-party datasets and other \nGAI resources; Apply organizational risk tolerances to fine-tuned third-party \nmodels; Apply organizational risk tolerance to existing third-party models \nadapted to a new domain; Reassess risk measurements after fine-tuning third-\nparty GAI models. \nValue Chain and Component \nIntegration; Intellectual Property \nMG-3.1-002 \nTest GAI system value chain risks (e.g., data poisoning, malware, other software \nand hardware vulnerabilities; labor practices; data privacy and localization \ncompliance; geopolitical alignment). \nData Privacy; Information Security; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nMG-3.1-003 \nRe-assess model risks after fine-tuning or retrieval-augmented generation \nimplementation and for any third-party GAI models deployed for applications \nand/or use cases that were not evaluated in initial testing. \nValue Chain and Component \nIntegration \nMG-3.1-004 \nTake reasonable measures to review training data for CBRN information, and \nintellectual property, and where appropriate, remove it. Implement reasonable \nmeasures to prevent, flag, or take other action in response to outputs that \nreproduce particular training data (e.g., plagiarized, trademarked, patented, \nlicensed content or trade secret material). \nIntellectual Property; CBRN \nInformation or Capabilities \n']",Establish and maintain procedures for escalating GAI system incidents to the organizational risk management authority when specific criteria for deactivation or disengagement is met for a particular context of use or for the GAI system as a whole.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 45, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How might sophisticated threat actors use GAI-powered security co-pilots in cybersecurity attacks?,"[' \n10 \nGAI systems can ease the unintentional production or dissemination of false, inaccurate, or misleading \ncontent (misinformation) at scale, particularly if the content stems from confabulations. \nGAI systems can also ease the deliberate production or dissemination of false or misleading information \n(disinformation) at scale, where an actor has the explicit intent to deceive or cause harm to others. Even \nvery subtle changes to text or images can manipulate human and machine perception. \nSimilarly, GAI systems could enable a higher degree of sophistication for malicious actors to produce \ndisinformation that is targeted towards specific demographics.
Current and emerging multimodal models \nmake it possible to generate both text-based disinformation and highly realistic "deepfakes" – that is, \nsynthetic audiovisual content and photorealistic images.12 Additional disinformation threats could be \nenabled by future GAI models trained on new data modalities. \nDisinformation and misinformation – both of which may be facilitated by GAI – may erode public trust in \ntrue or valid evidence and information, with downstream effects. For example, a synthetic image of a \nPentagon blast went viral and briefly caused a drop in the stock market. Generative AI models can also \nassist malicious actors in creating compelling imagery and propaganda to support disinformation \ncampaigns, which may not be photorealistic, but could enable these campaigns to gain more reach and \nengagement on social media platforms. Additionally, generative AI models can assist malicious actors in \ncreating fraudulent content intended to impersonate others. \nTrustworthy AI Characteristics: Accountable and Transparent, Safe, Valid and Reliable, Interpretable and \nExplainable \n2.9. Information Security \nInformation security for computer systems and data is a mature field with widely accepted and \nstandardized practices for offensive and defensive cyber capabilities. GAI-based systems present two \nprimary information security risks: GAI could potentially discover or enable new cybersecurity risks by \nlowering the barriers for or easing automated exercise of offensive capabilities; simultaneously, it \nexpands the available attack surface, as GAI itself is vulnerable to attacks like prompt injection or data \npoisoning. \nOffensive cyber capabilities advanced by GAI systems may augment cybersecurity attacks such as \nhacking, malware, and phishing. Reports have indicated that LLMs are already able to discover some \nvulnerabilities in systems (hardware, software, data) and write code to exploit them. Sophisticated threat \nactors might further these risks by developing GAI-powered security co-pilots for use in several parts of \nthe attack chain, including informing attackers on how to proactively evade threat detection and escalate \nprivileges after gaining system access. \nInformation security for GAI models and systems also includes maintaining availability of the GAI system \nand the integrity and (when applicable) the confidentiality of the GAI code, training data, and model \nweights. To identify and secure potential attack points in AI systems or specific components of the AI \n \n \n12 See also https://doi.org/10.6028/NIST.AI.100-4, to be published.
\n']",Sophisticated threat actors might use GAI-powered security co-pilots to inform attackers on how to proactively evade threat detection and escalate privileges after gaining system access.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 13, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What considerations should organizations take into account to ensure accessibility during the design, development, and deployment of automated systems?","["" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. \nDisparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. 
When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections \n"", ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. 
Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']","Organizations should ensure accessibility to people with disabilities during the design, development, and deployment of automated systems. 
This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers to the use or effectiveness of the automated system.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What actions are suggested for examining and documenting the privacy risk of an AI system?,"[' \n35 \nMEASURE 2.9: The AI model is explained, validated, and documented, and AI system output is interpreted within its context – as \nidentified in the MAP function – to inform responsible use and governance. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.9-001 \nApply and document ML explanation results such as: Analysis of embeddings, \nCounterfactual prompts, Gradient-based attributions, Model \ncompression/surrogate models, Occlusion/term reduction. \nConfabulation \nMS-2.9-002 \nDocument GAI model details including: Proposed use and organizational value; \nAssumptions and limitations, Data collection methodologies; Data provenance; \nData quality; Model architecture (e.g., convolutional neural network, \ntransformers, etc.); Optimization objectives; Training algorithms; RLHF \napproaches; Fine-tuning or retrieval-augmented generation approaches; \nEvaluation data; Ethical considerations; Legal and regulatory requirements. \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 2.10: Privacy risk of the AI system – as identified in the MAP function – is examined and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.10-001 \nConduct AI red-teaming to assess issues such as: Outputting of training data \nsamples, and subsequent reverse engineering, model extraction, and \nmembership inference risks; Revealing biometric, confidential, copyrighted, \nlicensed, patented, personal, proprietary, sensitive, or trade-marked information; \nTracking or revealing location information of users or members of training \ndatasets. \nHuman-AI Configuration; \nInformation Integrity; Intellectual \nProperty \nMS-2.10-002 \nEngage directly with end-users and other stakeholders to understand their \nexpectations and concerns regarding content provenance. Use this feedback to \nguide the design of provenance data-tracking techniques. 
\nHuman-AI Configuration; \nInformation Integrity \nMS-2.10-003 Verify deduplication of GAI training data samples, particularly regarding synthetic \ndata. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n']","The suggested actions for examining and documenting the privacy risk of an AI system include: 1. Conducting AI red-teaming to assess issues such as outputting of training data samples, reverse engineering, model extraction, membership inference risks, revealing biometric, confidential, copyrighted, licensed, patented, personal, proprietary, sensitive, or trade-marked information, and tracking or revealing location information of users or members of training datasets. 2. Engaging directly with end-users and other stakeholders to understand their expectations and concerns regarding content provenance and using this feedback to guide the design of provenance data-tracking techniques. 3. Verifying deduplication of GAI training data samples, particularly regarding synthetic data.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 38, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How can robust watermarking techniques and corresponding detectors be useful in GAI systems used for content creation?,"[' \n52 \n• \nMonitoring system capabilities and limitations in deployment through rigorous TEVV processes; \n• \nEvaluating how humans engage, interact with, or adapt to GAI content (especially in decision \nmaking tasks informed by GAI content), and how they react to applied provenance techniques \nsuch as overt disclosures. \nOrganizations can document and delineate GAI system objectives and limitations to identify gaps where \nprovenance data may be most useful. For instance, GAI systems used for content creation may require \nrobust watermarking techniques and corresponding detectors to identify the source of content or \nmetadata recording techniques and metadata management tools and repositories to trace content \norigins and modifications. Further narrowing of GAI task definitions to include provenance data can \nenable organizations to maximize the utility of provenance data and risk management efforts. \nA.1.7. Enhancing Content Provenance through Structured Public Feedback \nWhile indirect feedback methods such as automated error collection systems are useful, they often lack \nthe context and depth that direct input from end users can provide. Organizations can leverage feedback \napproaches described in the Pre-Deployment Testing section to capture input from external sources such \nas through AI red-teaming. \nIntegrating pre- and post-deployment external feedback into the monitoring process for GAI models and \ncorresponding applications can help enhance awareness of performance changes and mitigate potential \nrisks and harms from outputs. 
There are many ways to capture and make use of user feedback – before \nand after GAI systems and digital content transparency approaches are deployed – to gain insights about \nauthentication efficacy and vulnerabilities, impacts of adversarial threats on techniques, and unintended \nconsequences resulting from the utilization of content provenance approaches on users and \ncommunities. Furthermore, organizations can track and document the provenance of datasets to identify \ninstances in which AI-generated data is a potential root cause of performance issues with the GAI \nsystem. \nA.1.8. Incident Disclosure \nOverview \nAI incidents can be defined as an "event, circumstance, or series of events where the development, use, \nor malfunction of one or more AI systems directly or indirectly contributes to one of the following harms: \ninjury or harm to the health of a person or groups of people (including psychological harms and harms to \nmental health); disruption of the management and operation of critical infrastructure; violations of \nhuman rights or a breach of obligations under applicable law intended to protect fundamental, labor, \nand intellectual property rights; or harm to property, communities, or the environment." AI incidents can \noccur in the aggregate (i.e., for systemic discrimination) or acutely (i.e., for one individual). \nState of AI Incident Tracking and Disclosure \nFormal channels do not currently exist to report and document AI incidents. However, a number of \npublicly available databases have been created to document their occurrence. These reporting channels \nmake decisions on an ad hoc basis about what kinds of incidents to track. Some, for example, track by \namount of media coverage. \n']",Robust watermarking techniques and corresponding detectors can be useful in GAI systems used for content creation to identify the source of content.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 55, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is the importance of public consultation in the development of automated systems?,"[' \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nIn order to ensure that an automated system is safe and effective, it should include safeguards to protect the \npublic from harm in a proactive and ongoing manner; avoid use of data inappropriate for or irrelevant to the task \nat hand, including reuse that could cause compounded harm; and demonstrate the safety and effectiveness of \nthe system. These expectations are explained below. \nProtect the public from harm in a proactive and ongoing manner \nConsultation. 
The public should be consulted in the design, implementation, deployment, acquisition, and \nmaintenance phases of automated system development, with emphasis on early-stage consultation before a \nsystem is introduced or a large change implemented. This consultation should directly engage diverse impact\xad\ned communities to consider concerns and risks that may be unique to those communities, or disproportionate\xad\nly prevalent or severe for them. The extent of this engagement and the form of outreach to relevant stakehold\xad\ners may differ depending on the specific automated system and development phase, but should include \nsubject matter, sector-specific, and context-specific experts as well as experts on potential impacts such as \ncivil rights, civil liberties, and privacy experts. For private sector applications, consultations before product \nlaunch may need to be confidential. Government applications, particularly law enforcement applications or \napplications that raise national security considerations, may require confidential or limited engagement based \non system sensitivities and preexisting oversight laws and structures. Concerns raised in this consultation \nshould be documented, and the automated system developers were proposing to create, use, or deploy should \nbe reconsidered based on this feedback. \nTesting. Systems should undergo extensive testing before deployment. This testing should follow \ndomain-specific best practices, when available, for ensuring the technology will work in its real-world \ncontext. Such testing should take into account both the specific technology used and the roles of any human \noperators or reviewers who impact system outcomes or effectiveness; testing should include both automated \nsystems testing and human-led (manual) testing. Testing conditions should mirror as closely as possible the \nconditions in which the system will be deployed, and new testing may be required for each deployment to \naccount for material differences in conditions from one deployment to another. Following testing, system \nperformance should be compared with the in-place, potentially human-driven, status quo procedures, with \nexisting human performance considered as a performance baseline for the algorithm to meet pre-deployment, \nand as a lifecycle minimum performance standard. Decision possibilities resulting from performance testing \nshould include the possibility of not deploying the system. \nRisk identification and mitigation. Before deployment, and in a proactive and ongoing manner, poten\xad\ntial risks of the automated system should be identified and mitigated. Identified risks should focus on the \npotential for meaningful impact on people's rights, opportunities, or access and include those to impacted \ncommunities that may not be direct users of the automated system, risks resulting from purposeful misuse of \nthe system, and other concerns identified via the consultation process. Assessment and, where possible, mea\xad\nsurement of the impact of risks should be included and balanced such that high impact risks receive attention \nand mitigation proportionate with those impacts. Automated systems with the intended purpose of violating \nthe safety of others should not be developed or used; systems with such safety violations as identified unin\xad\ntended consequences should not be used until the risk can be mitigated. Ongoing risk mitigation may necessi\xad\ntate rollback or significant modification to a launched automated system. 
\n18\n']","Public consultation is important in the development of automated systems because it ensures that the public is involved in the design, implementation, deployment, acquisition, and maintenance phases. This consultation emphasizes early-stage engagement before a system is introduced or a large change is implemented. It directly engages diverse impacted communities to consider concerns and risks unique to those communities or disproportionately prevalent or severe for them. The consultation should include subject matter, sector-specific, and context-specific experts, as well as experts on potential impacts such as civil rights, civil liberties, and privacy experts. Concerns raised in this consultation should be documented, and the automated system developers should reconsider the system based on this feedback.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 17, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +What mechanisms should be created to provide protections for whistleblowers who report organizational violations or risks to public safety?,"[' \n17 \nGOVERN 1.7: Processes and procedures are in place for decommissioning and phasing out AI systems safely and in a manner that \ndoes not increase risks or decrease the organization's trustworthiness. \nAction ID \nSuggested Action \nGAI Risks \nGV-1.7-001 Protocols are put in place to ensure GAI systems are able to be deactivated when \nnecessary. \nInformation Security; Value Chain \nand Component Integration \nGV-1.7-002 \nConsider the following factors when decommissioning GAI systems: Data \nretention requirements; Data security, e.g., containment, protocols, Data leakage \nafter decommissioning; Dependencies between upstream, downstream, or other \ndata, internet of things (IOT) or AI systems; Use of open-source data or models; \nUsers' emotional entanglement with GAI functions. \nHuman-AI Configuration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nGOVERN 2.1: Roles and responsibilities and lines of communication related to mapping, measuring, and managing AI risks are \ndocumented and are clear to individuals and teams throughout the organization. \nAction ID \nSuggested Action \nGAI Risks \nGV-2.1-001 \nEstablish organizational roles, policies, and procedures for communicating GAI \nincidents and performance to AI Actors and downstream stakeholders (including \nthose potentially impacted), via community or official resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor). \nHuman-AI Configuration; Value \nChain and Component Integration \nGV-2.1-002 Establish procedures to engage teams for GAI system incident response with \ndiverse composition and responsibilities based on the particular incident type. \nHarmful Bias and Homogenization \nGV-2.1-003 Establish processes to verify the AI Actors conducting GAI incident response tasks \ndemonstrate and maintain the appropriate skills and training. 
\nHuman-AI Configuration \nGV-2.1-004 When systems may raise national security \nrisks, involve national security \nprofessionals in mapping, measuring, and managing those risks. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Information Security \nGV-2.1-005 \nCreate mechanisms to provide protections for whistleblowers who report, based \non reasonable belief, when the organization violates relevant laws or poses a \nspecific and empirically well-substantiated negative risk to public safety (or has \nalready caused harm). \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent \nAI Actor Tasks: Governance and Oversight \n \n']","Create mechanisms to provide protections for whistleblowers who report, based on reasonable belief, when the organization violates relevant laws or poses a specific and empirically well-substantiated negative risk to public safety (or has already caused harm).",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 20, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What approaches are suggested for mapping AI technology and legal risks of its components?,"[' \n26 \nMAP 4.1: Approaches for mapping AI technology and legal risks of its components – including the use of third-party data or \nsoftware – are in place, followed, and documented, as are risks of infringement of a third-party's intellectual property or other \nrights. \nAction ID \nSuggested Action \nGAI Risks \nMP-4.1-001 Conduct periodic monitoring of AI-generated content for privacy risks; address any \npossible instances of PII or sensitive data exposure. \nData Privacy \nMP-4.1-002 Implement processes for responding to potential intellectual property infringement \nclaims or other rights. \nIntellectual Property \nMP-4.1-003 \nConnect new GAI policies, procedures, and processes to existing model, data, \nsoftware development, and IT governance and to legal, compliance, and risk \nmanagement activities. \nInformation Security; Data Privacy \nMP-4.1-004 Document training data curation policies, to the extent possible and according to \napplicable laws and policies. \nIntellectual Property; Data Privacy; \nObscene, Degrading, and/or \nAbusive Content \nMP-4.1-005 \nEstablish policies for collection, retention, and minimum quality of data, in \nconsideration of the following risks: Disclosure of inappropriate CBRN information; \nUse of Illegal or dangerous content; Offensive cyber capabilities; Training data \nimbalances that could give rise to harmful biases; Leak of personally identifiable \ninformation, including facial likenesses of individuals. \nCBRN Information or Capabilities; \nIntellectual Property; Information \nSecurity; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-006 Implement policies and practices defining how third-party intellectual property and \ntraining data will be used, stored, and protected. 
\nIntellectual Property; Value Chain \nand Component Integration \nMP-4.1-007 Re-evaluate models that were fine-tuned or enhanced on top of third-party \nmodels. \nValue Chain and Component \nIntegration \nMP-4.1-008 \nRe-evaluate risks when adapting GAI models to new domains. Additionally, \nestablish warning systems to determine if a GAI system is being used in a new \ndomain where previous assumptions (relating to context of use or mapped risks \nsuch as security, and safety) may no longer hold. \nCBRN Information or Capabilities; \nIntellectual Property; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; Data \nPrivacy \nMP-4.1-009 Leverage approaches to detect the presence of PII or sensitive data in generated \noutput text, image, video, or audio. \nData Privacy \n']","Approaches for mapping AI technology and legal risks of its components include periodic monitoring of AI-generated content for privacy risks, implementing processes for responding to potential intellectual property infringement claims, connecting new GAI policies to existing governance and risk management activities, documenting training data curation policies, establishing policies for data collection and retention, implementing policies for the use and protection of third-party intellectual property and training data, re-evaluating models fine-tuned on third-party models, re-evaluating risks when adapting GAI models to new domains, and leveraging approaches to detect the presence of PII or sensitive data in generated output.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 29, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is confabulation and how can it mislead or deceive users?,"[' \n4 \n1. CBRN Information or Capabilities: Eased access to or synthesis of materially nefarious \ninformation or design capabilities related to chemical, biological, radiological, or nuclear (CBRN) \nweapons or other dangerous materials or agents. \n2. Confabulation: The production of confidently stated but erroneous or false content (known \ncolloquially as "hallucinations" or "fabrications") by which users may be misled or deceived.6 \n3. Dangerous, Violent, or Hateful Content: Eased production of and access to violent, inciting, \nradicalizing, or threatening content as well as recommendations to carry out self-harm or \nconduct illegal activities. Includes difficulty controlling public exposure to hateful and disparaging \nor stereotyping content. \n4. Data Privacy: Impacts due to leakage and unauthorized use, disclosure, or de-anonymization of \nbiometric, health, location, or other personally identifiable information or sensitive data.7 \n5. Environmental Impacts: Impacts due to high compute resource utilization in training or \noperating GAI models, and related outcomes that may adversely impact ecosystems. \n6. 
Harmful Bias or Homogenization: Amplification and exacerbation of historical, societal, and \nsystemic biases; performance disparities8 between sub-groups or languages, possibly due to \nnon-representative training data, that result in discrimination, amplification of biases, or \nincorrect presumptions about performance; undesired homogeneity that skews system or model \noutputs, which may be erroneous, lead to ill-founded decision-making, or amplify harmful \nbiases. \n7. Human-AI Configuration: Arrangements of or interactions between a human and an AI system \nwhich can result in the human inappropriately anthropomorphizing GAI systems or experiencing \nalgorithmic aversion, automation bias, over-reliance, or emotional entanglement with GAI \nsystems. \n8. Information Integrity: Lowered barrier to entry to generate and support the exchange and \nconsumption of content which may not distinguish fact from opinion or fiction or acknowledge \nuncertainties, or could be leveraged for large-scale dis- and mis-information campaigns. \n9. Information Security: Lowered barriers for offensive cyber capabilities, including via automated \ndiscovery and exploitation of vulnerabilities to ease hacking, malware, phishing, offensive cyber \n \n \n6 Some commenters have noted that the terms "hallucination" and "fabrication" anthropomorphize GAI, which \nitself is a risk related to GAI systems as it can inappropriately attribute human characteristics to non-human \nentities. \n7 What is categorized as sensitive data or sensitive PII can be highly contextual based on the nature of the \ninformation, but examples of sensitive information include information that relates to an information subject's \nmost intimate sphere, including political opinions, sex life, or criminal convictions. \n8 The notion of harm presumes some baseline scenario that the harmful factor (e.g., a GAI model) makes worse. \nWhen the mechanism for potential harm is a disparity between groups, it can be difficult to establish what the \nmost appropriate baseline is to compare against, which can result in divergent views on when a disparity between \nAI behaviors for different subgroups constitutes a harm. In discussing harms from disparities such as biased \nbehavior, this document highlights examples where someone's situation is worsened relative to what it would have \nbeen in the absence of any AI system, making the outcome unambiguously a harm of the system. \n']",Confabulation is the production of confidently stated but erroneous or false content (known colloquially as 'hallucinations' or 'fabrications') by which users may be misled or deceived.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 7, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What is the purpose of incorporating trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems according to the AI Risk Management Framework (AI RMF) 1.0?","[' \n1 \n1. 
\nIntroduction \nThis document is a cross-sectoral profile of and companion resource for the AI Risk Management \nFramework (AI RMF 1.0) for Generative AI,1 pursuant to President Biden's Executive Order (EO) 14110 on \nSafe, Secure, and Trustworthy Artificial Intelligence.2 The AI RMF was released in January 2023, and is \nintended for voluntary use and to improve the ability of organizations to incorporate trustworthiness \nconsiderations into the design, development, use, and evaluation of AI products, services, and systems. \nA profile is an implementation of the AI RMF functions, categories, and subcategories for a specific \nsetting, application, or technology – in this case, Generative AI (GAI) – based on the requirements, risk \ntolerance, and resources of the Framework user. AI RMF profiles assist organizations in deciding how to \nbest manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory \nrequirements and best practices, and reflects risk management priorities. Consistent with other AI RMF \nprofiles, this profile offers insights into how risk can be managed across various stages of the AI lifecycle \nand for GAI as a technology. \nAs GAI covers risks of models or applications that can be used across use cases or sectors, this document \nis an AI RMF cross-sectoral profile. Cross-sectoral profiles can be used to govern, map, measure, and \nmanage risks associated with activities or business processes common across sectors, such as the use of \nlarge language models (LLMs), cloud-based services, or acquisition. \nThis document defines risks that are novel to or exacerbated by the use of GAI. After introducing and \ndescribing these risks, the document provides a set of suggested actions to help organizations govern, \nmap, measure, and manage these risks. \n \n \n1 EO 14110 defines Generative AI as "the class of AI models that emulate the structure and characteristics of input \ndata in order to generate derived synthetic content. This can include images, videos, audio, text, and other digital \ncontent." While not all GAI is derived from foundation models, for purposes of this document, GAI generally refers \nto generative foundation models. The foundation model subcategory of "dual-use foundation models" is defined by \nEO 14110 as "an AI model that is trained on broad data; generally uses self-supervision; contains at least tens of \nbillions of parameters; is applicable across a wide range of contexts." \n2 This profile was developed per Section 4.1(a)(i)(A) of EO 14110, which directs the Secretary of Commerce, acting \nthrough the Director of the National Institute of Standards and Technology (NIST), to develop a companion \nresource to the AI RMF, NIST AI 100–1, for generative AI. 
\n']","The purpose of incorporating trustworthiness considerations into the design, development, use, and evaluation of AI products, services, and systems according to the AI Risk Management Framework (AI RMF) 1.0 is to improve the ability of organizations to manage AI risks in a manner that is well-aligned with their goals, considers legal/regulatory requirements and best practices, and reflects risk management priorities.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 4, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What types of research does the National Science Foundation (NSF) fund to advance the safety, security, and effectiveness of AI systems?","[' \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\nSome U.S government agencies have developed specific frameworks for ethical use of AI \nsystems. The Department of Energy (DOE) has activated the AI Advancement Council that oversees coordina-\ntion and advises on implementation of the DOE AI Strategy and addresses issues and/or escalations on the \nethical use and development of AI systems.20 The Department of Defense has adopted Artificial Intelligence \nEthical Principles, and tenets for Responsible Artificial Intelligence specifically tailored to its national \nsecurity and defense activities.21 Similarly, the U.S. Intelligence Community (IC) has developed the Principles \nof Artificial Intelligence Ethics for the Intelligence Community to guide personnel on whether and how to \ndevelop and use AI in furtherance of the IC\'s mission, as well as an AI Ethics Framework to help implement \nthese principles.22\nThe National Science Foundation (NSF) funds extensive research to help foster the \ndevelopment of automated systems that adhere to and advance their safety, security and \neffectiveness. Multiple NSF programs support research that directly addresses many of these principles: \nthe National AI Research Institutes23 support research on all aspects of safe, trustworthy, fair, and explainable \nAI algorithms and systems; the Cyber Physical Systems24 program supports research on developing safe \nautonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace25 \nprogram supports research on cybersecurity and privacy enhancing technologies in automated systems; the \nFormal Methods in the Field26 program supports research on rigorous formal verification and analysis of \nautomated systems and machine learning, and the Designing Accountable Software Systems27 program supports \nresearch on rigorous and reproducible methodologies for developing software systems with legal and regulatory \ncompliance in mind. \nSome state legislatures have placed strong transparency and validity requirements on \nthe use of pretrial risk assessments. 
The use of algorithmic pretrial risk assessments has been a \ncause of concern for civil rights groups.28 Idaho Code Section 19-1910, enacted in 2019,29 requires that any \npretrial risk assessment, before use in the state, first be ""shown to be free of bias against any class of \nindividuals protected from discrimination by state or federal law"", that any locality using a pretrial risk \nassessment must first formally validate the claim of its being free of bias, that ""all documents, records, and \ninformation used to build or validate the risk assessment shall be open to public inspection,"" and that assertions \nof trade secrets cannot be used ""to quash discovery in a criminal matter by a party to a criminal case."" \n22\n']","The National Science Foundation (NSF) funds extensive research to help foster the development of automated systems that adhere to and advance their safety, security, and effectiveness. Multiple NSF programs support research that directly addresses many of these principles: the National AI Research Institutes support research on all aspects of safe, trustworthy, fair, and explainable AI algorithms and systems; the Cyber Physical Systems program supports research on developing safe autonomous and cyber physical systems with AI components; the Secure and Trustworthy Cyberspace program supports research on cybersecurity and privacy enhancing technologies in automated systems; the Formal Methods in the Field program supports research on rigorous formal verification and analysis of automated systems and machine learning, and the Designing Accountable Software Systems program supports research on rigorous and reproducible methodologies for developing software systems with legal and regulatory compliance in mind.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 21, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How have synthetic NCII and CSAM moved from niche internet forums to mainstream online businesses?,"[' \n12 \nCSAM. Even when trained on "clean" data, increasingly capable GAI models can synthesize or produce \nsynthetic NCII and CSAM. Websites, mobile apps, and custom-built models that generate synthetic NCII \nhave moved from niche internet forums to mainstream, automated, and scaled online businesses. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Safe, Privacy Enhanced \n2.12. \nValue Chain and Component Integration \nGAI value chains involve many third-party components such as procured datasets, pre-trained models, \nand software libraries. These components might be improperly obtained or not properly vetted, leading \nto diminished transparency or accountability for downstream users. While this is a risk for traditional AI \nsystems and some other digital technologies, the risk is exacerbated for GAI due to the scale of the \ntraining data, which may be too large for humans to vet; the difficulty of training foundation models, \nwhich leads to extensive reuse of limited numbers of models; and the extent to which GAI may be \nintegrated into other devices and services. 
As GAI systems often involve many distinct third-party \ncomponents and data sources, it may be difficult to attribute issues in a system's behavior to any one of \nthese sources. \nErrors in third-party GAI components can also have downstream impacts on accuracy and robustness. \nFor example, test datasets commonly used to benchmark or validate models can contain label errors. \nInaccuracies in these labels can impact the "stability" or robustness of these benchmarks, which many \nGAI practitioners consider during the model selection process. \nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Secure and Resilient, Valid and Reliable \n3. \nSuggested Actions to Manage GAI Risks \nThe following suggested actions target risks unique to or exacerbated by GAI. \nIn addition to the suggested actions below, AI risk management activities and actions set forth in the AI \nRMF 1.0 and Playbook are already applicable for managing GAI risks. Organizations are encouraged to \napply the activities suggested in the AI RMF and its Playbook when managing the risk of GAI systems. \nImplementation of the suggested actions will vary depending on the type of risk, characteristics of GAI \nsystems, stage of the GAI lifecycle, and relevant AI actors involved. \nSuggested actions to manage GAI risks can be found in the tables below: \n• \nThe suggested actions are organized by relevant AI RMF subcategories to streamline these \nactivities alongside implementation of the AI RMF. \n• \nNot every subcategory of the AI RMF is included in this document.13 Suggested actions are \nlisted for only some subcategories. \n \n \n13 As this document was focused on the GAI PWG efforts and primary considerations (see Appendix A), AI RMF \nsubcategories not addressed here may be added later. \n']","Websites, mobile apps, and custom-built models that generate synthetic NCII have moved from niche internet forums to mainstream, automated, and scaled online businesses.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 15, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What measures are suggested to mitigate concerns of harmful bias and homogenization in AI training data?,"[' \n37 \nMS-2.11-005 \nAssess the proportion of synthetic to non-synthetic training data and verify \ntraining data is not overly homogenous or GAI-produced to mitigate concerns of \nmodel collapse. \nHarmful Bias and Homogenization \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-Users, \nOperation and Monitoring, TEVV \n \nMEASURE 2.12: Environmental impact and sustainability of AI model training and management activities – as identified in the MAP \nfunction – are assessed and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.12-001 Assess safety to physical environments when deploying GAI systems. 
\nDangerous, Violent, or Hateful \nContent \nMS-2.12-002 Document anticipated environmental impacts of model development, \nmaintenance, and deployment in product design decisions. \nEnvironmental \nMS-2.12-003 \nMeasure or estimate environmental impacts (e.g., energy and water \nconsumption) for training, fine tuning, and deploying models: Verify tradeoffs \nbetween resources used at inference time versus additional resources required \nat training time. \nEnvironmental \nMS-2.12-004 Verify effectiveness of carbon capture or offset programs for GAI training and \napplications, and address green-washing concerns. \nEnvironmental \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",Assess the proportion of synthetic to non-synthetic training data and verify training data is not overly homogenous or GAI-produced to mitigate concerns of model collapse.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 40, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How should organizational risk tolerances and controls be applied to third-party GAI resources?,"[' \n42 \nMG-2.4-002 \nEstablish and maintain procedures for escalating GAI system incidents to the \norganizational risk management authority when specific criteria for deactivation \nor disengagement is met for a particular context of use or for the GAI system as a \nwhole. \nInformation Security \nMG-2.4-003 \nEstablish and maintain procedures for the remediation of issues which trigger \nincident response processes for the use of a GAI system, and provide stakeholders \ntimelines associated with the remediation plan. \nInformation Security \n \nMG-2.4-004 Establish and regularly review specific criteria that warrants the deactivation of \nGAI systems in accordance with set risk tolerances and appetites. \nInformation Security \n \nAI Actor Tasks: AI Deployment, Governance and Oversight, Operation and Monitoring \n \nMANAGE 3.1: AI risks and benefits from third-party resources are regularly monitored, and risk controls are applied and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMG-3.1-001 \nApply organizational risk tolerances and controls (e.g., acquisition and \nprocurement processes; assessing personnel credentials and qualifications, \nperforming background checks; filtering GAI input and outputs, grounding, fine \ntuning, retrieval-augmented generation) to third-party GAI resources: Apply \norganizational risk tolerance to the utilization of third-party datasets and other \nGAI resources; Apply organizational risk tolerances to fine-tuned third-party \nmodels; Apply organizational risk tolerance to existing third-party models \nadapted to a new domain; Reassess risk measurements after fine-tuning third-\nparty GAI models. 
\nValue Chain and Component \nIntegration; Intellectual Property \nMG-3.1-002 \nTest GAI system value chain risks (e.g., data poisoning, malware, other software \nand hardware vulnerabilities; labor practices; data privacy and localization \ncompliance; geopolitical alignment). \nData Privacy; Information Security; \nValue Chain and Component \nIntegration; Harmful Bias and \nHomogenization \nMG-3.1-003 \nRe-assess model risks after fine-tuning or retrieval-augmented generation \nimplementation and for any third-party GAI models deployed for applications \nand/or use cases that were not evaluated in initial testing. \nValue Chain and Component \nIntegration \nMG-3.1-004 \nTake reasonable measures to review training data for CBRN information, and \nintellectual property, and where appropriate, remove it. Implement reasonable \nmeasures to prevent, flag, or take other action in response to outputs that \nreproduce particular training data (e.g., plagiarized, trademarked, patented, \nlicensed content or trade secret material). \nIntellectual Property; CBRN \nInformation or Capabilities \n']","Organizational risk tolerances and controls should be applied to third-party GAI resources by incorporating them into acquisition and procurement processes, assessing personnel credentials and qualifications, performing background checks, filtering GAI input and outputs, grounding, fine-tuning, and retrieval-augmented generation. Additionally, organizational risk tolerance should be applied to the utilization of third-party datasets and other GAI resources, fine-tuned third-party models, and existing third-party models adapted to a new domain. Risk measurements should be reassessed after fine-tuning third-party GAI models.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 45, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What characteristics should data have to be considered representative and robust in the development or assessment of automated systems?,"[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. 
These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']",Data used as part of system development or assessment should be representative of local communities based on the planned deployment setting and should be reviewed for bias based on the historical and societal context of the data. 
Such data should be sufficiently robust to identify and help to mitigate biases and potential harms.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What actions are suggested to ensure information integrity in the context of AI systems?,"[' \n28 \nMAP 5.2: Practices and personnel for supporting regular engagement with relevant AI Actors and integrating feedback about \npositive, negative, and unanticipated impacts are in place and documented. \nAction ID \nSuggested Action \nGAI Risks \nMP-5.2-001 \nDetermine context-based measures to identify if new impacts are present due to \nthe GAI system, including regular engagements with downstream AI Actors to \nidentify and quantify new contexts of unanticipated impacts of GAI systems. \nHuman-AI Configuration; Value \nChain and Component Integration \nMP-5.2-002 \nPlan regular engagements with AI Actors responsible for inputs to GAI systems, \nincluding third-party data and algorithms, to review and evaluate unanticipated \nimpacts. \nHuman-AI Configuration; Value \nChain and Component Integration \nAI Actor Tasks: AI Deployment, AI Design, AI Impact Assessment, Affected Individuals and Communities, Domain Experts, End-\nUsers, Human Factors, Operation and Monitoring \n \nMEASURE 1.1: Approaches and metrics for measurement of AI risks enumerated during the MAP function are selected for \nimplementation starting with the most significant AI risks. The risks or trustworthiness characteristics that will not – or cannot – be \nmeasured are properly documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.1-001 Employ methods to trace the origin and modifications of digital content. \nInformation Integrity \nMS-1.1-002 \nIntegrate tools designed to analyze content provenance and detect data \nanomalies, verify the authenticity of digital signatures, and identify patterns \nassociated with misinformation or manipulation. \nInformation Integrity \nMS-1.1-003 \nDisaggregate evaluation metrics by demographic factors to identify any \ndiscrepancies in how content provenance mechanisms work across diverse \npopulations. \nInformation Integrity; Harmful \nBias and Homogenization \nMS-1.1-004 Develop a suite of metrics to evaluate structured public feedback exercises \ninformed by representative AI Actors. \nHuman-AI Configuration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.1-005 \nEvaluate novel methods and technologies for the measurement of GAI-related \nrisks including in content provenance, offensive cyber, and CBRN, while \nmaintaining the models’ ability to produce valid, reliable, and factually accurate \noutputs. \nInformation Integrity; CBRN \nInformation or Capabilities; \nObscene, Degrading, and/or \nAbusive Content \n']","The suggested actions to ensure information integrity in the context of AI systems include: 1) Employing methods to trace the origin and modifications of digital content.
2) Integrating tools designed to analyze content provenance and detect data anomalies, verify the authenticity of digital signatures, and identify patterns associated with misinformation or manipulation. 3) Disaggregating evaluation metrics by demographic factors to identify any discrepancies in how content provenance mechanisms work across diverse populations. 4) Evaluating novel methods and technologies for the measurement of GAI-related risks including in content provenance, offensive cyber, and CBRN, while maintaining the models’ ability to produce valid, reliable, and factually accurate outputs.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 31, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
"What are the principles that federal agencies must adhere to under Executive Order 13960 when designing, developing, acquiring, or using AI?","[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principles—while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AI—require that AI is: (a) lawful and \nrespectful of our Nation’s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f ) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulations—and \nmeasures to address harms when they occur—can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components.
\nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturers’ ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companies’ reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-\nBased Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful \nuses. \nThe \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23.
\n21\n']","The principles that federal agencies must adhere to under Executive Order 13960 when designing, developing, acquiring, or using AI are: (a) lawful and respectful of our Nationā€™s values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) safe, secure, and resilient; (e) understandable; (f) responsible and traceable; (g) regularly monitored; (h) transparent; and, (i) accountable.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How did the installation of a facial recognition system by a local public housing authority impact the community?,"["" \n \n \n \nDATA PRIVACY \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nā€¢\nAn insurer might collect data from a person's social media presence as part of deciding what life\ninsurance rates they should be offered.64\nā€¢\nA data broker harvested large amounts of personal data and then suffered a breach, exposing hundreds of\nthousands of people to potential identity theft. 65\nā€¢\nA local public housing authority installed a facial recognition system at the entrance to housing complexes to\nassist law enforcement with identifying individuals viewed via camera when police reports are filed, leading\nthe community, both those living in the housing complex and not, to have videos of them sent to the local\npolice department and made available for scanning by its facial recognition software.66\nā€¢\nCompanies use surveillance software to track employee discussions about union activity and use the\nresulting data to surveil individual employees and surreptitiously intervene in discussions.67\n32\n""]","The installation of a facial recognition system by a local public housing authority led the community, both those living in the housing complex and not, to have videos of them sent to the local police department and made available for scanning by its facial recognition software.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 31, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How have businesses successfully integrated automated customer service with human support teams?,"["" \n \n \n \n \n \n \n \n \n \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. 
\nHealthcare “navigators” help people find their way through online signup forms to choose \nand obtain healthcare. A Navigator is “an individual or organization that's trained and able to help \nconsumers, small businesses, and their employees as they look for health coverage options through the \nMarketplace (a government web site), including completing eligibility and enrollment forms.”106 For \nthe 2022 plan year, the Biden-Harris Administration increased funding so that grantee organizations could \n“train and certify more than 1,500 Navigators to help uninsured consumers find affordable and comprehensive \nhealth coverage.”107\nThe customer service industry has successfully integrated automated services such as \nchat-bots and AI-driven call response systems with escalation to a human support \nteam.108 Many businesses now use partially automated customer service platforms that help answer customer \nquestions and compile common problems for human agents to review. These integrated human-AI \nsystems allow companies to provide faster customer care while maintaining human agents to answer \ncalls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to \nsuccessful customer service.109\nBallot curing laws in at least 24 states require a fallback system that allows voters to \ncorrect their ballot and have it counted in the case that a voter signature matching \nalgorithm incorrectly flags their ballot as invalid or there is another issue with their \nballot, and review by an election official does not rectify the problem. Some federal \ncourts have found that such cure procedures are constitutionally required.110 \nBallot \ncuring processes vary among states, and include direct phone calls, emails, or mail contact by election \nofficials.111 Voters are asked to provide alternative information or a new signature to verify the validity of their \nballot. \n52\n""]",The customer service industry has successfully integrated automated services such as chat-bots and AI-driven call response systems with escalation to a human support team. Many businesses now use partially automated customer service platforms that help answer customer questions and compile common problems for human agents to review. These integrated human-AI systems allow companies to provide faster customer care while maintaining human agents to answer calls or otherwise respond to complicated requests. Using both AI and human agents is viewed as key to successful customer service.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 51, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
Who were some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights?,"[""APPENDIX\n• OSTP conducted meetings with a variety of stakeholders in the private sector and civil society.
Some of these\nmeetings were specifically focused on providing ideas related to the development of the Blueprint for an AI\nBill of Rights while others provided useful general context on the positive use cases, potential harms, and/or\noversight possibilities for these technologies. Participants in these conversations from the private sector and\ncivil society included:\nAdobe \nAmerican Civil Liberties Union \n(ACLU) \nThe Aspen Commission on \nInformation Disorder \nThe Awood Center \nThe Australian Human Rights \nCommission \nBiometrics Institute \nThe Brookings Institute \nBSA | The Software Alliance \nCantellus Group \nCenter for American Progress \nCenter for Democracy and \nTechnology \nCenter on Privacy and Technology \nat Georgetown Law \nChristiana Care \nColor of Change \nCoworker \nData Robot \nData Trust Alliance \nData and Society Research Institute \nDeepmind \nEdSAFE AI Alliance \nElectronic Privacy Information \nCenter (EPIC) \nEncode Justice \nEqual AI \nGoogle \nHitachi's AI Policy Committee \nThe Innocence Project \nInstitute of Electrical and \nElectronics Engineers (IEEE) \nIntuit \nLawyers Committee for Civil Rights \nUnder Law \nLegal Aid Society \nThe Leadership Conference on \nCivil and Human Rights \nMeta \nMicrosoft \nThe MIT AI Policy Forum \nMovement Alliance Project \nThe National Association of \nCriminal Defense Lawyers \nO’Neil Risk Consulting & \nAlgorithmic Auditing \nThe Partnership on AI \nPinterest \nThe Plaintext Group \npymetrics \nSAP \nThe Security Industry Association \nSoftware and Information Industry \nAssociation (SIIA) \nSpecial Competitive Studies Project \nThorn \nUnited for Respect \nUniversity of California at Berkeley \nCitris Policy Lab \nUniversity of California at Berkeley \nLabor Center \nUnfinished/Project Liberty \nUpturn \nUS Chamber of Commerce \nUS Chamber of Commerce \nTechnology Engagement Center \nA.I.
Working Group\nVibrent Health\nWarehouse Worker Resource\nCenter\nWaymap\n62\n""]","Some of the private sector and civil society stakeholders that OSTP conducted meetings with for the development of the Blueprint for an AI Bill of Rights included Adobe, American Civil Liberties Union (ACLU), The Aspen Commission on Information Disorder, The Awood Center, The Australian Human Rights Commission, Biometrics Institute, The Brookings Institute, BSA | The Software Alliance, Cantellus Group, Center for American Progress, Center for Democracy and Technology, Center on Privacy and Technology at Georgetown Law, Christiana Care, Color of Change, Coworker, Data Robot, Data Trust Alliance, Data and Society Research Institute, Deepmind, EdSAFE AI Alliance, Electronic Privacy Information Center (EPIC), Encode Justice, Equal AI, Google, Hitachi's AI Policy Committee, The Innocence Project, Institute of Electrical and Electronics Engineers (IEEE), Intuit, Lawyers Committee for Civil Rights Under Law, Legal Aid Society, The Leadership Conference on Civil and Human Rights, Meta, Microsoft, The MIT AI Policy Forum, Movement Alliance Project, The National Association of Criminal Defense Lawyers, O’Neil Risk Consulting & Algorithmic Auditing, The Partnership on AI, Pinterest, The Plaintext Group, pymetrics, SAP, The Security Industry Association, Software and Information Industry Association (SIIA), Special Competitive Studies Project, Thorn, United for Respect, University of California at Berkeley Citris Policy Lab, University of California at Berkeley Labor Center, Unfinished/Project Liberty, Upturn, US Chamber of Commerce, US Chamber of Commerce Technology Engagement Center A.I. Working Group, Vibrent Health, Warehouse Worker Resource Center, and Waymap.",simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 61, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
What should be done if the residual negative risk of an AI system exceeds the organizational risk tolerance?,"[' \n32 \nMEASURE 2.6: The AI system is evaluated regularly for safety risks – as identified in the MAP function. The AI system to be \ndeployed is demonstrated to be safe, its residual negative risk does not exceed the risk tolerance, and it can fail safely, particularly if \nmade to operate beyond its knowledge limits. Safety metrics reflect system reliability and robustness, real-time monitoring, and \nresponse times for AI system failures. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.6-001 \nAssess adverse impacts, including health and wellbeing impacts for value chain \nor other AI Actors that are exposed to sexually explicit, offensive, or violent \ninformation during GAI training and maintenance. \nHuman-AI Configuration; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; \nDangerous, Violent, or Hateful \nContent \nMS-2.6-002 \nAssess existence or levels of harmful bias, intellectual property infringement, \ndata privacy violations, obscenity, extremism, violence, or CBRN information in \nsystem training data.
\nData Privacy; Intellectual Property; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; CBRN \nInformation or Capabilities \nMS-2.6-003 Re-evaluate safety features of fine-tuned models when the negative risk exceeds \norganizational risk tolerance. \nDangerous, Violent, or Hateful \nContent \nMS-2.6-004 Review GAI system outputs for validity and safety: Review generated code to \nassess risks that may arise from unreliable downstream decision-making. \nValue Chain and Component \nIntegration; Dangerous, Violent, or \nHateful Content \nMS-2.6-005 \nVerify that GAI system architecture can monitor outputs and performance, and \nhandle, recover from, and repair errors when security anomalies, threats and \nimpacts are detected. \nConfabulation; Information \nIntegrity; Information Security \nMS-2.6-006 \nVerify that systems properly handle queries that may give rise to inappropriate, \nmalicious, or illegal usage, including facilitating manipulation, extortion, targeted \nimpersonation, cyber-attacks, and weapons creation. \nCBRN Information or Capabilities; \nInformation Security \nMS-2.6-007 Regularly evaluate GAI system vulnerabilities to possible circumvention of safety \nmeasures. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']",Re-evaluate safety features of fine-tuned models when the negative risk exceeds organizational risk tolerance.,simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 35, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What factors should be assessed to determine and document the expected and acceptable GAI system context of use?,"[' \n23 \nMP-1.1-002 \nDetermine and document the expected and acceptable GAI system context of \nuse in collaboration with socio-cultural and other domain experts, by assessing: \nAssumptions and limitations; Direct value to the organization; Intended \noperational environment and observed usage patterns; Potential positive and \nnegative impacts to individuals, public safety, groups, communities, \norganizations, democratic institutions, and the physical environment; Social \nnorms and expectations. \nHarmful Bias and Homogenization \nMP-1.1-003 \nDocument risk measurement plans to address identified risks. Plans may \ninclude, as applicable: Individual and group cognitive biases (e.g., confirmation \nbias, funding bias, groupthink) for AI Actors involved in the design, \nimplementation, and use of GAI systems; Known past GAI system incidents and \nfailure modes; In-context use and foreseeable misuse, abuse, and off-label use; \nOver reliance on quantitative metrics and methodologies without sufficient \nawareness of their limitations in the context(s) of use; Standard measurement \nand structured human feedback approaches; Anticipated human-AI \nconfigurations.
\nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; \nDangerous, Violent, or Hateful \nContent \nMP-1.1-004 \nIdentify and document foreseeable illegal uses or applications of the GAI system \nthat surpass organizational risk tolerances. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Obscene, Degrading, \nand/or Abusive Content \nAI Actor Tasks: AI Deployment \n \nMAP 1.2: Interdisciplinary AI Actors, competencies, skills, and capacities for establishing context reļ¬‚ect demographic diversity and \nbroad domain and user experience expertise, and their participation is documented. Opportunities for interdisciplinary \ncollaboration are prioritized. \nAction ID \nSuggested Action \nGAI Risks \nMP-1.2-001 \nEstablish and empower interdisciplinary teams that reļ¬‚ect a wide range of \ncapabilities, competencies, demographic groups, domain expertise, educational \nbackgrounds, lived experiences, professions, and skills across the enterprise to \ninform and conduct risk measurement and management functions. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nMP-1.2-002 \nVerify that data or benchmarks used in risk measurement, and users, \nparticipants, or subjects involved in structured GAI public feedback exercises \nare representative of diverse in-context user populations. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nAI Actor Tasks: AI Deployment \n \n']","To determine and document the expected and acceptable GAI system context of use, the following factors should be assessed: Assumptions and limitations; Direct value to the organization; Intended operational environment and observed usage patterns; Potential positive and negative impacts to individuals, public safety, groups, communities, organizations, democratic institutions, and the physical environment; Social norms and expectations.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 26, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What is model collapse and what are its potential consequences?,"[' \n9 \nand reduced content diversity). Overly homogenized outputs can themselves be incorrect, or they may \nlead to unreliable decision-making or amplify harmful biases. These phenomena can ļ¬‚ow from \nfoundation models to downstream models and systems, with the foundation models acting as \nā€œbottlenecks,ā€ or single points of failure. \nOverly homogenized content can contribute to ā€œmodel collapse.ā€ Model collapse can occur when model \ntraining over-relies on synthetic data, resulting in data points disappearing from the distribution of the \nnew modelā€™s outputs. In addition to threatening the robustness of the model overall, model collapse \ncould lead to homogenized outputs, including by amplifying any homogenization from the model used to \ngenerate the synthetic training data. \nTrustworthy AI Characteristics: Fair with Harmful Bias Managed, Valid and Reliable \n2.7. 
Human-AI Conļ¬guration \nGAI system use can involve varying risks of misconļ¬gurations and poor interactions between a system \nand a human who is interacting with it. Humans bring their unique perspectives, experiences, or domain-\nspeciļ¬c expertise to interactions with AI systems but may not have detailed knowledge of AI systems and \nhow they work. As a result, human experts may be unnecessarily ā€œaverseā€ to GAI systems, and thus \ndeprive themselves or others of GAIā€™s beneļ¬cial uses. \nConversely, due to the complexity and increasing reliability of GAI technology, over time, humans may \nover-rely on GAI systems or may unjustiļ¬ably perceive GAI content to be of higher quality than that \nproduced by other sources. This phenomenon is an example of automation bias, or excessive deference \nto automated systems. Automation bias can exacerbate other risks of GAI, such as risks of confabulation \nor risks of bias or homogenization. \nThere may also be concerns about emotional entanglement between humans and GAI systems, which \ncould lead to negative psychological impacts. \nTrustworthy AI Characteristics: Accountable and Transparent, Explainable and Interpretable, Fair with \nHarmful Bias Managed, Privacy Enhanced, Safe, Valid and Reliable \n2.8. Information Integrity \nInformation integrity describes the ā€œspectrum of information and associated patterns of its creation, \nexchange, and consumption in society.ā€ High-integrity information can be trusted; ā€œdistinguishes fact \nfrom ļ¬ction, opinion, and inference; acknowledges uncertainties; and is transparent about its level of \nvetting. This information can be linked to the original source(s) with appropriate evidence. High-integrity \ninformation is also accurate and reliable, can be veriļ¬ed and authenticated, has a clear chain of custody, \nand creates reasonable expectations about when its validity may expire.ā€11 \n \n \n11 This deļ¬nition of information integrity is derived from the 2022 White House Roadmap for Researchers on \nPriorities Related to Information Integrity Research and Development. \n']","Model collapse can occur when model training over-relies on synthetic data, resulting in data points disappearing from the distribution of the new model's outputs. In addition to threatening the robustness of the model overall, model collapse could lead to homogenized outputs, including by amplifying any homogenization from the model used to generate the synthetic training data.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 12, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +What factors should be considered when decommissioning AI systems to ensure safety and maintain the organization's trustworthiness?,"[' \n17 \nGOVERN 1.7: Processes and procedures are in place for decommissioning and phasing out AI systems safely and in a manner that \ndoes not increase risks or decrease the organizationā€™s trustworthiness. 
\nAction ID \nSuggested Action \nGAI Risks \nGV-1.7-001 Protocols are put in place to ensure GAI systems are able to be deactivated when \nnecessary. \nInformation Security; Value Chain \nand Component Integration \nGV-1.7-002 \nConsider the following factors when decommissioning GAI systems: Data \nretention requirements; Data security, e.g., containment, protocols, Data leakage \nafter decommissioning; Dependencies between upstream, downstream, or other \ndata, internet of things (IOT) or AI systems; Use of open-source data or models; \nUsers’ emotional entanglement with GAI functions. \nHuman-AI Configuration; \nInformation Security; Value Chain \nand Component Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring \n \nGOVERN 2.1: Roles and responsibilities and lines of communication related to mapping, measuring, and managing AI risks are \ndocumented and are clear to individuals and teams throughout the organization. \nAction ID \nSuggested Action \nGAI Risks \nGV-2.1-001 \nEstablish organizational roles, policies, and procedures for communicating GAI \nincidents and performance to AI Actors and downstream stakeholders (including \nthose potentially impacted), via community or official resources (e.g., AI incident \ndatabase, AVID, CVE, NVD, or OECD AI incident monitor). \nHuman-AI Configuration; Value \nChain and Component Integration \nGV-2.1-002 Establish procedures to engage teams for GAI system incident response with \ndiverse composition and responsibilities based on the particular incident type. \nHarmful Bias and Homogenization \nGV-2.1-003 Establish processes to verify the AI Actors conducting GAI incident response tasks \ndemonstrate and maintain the appropriate skills and training. \nHuman-AI Configuration \nGV-2.1-004 When systems may raise national security risks, involve national security \nprofessionals in mapping, measuring, and managing those risks. \nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent; Information Security \nGV-2.1-005 \nCreate mechanisms to provide protections for whistleblowers who report, based \non reasonable belief, when the organization violates relevant laws or poses a \nspecific and empirically well-substantiated negative risk to public safety (or has \nalready caused harm).
\nCBRN Information or Capabilities; \nDangerous, Violent, or Hateful \nContent \nAI Actor Tasks: Governance and Oversight \n \n']","When decommissioning AI systems, the following factors should be considered to ensure safety and maintain the organization's trustworthiness: Data retention requirements; Data security, e.g., containment, protocols, Data leakage after decommissioning; Dependencies between upstream, downstream, or other data, internet of things (IOT) or AI systems; Use of open-source data or models; Users’ emotional entanglement with GAI functions.",simple,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 20, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
What issues are associated with the tool meant to help low-risk federal prisoners win early release?,"["" \n \nENDNOTES\n35. Carrie Johnson. Flaws plague a tool meant to help low-risk federal prisoners win early release. NPR.\nJan. 26, 2022. https://www.npr.org/2022/01/26/1075509175/flaws-plague-a-tool-meant-to-help-low\xad\nrisk-federal-prisoners-win-early-release.; Carrie Johnson. Justice Department works to curb racial bias\nin deciding who's released from prison. NPR. Apr. 19, 2022. https://\nwww.npr.org/2022/04/19/1093538706/justice-department-works-to-curb-racial-bias-in-deciding\xad\nwhos-released-from-pris; National Institute of Justice. 2021 Review and Revalidation of the First Step Act\nRisk Assessment Tool. National Institute of Justice NCJ 303859. Dec., 2021. https://www.ojp.gov/\npdffiles1/nij/303859.pdf\n36. Andrew Thompson. Google’s Sentiment Analyzer Thinks Being Gay Is Bad. Vice. Oct. 25, 2017. https://\nwww.vice.com/en/article/j5jmj8/google-artificial-intelligence-bias\n37. Kaggle. Jigsaw Unintended Bias in Toxicity Classification: Detect toxicity across a diverse range of\nconversations. 2019. https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification\n38. Lucas Dixon, John Li, Jeffrey Sorensen, Nithum Thain, and Lucy Vasserman. Measuring and\nMitigating Unintended Bias in Text Classification. Proceedings of AAAI/ACM Conference on AI, Ethics,\nand Society. Feb. 2-3, 2018. https://dl.acm.org/doi/pdf/10.1145/3278721.3278729\n39. Paresh Dave. Google cuts racy results by 30% for searches like 'Latina teenager'. Reuters. Mar. 30,\n2022. https://www.reuters.com/technology/google-cuts-racy-results-by-30-searches-like-latina\xad\nteenager-2022-03-30/\n40. Safiya Umoja Noble. Algorithms of Oppression: How Search Engines Reinforce Racism. NYU Press.\nFeb. 2018. https://nyupress.org/9781479837243/algorithms-of-oppression/\n41. Paresh Dave. Google cuts racy results by 30% for searches like 'Latina teenager'. Reuters. Mar. 30,\n2022. https://www.reuters.com/technology/google-cuts-racy-results-by-30-searches-like-latina\xad\nteenager-2022-03-30/\n42. Miranda Bogen. All the Ways Hiring Algorithms Can Introduce Bias. Harvard Business Review. May\n6, 2019. https://hbr.org/2019/05/all-the-ways-hiring-algorithms-can-introduce-bias\n43. Arli Christian. Four Ways the TSA Is Making Flying Easier for Transgender People.
American Civil\nLiberties Union. Apr. 5, 2022. https://www.aclu.org/news/lgbtq-rights/four-ways-the-tsa-is-making\xad\nflying-easier-for-transgender-people\n44. U.S. Transportation Security Administration. Transgender/ Non Binary / Gender Nonconforming\nPassengers. TSA. Accessed Apr. 21, 2022. https://www.tsa.gov/transgender-passengers\n45. See, e.g., National Disabled Law Students Association. Report on Concerns Regarding Online\nAdministration of Bar Exams. Jul. 29, 2020. https://ndlsa.org/wp-content/uploads/2020/08/\nNDLSA_Online-Exam-Concerns-Report1.pdf; Lydia X. Z. Brown. How Automated Test Proctoring\nSoftware Discriminates Against Disabled Students. Center for Democracy and Technology. Nov. 16, 2020.\nhttps://cdt.org/insights/how-automated-test-proctoring-software-discriminates-against-disabled\xad\nstudents/\n46. Ziad Obermeyer, et al., Dissecting racial bias in an algorithm used to manage the health of\npopulations, 366 Science (2019), https://www""]",The tool meant to help low-risk federal prisoners win early release is plagued by flaws.,simple,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 65, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How can GAI misuse cause emotional harm short and long-term?,"[' \n3 \nthe abuse, misuse, and unsafe repurposing by humans (adversarial or not), and others result \nfrom interactions between a human and an AI system. \n• \nTime scale: GAI risks may materialize abruptly or across extended periods. Examples include \nimmediate (and/or prolonged) emotional harm and potential risks to physical safety due to the \ndistribution of harmful deepfake images, or the long-term effect of disinformation on societal \ntrust in public institutions. \nThe presence of risks and where they fall along the dimensions above will vary depending on the \ncharacteristics of the GAI model, system, or use case at hand. These characteristics include but are not \nlimited to GAI model or system architecture, training mechanisms and libraries, data types used for \ntraining or fine-tuning, levels of model access or availability of model weights, and application or use \ncase context. \nOrganizations may choose to tailor how they measure GAI risks based on these characteristics. They may \nadditionally wish to allocate risk management resources relative to the severity and likelihood of \nnegative impacts, including where and how these risks manifest, and their direct and material impacts \nharms in the context of GAI use. Mitigations for model or system level risks may differ from mitigations \nfor use-case or ecosystem level risks. \nImportantly, some GAI risks are unknown, and are therefore difficult to properly scope or evaluate given \nthe uncertainty about potential GAI scale, complexity, and capabilities. Other risks may be known but \ndifficult to estimate given the wide range of GAI stakeholders, uses, inputs, and outputs. Challenges with \nrisk estimation are aggravated by a lack of visibility into GAI training data, and the generally immature \nstate of the science of AI measurement and safety today.
This document focuses on risks for which there \nis an existing empirical evidence base at the time this profile was written; for example, speculative risks \nthat may potentially arise in more advanced, future GAI systems are not considered. Future updates may \nincorporate additional risks or provide further details on the risks identified below. \nTo guide organizations in identifying and managing GAI risks, a set of risks unique to or exacerbated by \nthe development and use of GAI are defined below.5 Each risk is labeled according to the outcome, \nobject, or source of the risk (i.e., some are risks “to” a subject or domain and others are risks “of” or \n“from” an issue or theme). These risks provide a lens through which organizations can frame and execute \nrisk management efforts. To help streamline risk management efforts, each risk is mapped in Section 3 \n(as well as in tables in Appendix B) to relevant Trustworthy AI Characteristics identified in the AI RMF. \n \n \n5 These risks can be further categorized by organizations depending on their unique approaches to risk definition \nand management. One possible way to further categorize these risks, derived in part from the UK’s International \nScientific Report on the Safety of Advanced AI, could be: 1) Technical / Model risks (or risk from malfunction): \nConfabulation; Dangerous or Violent Recommendations; Data Privacy; Value Chain and Component Integration; \nHarmful Bias, and Homogenization; 2) Misuse by humans (or malicious use): CBRN Information or Capabilities; \nData Privacy; Human-AI Configuration; Obscene, Degrading, and/or Abusive Content; Information Integrity; \nInformation Security; 3) Ecosystem / societal risks (or systemic risks): Data Privacy; Environmental; Intellectual \nProperty. We also note that some risks are cross-cutting between these categories. \n']","GAI misuse can cause emotional harm both immediately and over extended periods. Immediate emotional harm can result from the distribution of harmful deepfake images, while long-term emotional harm can stem from the effects of disinformation on societal trust in public institutions.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 6, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True
How is diverse feedback used in GAI evaluation to ensure provenance and reduce bias?,"["" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums.
\nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. \nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n"", ' \n38 \nMEASURE 2.13: Eļ¬€ectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric eļ¬€ectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are diļ¬ƒcult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. 
\nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might aļ¬€ect \ndiļ¬€erent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization \n']","Diverse feedback is used in GAI evaluation to ensure provenance and reduce bias by recording and integrating structured feedback from operators, users, and potentially impacted communities through methods such as user research studies, focus groups, or community forums. This feedback is actively sought to assess content quality and potential biases, and the results are documented and incorporated into design, implementation, deployment approval, monitoring, and decommission decisions.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How did a store's misuse of predictive analytics show the need for better data protection?,"[' \n \n \n \n \n \n \nDATA PRIVACY \nEXTRA PROTECTIONS FOR DATA RELATED TO SENSITIVE\nDOMAINS\nā€¢\nContinuous positive airway pressure machines gather data for medical purposes, such as diagnosing sleep\napnea, and send usage data to a patientā€™s insurance company, which may subsequently deny coverage for the\ndevice based on usage data. 
Patients were not aware that the data would be used in this way or monitored\nby anyone other than their doctor.70 \n•\nA department store company used predictive analytics applied to collected consumer data to determine that a\nteenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her\nhouse, revealing to her father that she was pregnant.71\n•\nSchool audio surveillance systems monitor student conversations to detect potential ""stress indicators"" as\na warning of potential violence.72 Online proctoring systems claim to detect if a student is cheating on an\nexam using biometric markers.73 These systems have the potential to limit student freedom to express a range\nof emotions at school and may inappropriately flag students with disabilities who need accommodations or\nuse screen readers or dictation software as cheating.74\n•\nLocation data, acquired from a data broker, can be used to identify people who visit abortion clinics.75\n•\nCompanies collect student data such as demographic information, free or reduced lunch status, whether\nthey\'ve used drugs, or whether they\'ve expressed interest in LGBTQI+ groups, and then use that data to \nforecast student success.76 Parents and education experts have expressed concern about collection of such\nsensitive data without express parental consent, the lack of transparency in how such data is being used, and\nthe potential for resulting discriminatory impacts.\n• Many employers transfer employee data to third party job verification services. This information is then used\nby potential future employers, banks, or landlords. In one case, a former employee alleged that a\ncompany supplied false data about her job title which resulted in a job offer being revoked.77\n37\n']","A department store company used predictive analytics applied to collected consumer data to determine that a teenage girl was pregnant, and sent maternity clothing ads and other baby-related advertisements to her house, revealing to her father that she was pregnant.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 36, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
How do safety metrics and security measures help AI handle and fix errors from threats?,"[' \n32 \nMEASURE 2.6: The AI system is evaluated regularly for safety risks – as identified in the MAP function. The AI system to be \ndeployed is demonstrated to be safe, its residual negative risk does not exceed the risk tolerance, and it can fail safely, particularly if \nmade to operate beyond its knowledge limits. Safety metrics reflect system reliability and robustness, real-time monitoring, and \nresponse times for AI system failures. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.6-001 \nAssess adverse impacts, including health and wellbeing impacts for value chain \nor other AI Actors that are exposed to sexually explicit, offensive, or violent \ninformation during GAI training and maintenance.
\nHuman-AI Conļ¬guration; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; \nDangerous, Violent, or Hateful \nContent \nMS-2.6-002 \nAssess existence or levels of harmful bias, intellectual property infringement, \ndata privacy violations, obscenity, extremism, violence, or CBRN information in \nsystem training data. \nData Privacy; Intellectual Property; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; CBRN \nInformation or Capabilities \nMS-2.6-003 Re-evaluate safety features of ļ¬ne-tuned models when the negative risk exceeds \norganizational risk tolerance. \nDangerous, Violent, or Hateful \nContent \nMS-2.6-004 Review GAI system outputs for validity and safety: Review generated code to \nassess risks that may arise from unreliable downstream decision-making. \nValue Chain and Component \nIntegration; Dangerous, Violent, or \nHateful Content \nMS-2.6-005 \nVerify that GAI system architecture can monitor outputs and performance, and \nhandle, recover from, and repair errors when security anomalies, threats and \nimpacts are detected. \nConfabulation; Information \nIntegrity; Information Security \nMS-2.6-006 \nVerify that systems properly handle queries that may give rise to inappropriate, \nmalicious, or illegal usage, including facilitating manipulation, extortion, targeted \nimpersonation, cyber-attacks, and weapons creation. \nCBRN Information or Capabilities; \nInformation Security \nMS-2.6-007 Regularly evaluate GAI system vulnerabilities to possible circumvention of safety \nmeasures. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n', ' \n33 \nMEASURE 2.7: AI system security and resilience ā€“ as identiļ¬ed in the MAP function ā€“ are evaluated and documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.7-001 \nApply established security measures to: Assess likelihood and magnitude of \nvulnerabilities and threats such as backdoors, compromised dependencies, data \nbreaches, eavesdropping, man-in-the-middle attacks, reverse engineering, \nautonomous agents, model theft or exposure of model weights, AI inference, \nbypass, extraction, and other baseline security concerns. \nData Privacy; Information Integrity; \nInformation Security; Value Chain \nand Component Integration \nMS-2.7-002 \nBenchmark GAI system security and resilience related to content provenance \nagainst industry standards and best practices. Compare GAI system security \nfeatures and content provenance methods against industry state-of-the-art. \nInformation Integrity; Information \nSecurity \nMS-2.7-003 \nConduct user surveys to gather user satisfaction with the AI-generated content \nand user perceptions of content authenticity. Analyze user feedback to identify \nconcerns and/or current literacy levels related to content provenance and \nunderstanding of labels on content. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMS-2.7-004 \nIdentify metrics that reļ¬‚ect the eļ¬€ectiveness of security measures, such as data \nprovenance, the number of unauthorized access attempts, inference, bypass, \nextraction, penetrations, or provenance veriļ¬cation. 
\nInformation Integrity; Information \nSecurity \nMS-2.7-005 \nMeasure reliability of content authentication methods, such as watermarking, \ncryptographic signatures, digital fingerprints, as well as access controls, \nconformity assessment, and model integrity verification, which can help support \nthe effective implementation of content provenance techniques. Evaluate the \nrate of false positives and false negatives in content provenance, as well as true \npositives and true negatives for verification. \nInformation Integrity \nMS-2.7-006 \nMeasure the rate at which recommendations from security checks and incidents \nare implemented. Assess how quickly the AI system can adapt and improve \nbased on lessons learned from security incidents and feedback. \nInformation Integrity; Information \nSecurity \nMS-2.7-007 \nPerform AI red-teaming to assess resilience against: Abuse to facilitate attacks on \nother systems (e.g., malicious code generation, enhanced phishing content), GAI \nattacks (e.g., prompt injection), ML attacks (e.g., adversarial examples/prompts, \ndata poisoning, membership inference, model extraction, sponge examples). \nInformation Security; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nMS-2.7-008 Verify fine-tuning does not compromise safety and security controls. \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content \n']","Safety metrics reflect system reliability and robustness, real-time monitoring, and response times for AI system failures. Security measures help assess vulnerabilities and threats, benchmark system security, gather user feedback, identify effective security metrics, measure content authentication methods, and perform AI red-teaming to assess resilience against various attacks.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 35, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 36, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How to ensure accessibility and equity in automated systems?,"[' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nDemonstrate that the system protects against algorithmic discrimination \nIndependent evaluation. 
As described in the section on Safe and Effective Systems, entities should allow \nindependent evaluation of potential algorithmic discrimination caused by automated systems they use or \noversee. In the case of public sector uses, these independent evaluations should be made public unless law \nenforcement or national security restrictions prevent doing so. Care should be taken to balance individual \nprivacy with evaluation data access needs; in many cases, policy-based and/or technological innovations and \ncontrols allow access to such data without compromising privacy. \nReporting. Entities responsible for the development or use of automated systems should provide \nreporting of an appropriately designed algorithmic impact assessment,50 with clear specification of who \nperforms the assessment, who evaluates the system, and how corrective actions are taken (if necessary) in \nresponse to the assessment. This algorithmic impact assessment should include at least: the results of any \nconsultation, design stage equity assessments (potentially including qualitative analysis), accessibility \ndesigns and testing, disparity testing, document any remaining disparities, and detail any mitigation \nimplementation and assessments. This algorithmic impact assessment should be made public whenever \npossible. Reporting should be provided in a clear and machine-readable manner using plain language to \nallow for more straightforward public accountability. \n28\nAlgorithmic \nDiscrimination \nProtections \n', "" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. 
\nDisparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections \n""]","To ensure accessibility and equity in automated systems, organizations should design, develop, and deploy systems in ways that ensure accessibility to people with disabilities. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers. Additionally, automated systems should be tested using a broad set of measures to assess whether the system components produce disparities. Disparity assessments should include demographic performance measures, overall and subgroup parity assessment, and calibration. 
When disparities are identified, steps should be taken to mitigate or eliminate them, and ongoing monitoring should be conducted to assess algorithmic discrimination that might arise from unforeseen interactions or changes to the system or context of use.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 27, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How can orgs use public feedback and incident reports to improve GAI content in decision-making?,"[' \n52 \n• \nMonitoring system capabilities and limitations in deployment through rigorous TEVV processes; \n• \nEvaluating how humans engage, interact with, or adapt to GAI content (especially in decision \nmaking tasks informed by GAI content), and how they react to applied provenance techniques \nsuch as overt disclosures. \nOrganizations can document and delineate GAI system objectives and limitations to identify gaps where \nprovenance data may be most useful. For instance, GAI systems used for content creation may require \nrobust watermarking techniques and corresponding detectors to identify the source of content or \nmetadata recording techniques and metadata management tools and repositories to trace content \norigins and modifications. Further narrowing of GAI task definitions to include provenance data can \nenable organizations to maximize the utility of provenance data and risk management efforts. \nA.1.7. Enhancing Content Provenance through Structured Public Feedback \nWhile indirect feedback methods such as automated error collection systems are useful, they often lack \nthe context and depth that direct input from end users can provide. Organizations can leverage feedback \napproaches described in the Pre-Deployment Testing section to capture input from external sources such \nas through AI red-teaming. \nIntegrating pre- and post-deployment external feedback into the monitoring process for GAI models and \ncorresponding applications can help enhance awareness of performance changes and mitigate potential \nrisks and harms from outputs. There are many ways to capture and make use of user feedback – before \nand after GAI systems and digital content transparency approaches are deployed – to gain insights about \nauthentication efficacy and vulnerabilities, impacts of adversarial threats on techniques, and unintended \nconsequences resulting from the utilization of content provenance approaches on users and \ncommunities. 
Furthermore, organizations can track and document the provenance of datasets to identify \ninstances in which AI-generated data is a potential root cause of performance issues with the GAI \nsystem. \nA.1.8. Incident Disclosure \nOverview \nAI incidents can be defined as an "event, circumstance, or series of events where the development, use, \nor malfunction of one or more AI systems directly or indirectly contributes to one of the following harms: \ninjury or harm to the health of a person or groups of people (including psychological harms and harms to \nmental health); disruption of the management and operation of critical infrastructure; violations of \nhuman rights or a breach of obligations under applicable law intended to protect fundamental, labor, \nand intellectual property rights; or harm to property, communities, or the environment." AI incidents can \noccur in the aggregate (i.e., for systemic discrimination) or acutely (i.e., for one individual). \nState of AI Incident Tracking and Disclosure \nFormal channels do not currently exist to report and document AI incidents. However, a number of \npublicly available databases have been created to document their occurrence. These reporting channels \nmake decisions on an ad hoc basis about what kinds of incidents to track. Some, for example, track by \namount of media coverage. \n', ' \n53 \nDocumenting, reporting, and sharing information about GAI incidents can help mitigate and prevent \nharmful outcomes by assisting relevant AI Actors in tracing impacts to their source. Greater awareness \nand standardization of GAI incident reporting could promote this transparency and improve GAI risk \nmanagement across the AI ecosystem. \nDocumentation and Involvement of AI Actors \nAI Actors should be aware of their roles in reporting AI incidents. To better understand previous incidents \nand implement measures to prevent similar ones in the future, organizations could consider developing \nguidelines for publicly available incident reporting which include information about AI actor \nresponsibilities. These guidelines would help AI system operators identify GAI incidents across the AI \nlifecycle and with AI Actors regardless of role. Documentation and review of third-party inputs and \nplugins for GAI systems is especially important for AI Actors in the context of incident disclosure; LLM \ninputs and content delivered through these plugins is often distributed, with inconsistent or insufficient \naccess control. \nDocumentation practices including logging, recording, and analyzing GAI incidents can facilitate \nsmoother sharing of information with relevant AI Actors. Regular information sharing, change \nmanagement records, version history and metadata can also empower AI Actors responding to and \nmanaging AI incidents. \n \n']","Organizations can use public feedback and incident reports to improve GAI content in decision-making by integrating pre- and post-deployment external feedback into the monitoring process for GAI models and corresponding applications. This helps enhance awareness of performance changes and mitigate potential risks and harms from outputs. Additionally, documenting, reporting, and sharing information about GAI incidents can help mitigate and prevent harmful outcomes by assisting relevant AI Actors in tracing impacts to their source. 
Greater awareness and standardization of GAI incident reporting could promote transparency and improve GAI risk management across the AI ecosystem.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 55, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 56, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How do feedback and testing reveal GAI biases and societal impacts?,"["" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Configuration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Affected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Configuration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. 
\nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval ("go"/"no-go" \ndecisions), monitoring, and decommission decisions. \nHuman-AI Configuration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n"", ' \n38 \nMEASURE 2.13: Effectiveness of the employed TEVV metrics and processes in the MEASURE function are evaluated and \ndocumented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.13-001 \nCreate measurement error models for pre-deployment metrics to demonstrate \nconstruct validity for each metric (i.e., does the metric effectively operationalize \nthe desired concept): Measure or estimate, and document, biases or statistical \nvariance in applied metrics or structured human feedback processes; Leverage \ndomain expertise when modeling complex societal constructs such as hateful \ncontent. \nConfabulation; Information \nIntegrity; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, Operation and Monitoring, TEVV \n \nMEASURE 3.2: Risk tracking approaches are considered for settings where AI risks are difficult to assess using currently available \nmeasurement techniques or where metrics are not yet available. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.2-001 \nEstablish processes for identifying emergent GAI system risks including \nconsulting with external AI Actors. \nHuman-AI Configuration; \nConfabulation \nAI Actor Tasks: AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \nMEASURE 3.3: Feedback processes for end users and impacted communities to report problems and appeal system outcomes are \nestablished and integrated into AI system evaluation metrics. \nAction ID \nSuggested Action \nGAI Risks \nMS-3.3-001 \nConduct impact assessments on how AI-generated content might affect \ndifferent social, economic, and cultural groups. \nHarmful Bias and Homogenization \nMS-3.3-002 \nConduct studies to understand how end users perceive and interact with GAI \ncontent and accompanying content provenance within context of use. Assess \nwhether the content aligns with their expectations and how they may act upon \nthe information presented. \nHuman-AI Configuration; \nInformation Integrity \nMS-3.3-003 \nEvaluate potential biases and stereotypes that could emerge from the AI-\ngenerated content using appropriate methodologies including computational \ntesting methods as well as evaluating structured feedback input. \nHarmful Bias and Homogenization \n']","Feedback and testing reveal GAI biases and societal impacts through methods such as user research studies, focus groups, community forums, adversarial testing, real-world scenario evaluations, and structured public feedback exercises. 
These methods help identify potential biases, understand misuse scenarios, and assess the general awareness among end users and impacted communities.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 41, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How do EO 13960 and NIST AI RMF ensure AI transparency and accountability for federal agencies?,"[' \n \n \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nHOW THESE PRINCIPLES CAN MOVE INTO PRACTICE\nReal-life examples of how these principles can become reality, through laws, policies, and practical \ntechnical and sociotechnical approaches to protecting rights, opportunities, and access. \xad\xad\nExecutive Order 13960 on Promoting the Use of Trustworthy Artificial Intelligence in the \nFederal Government requires that certain federal agencies adhere to nine principles when \ndesigning, developing, acquiring, or using AI for purposes other than national security or \ndefense. These principles—while taking into account the sensitive law enforcement and other contexts in which \nthe federal government may use AI, as opposed to private sector use of AI—require that AI is: (a) lawful and \nrespectful of our Nation's values; (b) purposeful and performance-driven; (c) accurate, reliable, and effective; (d) \nsafe, secure, and resilient; (e) understandable; (f ) responsible and traceable; (g) regularly monitored; (h) transpar-\nent; and, (i) accountable. The Blueprint for an AI Bill of Rights is consistent with the Executive Order. \nAffected agencies across the federal government have released AI use case inventories13 and are implementing \nplans to bring those AI systems into compliance with the Executive Order or retire them. \nThe law and policy landscape for motor vehicles shows that strong safety regulations—and \nmeasures to address harms when they occur—can enhance innovation in the context of com-\nplex technologies. Cars, like automated digital systems, comprise a complex collection of components. 
\nThe National Highway Traffic Safety Administration,14 through its rigorous standards and independent \nevaluation, helps make sure vehicles on our roads are safe without limiting manufacturers' ability to \ninnovate.15 At the same time, rules of the road are implemented locally to impose contextually appropriate \nrequirements on drivers, such as slowing down near schools or playgrounds.16\nFrom large companies to start-ups, industry is providing innovative solutions that allow \norganizations to mitigate risks to the safety and efficacy of AI systems, both before \ndeployment and through monitoring over time.17 These innovative solutions include risk \nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \nmitigate risks posed by the use of AI to companies' reputation, legal responsibilities, and other product safety \nand effectiveness concerns. \nThe Office of Management and Budget (OMB) has called for an expansion of opportunities \nfor meaningful stakeholder engagement in the design of programs and services. OMB also \npoints to numerous examples of effective and proactive stakeholder engagement, including the Community-\nBased Participatory Research Program developed by the National Institutes of Health and the participatory \ntechnology assessments developed by the National Oceanic and Atmospheric Administration.18\nThe National Institute of Standards and Technology (NIST) is developing a risk \nmanagement framework to better manage risks posed to individuals, organizations, and \nsociety by AI.19 The NIST AI Risk Management Framework, as mandated by Congress, is intended for \nvoluntary use to help incorporate trustworthiness considerations into the design, development, use, and \nevaluation of AI products, services, and systems. The NIST framework is being developed through a consensus-\ndriven, open, transparent, and collaborative process that includes workshops and other opportunities to provide \ninput. The NIST framework aims to foster the development of innovative approaches to address \ncharacteristics of trustworthiness including accuracy, explainability and interpretability, reliability, privacy, \nrobustness, safety, security (resilience), and mitigation of unintended and/or harmful bias, as well as of \nharmful \nuses. \nThe \nNIST \nframework \nwill \nconsider \nand \nencompass \nprinciples \nsuch \nas \ntransparency, accountability, and fairness during pre-design, design and development, deployment, use, \nand testing and evaluation of AI technologies and systems. It is expected to be released in the winter of 2022-23. \n21\n']","EO 13960 ensures AI transparency and accountability for federal agencies by requiring that AI is transparent and accountable among other principles. 
The NIST AI Risk Management Framework aims to foster the development of innovative approaches to address characteristics of trustworthiness, including transparency and accountability, during pre-design, design and development, deployment, use, and testing and evaluation of AI technologies and systems.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 20, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How can human expertise and content provenance boost GAI performance and ensure data privacy?,"[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. \nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Configuration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, fine-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Configuration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Configuration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that fine-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. \nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n', ' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. 
Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output filters; Removing any personally \nidentifiable information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConfiguration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConfiguration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, differential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConfiguration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for fine tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Configuration \n']","Human expertise and content provenance can boost GAI performance by employing techniques such as RLHF, fine-tuning, retrieval-augmented generation, content moderation, and business rules. To ensure data privacy, it is important to anonymize data, leverage privacy output filters, and remove any personally identifiable information (PII).",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"What's the best environment for testing GAI's trustworthiness, data privacy, and human subject protection?","[' \n31 \nMS-2.3-004 \nUtilize a purpose-built testing environment such as NIST Dioptra to empirically \nevaluate GAI trustworthy characteristics. 
\nCBRN Information or Capabilities; \nData Privacy; Confabulation; \nInformation Integrity; Information \nSecurity; Dangerous, Violent, or \nHateful Content; Harmful Bias and \nHomogenization \nAI Actor Tasks: AI Deployment, TEVV \n \nMEASURE 2.5: The AI system to be deployed is demonstrated to be valid and reliable. Limitations of the generalizability beyond the \nconditions under which the technology was developed are documented. \nAction ID \nSuggested Action \nRisks \nMS-2.5-001 Avoid extrapolating GAI system performance or capabilities from narrow, non-\nsystematic, and anecdotal assessments. \nHuman-AI Configuration; \nConfabulation \nMS-2.5-002 \nDocument the extent to which human domain knowledge is employed to \nimprove GAI system performance, via, e.g., RLHF, fine-tuning, retrieval-\naugmented generation, content moderation, business rules. \nHuman-AI Configuration \nMS-2.5-003 Review and verify sources and citations in GAI system outputs during pre-\ndeployment risk measurement and ongoing monitoring activities. \nConfabulation \nMS-2.5-004 Track and document instances of anthropomorphization (e.g., human images, \nmentions of human feelings, cyborg imagery or motifs) in GAI system interfaces. Human-AI Configuration \nMS-2.5-005 Verify GAI system training data and TEVV data provenance, and that fine-tuning \nor retrieval-augmented generation data is grounded. \nInformation Integrity \nMS-2.5-006 \nRegularly review security and safety guardrails, especially if the GAI system is \nbeing operated in novel circumstances. This includes reviewing reasons why the \nGAI system was initially assessed as being safe to deploy. \nInformation Security; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: Domain Experts, TEVV \n \n', ' \n30 \nMEASURE 2.2: Evaluations involving human subjects meet applicable requirements (including human subject protection) and are \nrepresentative of the relevant population. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.2-001 Assess and manage statistical biases related to GAI content provenance through \ntechniques such as re-sampling, re-weighting, or adversarial training. \nInformation Integrity; Information \nSecurity; Harmful Bias and \nHomogenization \nMS-2.2-002 \nDocument how content provenance data is tracked and how that data interacts \nwith privacy and security. Consider: Anonymizing data to protect the privacy of \nhuman subjects; Leveraging privacy output filters; Removing any personally \nidentifiable information (PII) to prevent potential harm or misuse. \nData Privacy; Human AI \nConfiguration; Information \nIntegrity; Information Security; \nDangerous, Violent, or Hateful \nContent \nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \nconsent for present or future use of their data in GAI applications. \nData Privacy; Human-AI \nConfiguration; Information \nIntegrity \nMS-2.2-004 \nUse techniques such as anonymization, differential privacy or other privacy-\nenhancing technologies to minimize the risks associated with linking AI-generated \ncontent back to individual human subjects. \nData Privacy; Human-AI \nConfiguration \nAI Actor Tasks: AI Development, Human Factors, TEVV \n \nMEASURE 2.3: AI system performance or assurance criteria are measured qualitatively or quantitatively and demonstrated for \nconditions similar to deployment setting(s). Measures are documented. 
\nAction ID \nSuggested Action \nGAI Risks \nMS-2.3-001 Consider baseline model performance on suites of benchmarks when selecting a \nmodel for fine tuning or enhancement with retrieval-augmented generation. \nInformation Security; \nConfabulation \nMS-2.3-002 Evaluate claims of model capabilities using empirically validated methods. \nConfabulation; Information \nSecurity \nMS-2.3-003 Share results of pre-deployment testing with relevant GAI Actors, such as those \nwith system release approval authority. \nHuman-AI Configuration \n']","The best environment for testing GAI's trustworthiness, data privacy, and human subject protection is a purpose-built testing environment such as NIST Dioptra.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 34, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 33, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How can automated systems ensure accessibility and prevent bias?,"["" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. 
Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. \nDisparity mitigation. When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections \n"", ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. 
Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. \nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. 
At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']","Automated systems can ensure accessibility by being designed, developed, and deployed in ways that ensure accessibility to people with disabilities. This includes considering a wide variety of disabilities, adhering to relevant accessibility standards, and conducting user experience research both before and after deployment to identify and address any accessibility barriers. To prevent bias, automated systems should be tested using a broad set of measures to assess whether the system components produce disparities. This includes demographic performance measures, overall and subgroup parity assessment, and calibration. Disparity mitigation steps should be taken if a disparity is identified, and ongoing monitoring should be performed to assess algorithmic discrimination that might arise from unforeseen interactions or changes.",multi_context,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do the U.S. AI Safety Institute and AI Risk Management Framework support the 2023 AI Executive Order?,"[' \n \n \nAbout AI at NIST: The National Institute of Standards and Technology (NIST) develops measurements, \ntechnology, tools, and standards to advance reliable, safe, transparent, explainable, privacy-enhanced, \nand fair artificial intelligence (AI) so that its full commercial and societal benefits can be realized without \nharm to people or the planet. NIST, which has conducted both fundamental and applied work on AI for \nmore than a decade, is also helping to fulfill the 2023 Executive Order on Safe, Secure, and Trustworthy \nAI. NIST established the U.S. AI Safety Institute and the companion AI Safety Institute Consortium to \ncontinue the efforts set in motion by the E.O. to build the science necessary for safe, secure, and \ntrustworthy development and use of AI. \nAcknowledgments: This report was accomplished with the many helpful comments and contributions \nfrom the community, including the NIST Generative AI Public Working Group, and NIST staff and guest \nresearchers: Chloe Autio, Jesse Dunietz, Patrick Hall, Shomik Jain, Kamie Roberts, Reva Schwartz, Martin \nStanley, and Elham Tabassi. 
\nNIST Technical Series Policies \nCopyright, Use, and Licensing Statements \nNIST Technical Series Publication Identifier Syntax \nPublication History \nApproved by the NIST Editorial Review Board on 07-25-2024 \nContact Information \nai-inquiries@nist.gov \nNational Institute of Standards and Technology \nAttn: NIST AI Innovation Lab, Information Technology Laboratory \n100 Bureau Drive (Mail Stop 8900) Gaithersburg, MD 20899-8900 \nAdditional Information \nAdditional information about this publication and other NIST AI publications are available at \nhttps://airc.nist.gov/Home. \n \nDisclaimer: Certain commercial entities, equipment, or materials may be identified in this document in \norder to adequately describe an experimental procedure or concept. Such identification is not intended to \nimply recommendation or endorsement by the National Institute of Standards and Technology, nor is it \nintended to imply that the entities, materials, or equipment are necessarily the best available for the \npurpose. Any mention of commercial, non-profit, academic partners, or their products, or references is \nfor information only; it is not intended to imply endorsement or recommendation by any U.S. \nGovernment agency. \n \n', ' \n \n \nNIST Trustworthy and Responsible AI \nNIST AI 600-1 \nArtificial Intelligence Risk Management \nFramework: Generative Artificial \nIntelligence Profile \n \n \n \nThis publication is available free of charge from: \nhttps://doi.org/10.6028/NIST.AI.600-1 \n \nJuly 2024 \n \n \n \n \nU.S. Department of Commerce \nGina M. Raimondo, Secretary \nNational Institute of Standards and Technology \nLaurie E. Locascio, NIST Director and Under Secretary of Commerce for Standards and Technology \n \n']",The answer to given question is not present in context,multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 2, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 1, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How can we ensure effective oversight and risk mgmt of GAI systems with third-party input?,"[' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. 
\nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more signiļ¬cant risks. \nHuman-AI Conļ¬guration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-partyā€™s intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize diļ¬€erent types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management eļ¬€orts with third parties (e.g., incidents detected and \nresponse times). \nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-deļ¬ned contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. \nInformation Integrity; Information \nSecurity; Intellectual Property \n', ' \n19 \nGV-4.1-003 \nEstablish policies, procedures, and processes for oversight functions (e.g., senior \nleadership, legal, compliance, including internal evaluation) across the GAI \nlifecycle, from problem formulation and supply chains to system decommission. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.2: Organizational teams document the risks and potential impacts of the AI technology they design, develop, deploy, \nevaluate, and use, and they communicate about the impacts more broadly. \nAction ID \nSuggested Action \nGAI Risks \nGV-4.2-001 \nEstablish terms of use and terms of service for GAI systems. \nIntellectual Property; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nGV-4.2-002 \nInclude relevant AI Actors in the GAI system risk identiļ¬cation process. \nHuman-AI Conļ¬guration \nGV-4.2-003 \nVerify that downstream GAI system impacts (such as the use of third-party \nplugins) are included in the impact documentation process. \nValue Chain and Component \nIntegration \nAI Actor Tasks: AI Deployment, AI Design, AI Development, Operation and Monitoring \n \nGOVERN 4.3: Organizational practices are in place to enable AI testing, identiļ¬cation of incidents, and information sharing. 
\nAction ID \nSuggested Action \nGAI Risks \nGV4.3--001 \nEstablish policies for measuring the eļ¬€ectiveness of employed content \nprovenance methodologies (e.g., cryptography, watermarking, steganography, \netc.) \nInformation Integrity \nGV-4.3-002 \nEstablish organizational practices to identify the minimum set of criteria \nnecessary for GAI system incident reporting such as: System ID (auto-generated \nmost likely), Title, Reporter, System/Source, Data Reported, Date of Incident, \nDescription, Impact(s), Stakeholder(s) Impacted. \nInformation Security \n']","To ensure effective oversight and risk management of GAI systems with third-party input, organizations should establish policies and procedures that address AI risks associated with third-party entities. This includes categorizing different types of GAI content with associated third-party rights, conducting joint educational activities and events to promote best practices for managing GAI risks, developing and validating approaches for measuring the success of content provenance management efforts, and drafting and maintaining well-defined contracts and service level agreements (SLAs) that specify content ownership, usage rights, quality standards, security requirements, and content provenance expectations.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 22, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How is the integrity of third-party pre-trained models ensured in GAI?,"[' \n43 \nMG-3.1-005 Review various transparency artifacts (e.g., system cards and model cards) for \nthird-party models. \nInformation Integrity; Information \nSecurity; Value Chain and \nComponent Integration \nAI Actor Tasks: AI Deployment, Operation and Monitoring, Third-party entities \n \nMANAGE 3.2: Pre-trained models which are used for development are monitored as part of AI system regular monitoring and \nmaintenance. \nAction ID \nSuggested Action \nGAI Risks \nMG-3.2-001 \nApply explainable AI (XAI) techniques (e.g., analysis of embeddings, model \ncompression/distillation, gradient-based attributions, occlusion/term reduction, \ncounterfactual prompts, word clouds) as part of ongoing continuous \nimprovement processes to mitigate risks related to unexplainable GAI systems. 
\nHarmful Bias and Homogenization \nMG-3.2-002 \nDocument how pre-trained models have been adapted (e.g., ļ¬ne-tuned, or \nretrieval-augmented generation) for the speciļ¬c generative task, including any \ndata augmentations, parameter adjustments, or other modiļ¬cations. Access to \nun-tuned (baseline) models supports debugging the relative inļ¬‚uence of the pre-\ntrained weights compared to the ļ¬ne-tuned model weights or other system \nupdates. \nInformation Integrity; Data Privacy \nMG-3.2-003 \nDocument sources and types of training data and their origins, potential biases \npresent in the data related to the GAI application and its content provenance, \narchitecture, training process of the pre-trained model including information on \nhyperparameters, training duration, and any ļ¬ne-tuning or retrieval-augmented \ngeneration processes applied. \nInformation Integrity; Harmful Bias \nand Homogenization; Intellectual \nProperty \nMG-3.2-004 Evaluate user reported problematic content and integrate feedback into system \nupdates. \nHuman-AI Conļ¬guration, \nDangerous, Violent, or Hateful \nContent \nMG-3.2-005 \nImplement content ļ¬lters to prevent the generation of inappropriate, harmful, \nfalse, illegal, or violent content related to the GAI application, including for CSAM \nand NCII. These ļ¬lters can be rule-based or leverage additional machine learning \nmodels to ļ¬‚ag problematic inputs and outputs. \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content; \nObscene, Degrading, and/or \nAbusive Content \nMG-3.2-006 \nImplement real-time monitoring processes for analyzing generated content \nperformance and trustworthiness characteristics related to content provenance \nto identify deviations from the desired standards and trigger alerts for human \nintervention. \nInformation Integrity \n', ' \n44 \nMG-3.2-007 \nLeverage feedback and recommendations from organizational boards or \ncommittees related to the deployment of GAI applications and content \nprovenance when using third-party pre-trained models. \nInformation Integrity; Value Chain \nand Component Integration \nMG-3.2-008 \nUse human moderation systems where appropriate to review generated content \nin accordance with human-AI conļ¬guration policies established in the Govern \nfunction, aligned with socio-cultural norms in the context of use, and for settings \nwhere AI models are demonstrated to perform poorly. \nHuman-AI Conļ¬guration \nMG-3.2-009 \nUse organizational risk tolerance to evaluate acceptable risks and performance \nmetrics and decommission or retrain pre-trained models that perform outside of \ndeļ¬ned limits. \nCBRN Information or Capabilities; \nConfabulation \nAI Actor Tasks: AI Deployment, Operation and Monitoring, Third-party entities \n \nMANAGE 4.1: Post-deployment AI system monitoring plans are implemented, including mechanisms for capturing and evaluating \ninput from users and other relevant AI Actors, appeal and override, decommissioning, incident response, recovery, and change \nmanagement. \nAction ID \nSuggested Action \nGAI Risks \nMG-4.1-001 \nCollaborate with external researchers, industry experts, and community \nrepresentatives to maintain awareness of emerging best practices and \ntechnologies in measuring and managing identiļ¬ed risks. 
\nInformation Integrity; Harmful Bias \nand Homogenization \nMG-4.1-002 \nEstablish, maintain, and evaluate eļ¬€ectiveness of organizational processes and \nprocedures for post-deployment monitoring of GAI systems, particularly for \npotential confabulation, CBRN, or cyber risks. \nCBRN Information or Capabilities; \nConfabulation; Information \nSecurity \nMG-4.1-003 \nEvaluate the use of sentiment analysis to gauge user sentiment regarding GAI \ncontent performance and impact, and work in collaboration with AI Actors \nexperienced in user research and experience. \nHuman-AI Conļ¬guration \nMG-4.1-004 Implement active learning techniques to identify instances where the model fails \nor produces unexpected outputs. \nConfabulation \nMG-4.1-005 \nShare transparency reports with internal and external stakeholders that detail \nsteps taken to update the GAI system to enhance transparency and \naccountability. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization \nMG-4.1-006 \nTrack dataset modiļ¬cations for provenance by monitoring data deletions, \nrectiļ¬cation requests, and other changes that may impact the veriļ¬ability of \ncontent origins. \nInformation Integrity \n']","The integrity of third-party pre-trained models in GAI is ensured through several actions: reviewing transparency artifacts (e.g., system cards and model cards), applying explainable AI (XAI) techniques, documenting how pre-trained models have been adapted, documenting sources and types of training data and their origins, evaluating user-reported problematic content, implementing content filters, real-time monitoring processes, leveraging feedback from organizational boards or committees, using human moderation systems, and using organizational risk tolerance to evaluate acceptable risks and performance metrics.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 46, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 47, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How to ensure data accuracy and integrity in GAI systems while addressing impacts with AI Actors?,"[' \n28 \nMAP 5.2: Practices and personnel for supporting regular engagement with relevant AI Actors and integrating feedback about \npositive, negative, and unanticipated impacts are in place and documented. 
\nAction ID \nSuggested Action \nGAI Risks \nMP-5.2-001 \nDetermine context-based measures to identify if new impacts are present due to \nthe GAI system, including regular engagements with downstream AI Actors to \nidentify and quantify new contexts of unanticipated impacts of GAI systems. \nHuman-AI Conļ¬guration; Value \nChain and Component Integration \nMP-5.2-002 \nPlan regular engagements with AI Actors responsible for inputs to GAI systems, \nincluding third-party data and algorithms, to review and evaluate unanticipated \nimpacts. \nHuman-AI Conļ¬guration; Value \nChain and Component Integration \nAI Actor Tasks: AI Deployment, AI Design, AI Impact Assessment, Aļ¬€ected Individuals and Communities, Domain Experts, End-\nUsers, Human Factors, Operation and Monitoring \n \nMEASURE 1.1: Approaches and metrics for measurement of AI risks enumerated during the MAP function are selected for \nimplementation starting with the most signiļ¬cant AI risks. The risks or trustworthiness characteristics that will not ā€“ or cannot ā€“ be \nmeasured are properly documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-1.1-001 Employ methods to trace the origin and modiļ¬cations of digital content. \nInformation Integrity \nMS-1.1-002 \nIntegrate tools designed to analyze content provenance and detect data \nanomalies, verify the authenticity of digital signatures, and identify patterns \nassociated with misinformation or manipulation. \nInformation Integrity \nMS-1.1-003 \nDisaggregate evaluation metrics by demographic factors to identify any \ndiscrepancies in how content provenance mechanisms work across diverse \npopulations. \nInformation Integrity; Harmful \nBias and Homogenization \nMS-1.1-004 Develop a suite of metrics to evaluate structured public feedback exercises \ninformed by representative AI Actors. \nHuman-AI Conļ¬guration; Harmful \nBias and Homogenization; CBRN \nInformation or Capabilities \nMS-1.1-005 \nEvaluate novel methods and technologies for the measurement of GAI-related \nrisks including in content provenance, oļ¬€ensive cyber, and CBRN, while \nmaintaining the modelsā€™ ability to produce valid, reliable, and factually accurate \noutputs. \nInformation Integrity; CBRN \nInformation or Capabilities; \nObscene, Degrading, and/or \nAbusive Content \n', ' \n25 \nMP-2.3-002 Review and document accuracy, representativeness, relevance, suitability of data \nused at diļ¬€erent stages of AI life cycle. \nHarmful Bias and Homogenization; \nIntellectual Property \nMP-2.3-003 \nDeploy and document fact-checking techniques to verify the accuracy and \nveracity of information generated by GAI systems, especially when the \ninformation comes from multiple (or unknown) sources. \nInformation Integrity \nMP-2.3-004 Develop and implement testing techniques to identify GAI produced content (e.g., \nsynthetic media) that might be indistinguishable from human-generated content. Information Integrity \nMP-2.3-005 Implement plans for GAI systems to undergo regular adversarial testing to identify \nvulnerabilities and potential manipulation or misuse. \nInformation Security \nAI Actor Tasks: AI Development, Domain Experts, TEVV \n \nMAP 3.4: Processes for operator and practitioner proļ¬ciency with AI system performance and trustworthiness ā€“ and relevant \ntechnical standards and certiļ¬cations ā€“ are deļ¬ned, assessed, and documented. 
\nAction ID \nSuggested Action \nGAI Risks \nMP-3.4-001 \nEvaluate whether GAI operators and end-users can accurately understand \ncontent lineage and origin. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMP-3.4-002 Adapt existing training programs to include modules on digital content \ntransparency. \nInformation Integrity \nMP-3.4-003 Develop certiļ¬cation programs that test proļ¬ciency in managing GAI risks and \ninterpreting content provenance, relevant to speciļ¬c industry and context. \nInformation Integrity \nMP-3.4-004 Delineate human proļ¬ciency tests from tests of GAI capabilities. \nHuman-AI Conļ¬guration \nMP-3.4-005 Implement systems to continually monitor and track the outcomes of human-GAI \nconļ¬gurations for future reļ¬nement and improvements. \nHuman-AI Conļ¬guration; \nInformation Integrity \nMP-3.4-006 \nInvolve the end-users, practitioners, and operators in GAI system in prototyping \nand testing activities. Make sure these tests cover various scenarios, such as crisis \nsituations or ethically sensitive contexts. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization; Dangerous, \nViolent, or Hateful Content \nAI Actor Tasks: AI Design, AI Development, Domain Experts, End-Users, Human Factors, Operation and Monitoring \n \n']","To ensure data accuracy and integrity in GAI systems while addressing impacts with AI Actors, the following actions are suggested: 1) Plan regular engagements with AI Actors responsible for inputs to GAI systems, including third-party data and algorithms, to review and evaluate unanticipated impacts. 2) Review and document accuracy, representativeness, relevance, and suitability of data used at different stages of the AI life cycle. 3) Deploy and document fact-checking techniques to verify the accuracy and veracity of information generated by GAI systems, especially when the information comes from multiple (or unknown) sources. 4) Develop and implement testing techniques to identify GAI-produced content that might be indistinguishable from human-generated content. 
5) Implement plans for GAI systems to undergo regular adversarial testing to identify vulnerabilities and potential manipulation or misuse.",multi_context,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 31, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}, {'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 28, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How do equity assessments and mitigation steps prevent algorithmic bias?,"["" \n \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEnsuring accessibility during design, development, and deployment. Systems should be \ndesigned, developed, and deployed by organizations in ways that ensure accessibility to people with disabili\xad\nties. This should include consideration of a wide variety of disabilities, adherence to relevant accessibility \nstandards, and user experience research both before and after deployment to identify and address any accessi\xad\nbility barriers to the use or effectiveness of the automated system. \nDisparity assessment. Automated systems should be tested using a broad set of measures to assess wheth\xad\ner the system components, both in pre-deployment testing and in-context deployment, produce disparities. \nThe demographics of the assessed groups should be as inclusive as possible of race, color, ethnicity, sex \n(including pregnancy, childbirth, and related medical conditions, gender identity, intersex status, and sexual \norientation), religion, age, national origin, disability, veteran status, genetic information, or any other classifi\xad\ncation protected by law. The broad set of measures assessed should include demographic performance mea\xad\nsures, overall and subgroup parity assessment, and calibration. Demographic data collected for disparity \nassessment should be separated from data used for the automated system and privacy protections should be \ninstituted; in some cases it may make sense to perform such assessment using a data sample. For every \ninstance where the deployed automated system leads to different treatment or impacts disfavoring the identi\xad\nfied groups, the entity governing, implementing, or using the system should document the disparity and a \njustification for any continued use of the system. \nDisparity mitigation. 
When a disparity assessment identifies a disparity against an assessed group, it may \nbe appropriate to take steps to mitigate or eliminate the disparity. In some cases, mitigation or elimination of \nthe disparity may be required by law. \nDisparities that have the potential to lead to algorithmic \ndiscrimination, cause meaningful harm, or violate equity49 goals should be mitigated. When designing and \nevaluating an automated system, steps should be taken to evaluate multiple models and select the one that \nhas the least adverse impact, modify data input choices, or otherwise identify a system with fewer \ndisparities. If adequate mitigation of the disparity is not possible, then the use of the automated system \nshould be reconsidered. One of the considerations in whether to use the system should be the validity of any \ntarget measure; unobservable targets may result in the inappropriate use of proxies. Meeting these \nstandards may require instituting mitigation procedures and other protective measures to address \nalgorithmic discrimination, avoid meaningful harm, and achieve equity goals. \nOngoing monitoring and mitigation. Automated systems should be regularly monitored to assess algo\xad\nrithmic discrimination that might arise from unforeseen interactions of the system with inequities not \naccounted for during the pre-deployment testing, changes to the system after deployment, or changes to the \ncontext of use or associated data. Monitoring and disparity assessment should be performed by the entity \ndeploying or using the automated system to examine whether the system has led to algorithmic discrimina\xad\ntion when deployed. This assessment should be performed regularly and whenever a pattern of unusual \nresults is occurring. It can be performed using a variety of approaches, taking into account whether and how \ndemographic information of impacted people is available, for example via testing with a sample of users or via \nqualitative user experience research. Riskier and higher-impact systems should be monitored and assessed \nmore frequently. Outcomes of this assessment should include additional disparity mitigation, if needed, or \nfallback to earlier procedures in the case that equity standards are no longer met and can't be mitigated, and \nprior mechanisms provide better adherence to equity standards. \n27\nAlgorithmic \nDiscrimination \nProtections \n"", ' \n \n \n \n \n \n \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nAny automated system should be tested to help ensure it is free from algorithmic discrimination before it can be \nsold or used. Protection against algorithmic discrimination should include designing to ensure equity, broadly \nconstrued. Some algorithmic discrimination is already prohibited under existing anti-discrimination law. The \nexpectations set out below describe proactive technical and policy steps that can be taken to not only \nreinforce those legal protections but extend beyond them to ensure equity for underserved communities48 \neven in circumstances where a specific legal protection may not be clearly established. These protections \nshould be instituted throughout the design, development, and deployment process and are described below \nroughly in the order in which they would be instituted. 
\nProtect the public from algorithmic discrimination in a proactive and ongoing manner \nProactive assessment of equity in design. Those responsible for the development, use, or oversight of \nautomated systems should conduct proactive equity assessments in the design phase of the technology \nresearch and development or during its acquisition to review potential input data, associated historical \ncontext, accessibility for people with disabilities, and societal goals to identify potential discrimination and \neffects on equity resulting from the introduction of the technology. The assessed groups should be as inclusive \nas possible of the underserved communities mentioned in the equity definition: Black, Latino, and Indigenous \nand Native American persons, Asian Americans and Pacific Islanders and other persons of color; members of \nreligious minorities; women, girls, and non-binary people; lesbian, gay, bisexual, transgender, queer, and inter-\nsex (LGBTQI+) persons; older adults; persons with disabilities; persons who live in rural areas; and persons \notherwise adversely affected by persistent poverty or inequality. Assessment could include both qualitative \nand quantitative evaluations of the system. This equity assessment should also be considered a core part of the \ngoals of the consultation conducted as part of the safety and efficacy review. \nRepresentative and robust data. Any data used as part of system development or assessment should be \nrepresentative of local communities based on the planned deployment setting and should be reviewed for bias \nbased on the historical and societal context of the data. Such data should be sufficiently robust to identify and \nhelp to mitigate biases and potential harms. \nGuarding against proxies. Directly using demographic information in the design, development, or \ndeployment of an automated system (for purposes other than evaluating a system for discrimination or using \na system to counter discrimination) runs a high risk of leading to algorithmic discrimination and should be \navoided. In many cases, attributes that are highly correlated with demographic features, known as proxies, can \ncontribute to algorithmic discrimination. In cases where use of the demographic features themselves would \nlead to illegal algorithmic discrimination, reliance on such proxies in decision-making (such as that facilitated \nby an algorithm) may also be prohibited by law. Proactive testing should be performed to identify proxies by \ntesting for correlation between demographic information and attributes in any data used as part of system \ndesign, development, or use. If a proxy is identified, designers, developers, and deployers should remove the \nproxy; if needed, it may be possible to identify alternative attributes that can be used instead. At a minimum, \norganizations should ensure a proxy feature is not given undue weight and should monitor the system closely \nfor any resulting algorithmic discrimination. \n26\nAlgorithmic \nDiscrimination \nProtections \n']","Equity assessments and mitigation steps prevent algorithmic bias by conducting proactive equity assessments in the design phase to review potential input data, associated historical context, accessibility for people with disabilities, and societal goals to identify potential discrimination and effects on equity. Additionally, when a disparity assessment identifies a disparity against an assessed group, steps may be taken to mitigate or eliminate the disparity. 
This includes evaluating multiple models to select the one with the least adverse impact, modifying data input choices, or identifying a system with fewer disparities. If adequate mitigation is not possible, the use of the automated system should be reconsidered.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 26, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}, {'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 25, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do time-critical systems affect human fallback timing?,"["" \n \n \n \n \n \n \nHUMAN ALTERNATIVES, \nCONSIDERATION, AND \nFALLBACK \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nEquitable. Consideration should be given to ensuring outcomes of the fallback and escalation system are \nequitable when compared to those of the automated system and such that the fallback and escalation \nsystem provides equitable access to underserved communities.105 \nTimely. Human consideration and fallback are only useful if they are conducted and concluded in a \ntimely manner. The determination of what is timely should be made relative to the specific automated \nsystem, and the review system should be staffed and regularly assessed to ensure it is providing timely \nconsideration and fallback. In time-critical systems, this mechanism should be immediately available or, \nwhere possible, available before the harm occurs. Time-critical systems include, but are not limited to, \nvoting-related systems, automated building access and other access systems, systems that form a critical \ncomponent of healthcare, and systems that have the ability to withhold wages or otherwise cause \nimmediate financial penalties. \nEffective. The organizational structure surrounding processes for consideration and fallback should \nbe designed so that if the human decision-maker charged with reassessing a decision determines that it \nshould be overruled, the new decision will be effectively enacted. This includes ensuring that the new \ndecision is entered into the automated system throughout its components, any previous repercussions from \nthe old decision are also overturned, and safeguards are put in place to help ensure that future decisions do \nnot result in the same errors. \nMaintained. The human consideration and fallback process and any associated automated processes \nshould be maintained and supported as long as the relevant automated system continues to be in use. 
\nInstitute training, assessment, and oversight to combat automation bias and ensure any \nhuman-based components of a system are effective. \nTraining and assessment. Anyone administering, interacting with, or interpreting the outputs of an auto\xad\nmated system should receive training in that system, including how to properly interpret outputs of a system \nin light of its intended purpose and in how to mitigate the effects of automation bias. The training should reoc\xad\ncur regularly to ensure it is up to date with the system and to ensure the system is used appropriately. Assess\xad\nment should be ongoing to ensure that the use of the system with human involvement provides for appropri\xad\nate results, i.e., that the involvement of people does not invalidate the system's assessment as safe and effective \nor lead to algorithmic discrimination. \nOversight. Human-based systems have the potential for bias, including automation bias, as well as other \nconcerns that may limit their effectiveness. The results of assessments of the efficacy and potential bias of \nsuch human-based systems should be overseen by governance structures that have the potential to update the \noperation of the human-based system in order to mitigate these effects. \n50\n""]","In time-critical systems, the human consideration and fallback mechanism should be immediately available or, where possible, available before the harm occurs. Time-critical systems include, but are not limited to, voting-related systems, automated building access and other access systems, systems that form a critical component of healthcare, and systems that have the ability to withhold wages or otherwise cause immediate financial penalties.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 49, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How do hiring algorithms and social media data impact democracy?,"[' \nSECTION TITLE\xad\nFOREWORD\nAmong the great challenges posed to democracy today is the use of technology, data, and automated systems in \nways that threaten the rights of the American public. Too often, these tools are used to limit our opportunities and \nprevent our access to critical resources or services. These problems are well documented. In America and around \nthe world, systems supposed to help with patient care have proven unsafe, ineffective, or biased. Algorithms used \nin hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed \nnew harmful bias and discrimination. Unchecked social media data collection has been used to threaten peopleā€™s \nopportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or \nconsent. \nThese outcomes are deeply harmfulā€”but they are not inevitable. Automated systems have brought about extraor-\ndinary benefits, from technology that helps farmers grow food more efficiently and computers that predict storm \npaths, to algorithms that can identify diseases in patients. 
These tools now drive important decisions across \nsectors, while data is helping to revolutionize global industries. Fueled by the power of American innovation, \nthese tools hold the potential to redefine every part of our society and make life better for everyone. \nThis important progress must not come at the price of civil rights or democratic values, foundational American \nprinciples that President Biden has affirmed as a cornerstone of his Administration. On his first day in office, the \nPresident ordered the full Federal government to work to root out inequity, embed fairness in decision-\nmaking processes, and affirmatively advance civil rights, equal opportunity, and racial justice in America.1 The \nPresident has spoken forcefully about the urgent challenges posed to democracy today and has regularly called \non people of conscience to act to preserve civil rightsā€”including the right to privacy, which he has called ā€œthe \nbasis for so many more rights that we have come to take for granted that are ingrained in the fabric of this \ncountry.ā€2\nTo advance President Bidenā€™s vision, the White House Office of Science and Technology Policy has identified \nfive principles that should guide the design, use, and deployment of automated systems to protect the American \npublic in the age of artificial intelligence. The Blueprint for an AI Bill of Rights is a guide for a society that \nprotects all people from these threatsā€”and uses technologies in ways that reinforce our highest values. \nResponding to the experiences of the American public, and informed by insights from researchers, \ntechnologists, advocates, journalists, and policymakers, this framework is accompanied by a technical \ncompanionā€”a handbook for anyone seeking to incorporate these protections into policy and practice, including \ndetailed steps toward actualizing these principles in the technological design process. These principles help \nprovide guidance whenever automated systems can meaningfully impact the publicā€™s rights, opportunities, \nor access to critical needs. \n3\n']","Algorithms used in hiring and credit decisions have been found to reflect and reproduce existing unwanted inequities or embed new harmful bias and discrimination. Unchecked social media data collection has been used to threaten peopleā€™s opportunities, undermine their privacy, or pervasively track their activityā€”often without their knowledge or consent.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 2, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +Who oversees assessment in automated systems?,"[' \n \n \n \n \n \n \n \n \n \n \n \nSAFE AND EFFECTIVE \nSYSTEMS \nWHAT SHOULD BE EXPECTED OF AUTOMATED SYSTEMS\nThe expectations for automated systems are meant to serve as a blueprint for the development of additional \ntechnical standards and practices that are tailored for particular sectors and contexts. \nOngoing monitoring. 
Automated systems should have ongoing monitoring procedures, including recalibra\xad\ntion procedures, in place to ensure that their performance does not fall below an acceptable level over time, \nbased on changing real-world conditions or deployment contexts, post-deployment modification, or unexpect\xad\ned conditions. This ongoing monitoring should include continuous evaluation of performance metrics and \nharm assessments, updates of any systems, and retraining of any machine learning models as necessary, as well \nas ensuring that fallback mechanisms are in place to allow reversion to a previously working system. Monitor\xad\ning should take into account the performance of both technical system components (the algorithm as well as \nany hardware components, data inputs, etc.) and human operators. It should include mechanisms for testing \nthe actual accuracy of any predictions or recommendations generated by a system, not just a human operatorā€™s \ndetermination of their accuracy. Ongoing monitoring procedures should include manual, human-led monitor\xad\ning as a check in the event there are shortcomings in automated monitoring systems. These monitoring proce\xad\ndures should be in place for the lifespan of the deployed automated system. \nClear organizational oversight. Entities responsible for the development or use of automated systems \nshould lay out clear governance structures and procedures. This includes clearly-stated governance proce\xad\ndures before deploying the system, as well as responsibility of specific individuals or entities to oversee ongoing \nassessment and mitigation. Organizational stakeholders including those with oversight of the business process \nor operation being automated, as well as other organizational divisions that may be affected due to the use of \nthe system, should be involved in establishing governance procedures. Responsibility should rest high enough \nin the organization that decisions about resources, mitigation, incident response, and potential rollback can be \nmade promptly, with sufficient weight given to risk mitigation objectives against competing concerns. Those \nholding this responsibility should be made aware of any use cases with the potential for meaningful impact on \npeopleā€™s rights, opportunities, or access as determined based on risk identification procedures. In some cases, \nit may be appropriate for an independent ethics review to be conducted before deployment. \nAvoid inappropriate, low-quality, or irrelevant data use and the compounded harm of its \nreuse \nRelevant and high-quality data. Data used as part of any automated systemā€™s creation, evaluation, or \ndeployment should be relevant, of high quality, and tailored to the task at hand. Relevancy should be \nestablished based on research-backed demonstration of the causal influence of the data to the specific use case \nor justified more generally based on a reasonable expectation of usefulness in the domain and/or for the \nsystem design or ongoing development. Relevance of data should not be established solely by appealing to \nits historical connection to the outcome. High quality and tailored data should be representative of the task at \nhand and errors from data entry or other sources should be measured and limited. Any data used as the target \nof a prediction process should receive particular attention to the quality and validity of the predicted outcome \nor label to ensure the goal of the automated system is appropriately identified and measured. 
Additionally, \njustification should be documented for each data attribute and source to explain why it is appropriate to use \nthat data to inform the results of the automated system and why such use will not violate any applicable laws. \nIn cases of high-dimensional and/or derived attributes, such justifications can be provided as overall \ndescriptions of the attribute generation process and appropriateness. \n19\n']",Entities responsible for the development or use of automated systems should lay out clear governance structures and procedures. This includes responsibility of specific individuals or entities to oversee ongoing assessment and mitigation.,reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 18, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True +How does diverse content in training materials impact societal AI?,"["" \n39 \nMS-3.3-004 \nProvide input for training materials about the capabilities and limitations of GAI \nsystems related to digital content transparency for AI Actors, other \nprofessionals, and the public about the societal impacts of AI and the role of \ndiverse and inclusive content generation. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nMS-3.3-005 \nRecord and integrate structured feedback about content provenance from \noperators, users, and potentially impacted communities through the use of \nmethods such as user research studies, focus groups, or community forums. \nActively seek feedback on generated content quality and potential biases. \nAssess the general awareness among end users and impacted communities \nabout the availability of these feedback channels. \nHuman-AI Conļ¬guration; \nInformation Integrity; Harmful Bias \nand Homogenization \nAI Actor Tasks: AI Deployment, Aļ¬€ected Individuals and Communities, End-Users, Operation and Monitoring, TEVV \n \nMEASURE 4.2: Measurement results regarding AI system trustworthiness in deployment context(s) and across the AI lifecycle are \ninformed by input from domain experts and relevant AI Actors to validate whether the system is performing consistently as \nintended. Results are documented. \nAction ID \nSuggested Action \nGAI Risks \nMS-4.2-001 \nConduct adversarial testing at a regular cadence to map and measure GAI risks, \nincluding tests to address attempts to deceive or manipulate the application of \nprovenance techniques or other misuses. Identify vulnerabilities and \nunderstand potential misuse scenarios and unintended outputs. \nInformation Integrity; Information \nSecurity \nMS-4.2-002 \nEvaluate GAI system performance in real-world scenarios to observe its \nbehavior in practical environments and reveal issues that might not surface in \ncontrolled and optimized testing environments. \nHuman-AI Conļ¬guration; \nConfabulation; Information \nSecurity \nMS-4.2-003 \nImplement interpretability and explainability methods to evaluate GAI system \ndecisions and verify alignment with intended purpose. 
\nInformation Integrity; Harmful Bias \nand Homogenization \nMS-4.2-004 \nMonitor and document instances where human operators or other systems \noverride the GAI's decisions. Evaluate these cases to understand if the overrides \nare linked to issues related to content provenance. \nInformation Integrity \nMS-4.2-005 \nVerify and document the incorporation of results of structured public feedback \nexercises into design, implementation, deployment approval (ā€œgoā€/ā€œno-goā€ \ndecisions), monitoring, and decommission decisions. \nHuman-AI Conļ¬guration; \nInformation Security \nAI Actor Tasks: AI Deployment, Domain Experts, End-Users, Operation and Monitoring, TEVV \n \n""]",The answer to given question is not present in context,reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 42, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +"Which methods use expert feedback, group input, or anonymous surveys?","[' \n50 \nParticipatory Engagement Methods \nOn an ad hoc or more structured basis, organizations can design and use a variety of channels to engage \nexternal stakeholders in product development or review. Focus groups with select experts can provide \nfeedback on a range of issues. Small user studies can provide feedback from representative groups or \npopulations. Anonymous surveys can be used to poll or gauge reactions to speciļ¬c features. Participatory \nengagement methods are often less structured than ļ¬eld testing or red teaming, and are more \ncommonly used in early stages of AI or product development. \nField Testing \nField testing involves structured settings to evaluate risks and impacts and to simulate the conditions \nunder which the GAI system will be deployed. Field style tests can be adapted from a focus on user \npreferences and experiences towards AI risks and impacts ā€“ both negative and positive. When carried \nout with large groups of users, these tests can provide estimations of the likelihood of risks and impacts \nin real world interactions. \nOrganizations may also collect feedback on outcomes, harms, and user experience directly from users in \nthe production environment after a model has been released, in accordance with human subject \nstandards such as informed consent and compensation. Organizations should follow applicable human \nsubjects research requirements, and best practices such as informed consent and subject compensation, \nwhen implementing feedback activities. \nAI Red-teaming \nAI red-teaming is an evolving practice that references exercises often conducted in a controlled \nenvironment and in collaboration with AI developers building AI models to identify potential adverse \nbehavior or outcomes of a GAI model or system, how they could occur, and stress test safeguardsā€. AI \nred-teaming can be performed before or after AI models or systems are made available to the broader \npublic; this section focuses on red-teaming in pre-deployment contexts. \nThe quality of AI red-teaming outputs is related to the background and expertise of the AI red team \nitself. 
Demographically and interdisciplinarily diverse AI red teams can be used to identify ļ¬‚aws in the \nvarying contexts where GAI will be used. For best results, AI red teams should demonstrate domain \nexpertise, and awareness of socio-cultural aspects within the deployment context. AI red-teaming results \nshould be given additional analysis before they are incorporated into organizational governance and \ndecision making, policy and procedural updates, and AI risk management eļ¬€orts. \nVarious types of AI red-teaming may be appropriate, depending on the use case: \nā€¢ \nGeneral Public: Performed by general users (not necessarily AI or technical experts) who are \nexpected to use the model or interact with its outputs, and who bring their own lived \nexperiences and perspectives to the task of AI red-teaming. These individuals may have been \nprovided instructions and material to complete tasks which may elicit harmful model behaviors. \nThis type of exercise can be more eļ¬€ective with large groups of AI red-teamers. \nā€¢ \nExpert: Performed by specialists with expertise in the domain or speciļ¬c AI red-teaming context \nof use (e.g., medicine, biotech, cybersecurity). \nā€¢ \nCombination: In scenarios when it is diļ¬ƒcult to identify and recruit specialists with suļ¬ƒcient \ndomain and contextual expertise, AI red-teaming exercises may leverage both expert and \n']","Participatory engagement methods use expert feedback, group input, or anonymous surveys.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 53, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +Which metrics show AI reliability and failure response?,"[' \n32 \nMEASURE 2.6: The AI system is evaluated regularly for safety risks ā€“ as identiļ¬ed in the MAP function. The AI system to be \ndeployed is demonstrated to be safe, its residual negative risk does not exceed the risk tolerance, and it can fail safely, particularly if \nmade to operate beyond its knowledge limits. Safety metrics reļ¬‚ect system reliability and robustness, real-time monitoring, and \nresponse times for AI system failures. \nAction ID \nSuggested Action \nGAI Risks \nMS-2.6-001 \nAssess adverse impacts, including health and wellbeing impacts for value chain \nor other AI Actors that are exposed to sexually explicit, oļ¬€ensive, or violent \ninformation during GAI training and maintenance. \nHuman-AI Conļ¬guration; Obscene, \nDegrading, and/or Abusive \nContent; Value Chain and \nComponent Integration; \nDangerous, Violent, or Hateful \nContent \nMS-2.6-002 \nAssess existence or levels of harmful bias, intellectual property infringement, \ndata privacy violations, obscenity, extremism, violence, or CBRN information in \nsystem training data. \nData Privacy; Intellectual Property; \nObscene, Degrading, and/or \nAbusive Content; Harmful Bias and \nHomogenization; Dangerous, \nViolent, or Hateful Content; CBRN \nInformation or Capabilities \nMS-2.6-003 Re-evaluate safety features of ļ¬ne-tuned models when the negative risk exceeds \norganizational risk tolerance. 
\nDangerous, Violent, or Hateful \nContent \nMS-2.6-004 Review GAI system outputs for validity and safety: Review generated code to \nassess risks that may arise from unreliable downstream decision-making. \nValue Chain and Component \nIntegration; Dangerous, Violent, or \nHateful Content \nMS-2.6-005 \nVerify that GAI system architecture can monitor outputs and performance, and \nhandle, recover from, and repair errors when security anomalies, threats and \nimpacts are detected. \nConfabulation; Information \nIntegrity; Information Security \nMS-2.6-006 \nVerify that systems properly handle queries that may give rise to inappropriate, \nmalicious, or illegal usage, including facilitating manipulation, extortion, targeted \nimpersonation, cyber-attacks, and weapons creation. \nCBRN Information or Capabilities; \nInformation Security \nMS-2.6-007 Regularly evaluate GAI system vulnerabilities to possible circumvention of safety \nmeasures. \nCBRN Information or Capabilities; \nInformation Security \nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, Operation and Monitoring, TEVV \n \n']","Safety metrics reflect system reliability and robustness, real-time monitoring, and response times for AI system failures.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 35, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True +How does watch list opacity impact error correction and public understanding?,"[' \n \n \n \n \nNOTICE & \nEXPLANATION \nWHY THIS PRINCIPLE IS IMPORTANT\nThis section provides a brief summary of the problems which the principle seeks to address and protect \nagainst, including illustrative examples. \nā€¢\nA predictive policing system claimed to identify individuals at greatest risk to commit or become the victim of\ngun violence (based on automated analysis of social ties to gang members, criminal histories, previous experi\xad\nences of gun violence, and other factors) and led to individuals being placed on a watch list with no\nexplanation or public transparency regarding how the system came to its conclusions.85 Both police and\nthe public deserve to understand why and how such a system is making these determinations.\nā€¢\nA system awarding benefits changed its criteria invisibly. Individuals were denied benefits due to data entry\nerrors and other system flaws. These flaws were only revealed when an explanation of the system\nwas demanded and produced.86 The lack of an explanation made it harder for errors to be corrected in a\ntimely manner.\n42\n']",Watch list opacity impacts error correction and public understanding by making it difficult for both police and the public to understand why and how the system is making determinations. 
This lack of transparency can prevent errors from being corrected in a timely manner.",reasoning,"[{'source': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'file_path': 'https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf', 'page': 41, 'total_pages': 73, 'format': 'PDF 1.6', 'title': 'Blueprint for an AI Bill of Rights', 'author': '', 'subject': '', 'keywords': '', 'creator': 'Adobe Illustrator 26.3 (Macintosh)', 'producer': 'iLovePDF', 'creationDate': ""D:20220920133035-04'00'"", 'modDate': ""D:20221003104118-04'00'"", 'trapped': ''}]",True
"How do policies manage third-party AI risks, IP, and data privacy?","[' \n20 \nGV-4.3-003 \nVerify information sharing and feedback mechanisms among individuals and \norganizations regarding any negative impact from GAI systems. \nInformation Integrity; Data \nPrivacy \nAI Actor Tasks: AI Impact Assessment, Affected Individuals and Communities, Governance and Oversight \n \nGOVERN 5.1: Organizational policies and practices are in place to collect, consider, prioritize, and integrate feedback from those \nexternal to the team that developed or deployed the AI system regarding the potential individual and societal impacts related to AI \nrisks. \nAction ID \nSuggested Action \nGAI Risks \nGV-5.1-001 \nAllocate time and resources for outreach, feedback, and recourse processes in GAI \nsystem development. \nHuman-AI Configuration; Harmful \nBias and Homogenization \nGV-5.1-002 \nDocument interactions with GAI systems to users prior to interactive activities, \nparticularly in contexts involving more significant risks. \nHuman-AI Configuration; \nConfabulation \nAI Actor Tasks: AI Design, AI Impact Assessment, Affected Individuals and Communities, Governance and Oversight \n \nGOVERN 6.1: Policies and procedures are in place that address AI risks associated with third-party entities, including risks of \ninfringement of a third-party's intellectual property or other rights. \nAction ID \nSuggested Action \nGAI Risks \nGV-6.1-001 Categorize different types of GAI content with associated third-party rights (e.g., \ncopyright, intellectual property, data privacy). \nData Privacy; Intellectual \nProperty; Value Chain and \nComponent Integration \nGV-6.1-002 Conduct joint educational activities and events in collaboration with third parties \nto promote best practices for managing GAI risks. \nValue Chain and Component \nIntegration \nGV-6.1-003 \nDevelop and validate approaches for measuring the success of content \nprovenance management efforts with third parties (e.g., incidents detected and \nresponse times). \nInformation Integrity; Value Chain \nand Component Integration \nGV-6.1-004 \nDraft and maintain well-defined contracts and service level agreements (SLAs) \nthat specify content ownership, usage rights, quality standards, security \nrequirements, and content provenance expectations for GAI systems. 
\nInformation Integrity; Information \nSecurity; Intellectual Property \n']","Policies manage third-party AI risks, IP, and data privacy by categorizing different types of GAI content with associated third-party rights (e.g., copyright, intellectual property, data privacy), conducting joint educational activities and events in collaboration with third parties to promote best practices for managing GAI risks, developing and validating approaches for measuring the success of content provenance management efforts with third parties (e.g., incidents detected and response times), and drafting and maintaining well-defined contracts and service level agreements (SLAs) that specify content ownership, usage rights, quality standards, security requirements, and content provenance expectations for GAI systems.",reasoning,"[{'source': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'file_path': 'https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf', 'page': 23, 'total_pages': 64, 'format': 'PDF 1.6', 'title': 'Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile', 'author': 'National Institute of Standards and Technology', 'subject': '', 'keywords': '', 'creator': 'Acrobat PDFMaker 24 for Word', 'producer': 'Adobe PDF Library 24.2.159', 'creationDate': ""D:20240805141702-04'00'"", 'modDate': ""D:20240805143048-04'00'"", 'trapped': ''}]",True diff --git a/Tasks/Task 5/task5-assessing-performance.ipynb b/Tasks/Task 5/task5-assessing-performance.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..2af74daa05d345469fcc6fbbf60a7630686aa1f7 --- /dev/null +++ b/Tasks/Task 5/task5-assessing-performance.ipynb @@ -0,0 +1,709 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -qU langsmith langchain-core langchain-community langchain-openai langchain-qdrant langchain_experimental pymupdf ragas" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "from uuid import uuid4\n", + "\n", + "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", + "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"LangChain API Key:\")\n", + "\n", + "os.environ[\"LANGCHAIN_PROJECT\"] = \"AIM-SDG-MidTerm - AI Safety\"\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", + "\n", + "os.environ[\"QDRANT_API_KEY\"] = getpass.getpass(\"Enter Your Qdrant API Key: \")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Synthetic data generation using Ragas framework" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will generate a set of synthetic data for evaluating different options\n", + "1. Evaluating Embedding model\n", + "2. 
Evaluating Chunking Strategies" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from langchain_experimental.text_splitter import SemanticChunker\n", "\n", "from enum import Enum\n", "from typing import List\n", "from langchain_community.document_loaders import PyMuPDFLoader\n", "from langchain_core.documents import Document\n", "import asyncio\n", "\n", "class PDFLoaderWrapper():\n", " class LoaderType(str, Enum):\n", " PYMUPDF = \"pymupdf\"\n", "\n", " def __init__(self, file_path: str | List[str] , loader_type: LoaderType = LoaderType.PYMUPDF):\n", " self.file_path = file_path if isinstance(file_path, list) else [file_path]\n", " self.loader_type = loader_type\n", "\n", " async def aload(self) -> List[Document]:\n", " all_docs = []\n", " for file_path in self.file_path:\n", " if self.loader_type == self.LoaderType.PYMUPDF:\n", " try:\n", " loader = PyMuPDFLoader(file_path)\n", " docs = await loader.aload()\n", " all_docs.extend(docs)\n", " except Exception as e:\n", " print(f\"Error loading file {file_path}: {e}\")\n", " continue\n", " return all_docs\n", "\n", "BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n", "NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n", "documents = [\n", " BOR_FILE_PATH,\n", " NIST_FILE_PATH\n", "]\n", "\n", "pdf_loader = PDFLoaderWrapper(\n", " documents, PDFLoaderWrapper.LoaderType.PYMUPDF\n", ")\n", "documents = await pdf_loader.aload()\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from ragas.testset.generator import TestsetGenerator\n", "from ragas.testset.evolutions import simple, reasoning, multi_context\n", "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from ragas.testset.docstore import Document, DocumentStore, InMemoryDocumentStore\n", "from langchain_experimental.text_splitter import SemanticChunker\n", "# RecursiveCharacterTextSplitter lives in langchain, not langchain_experimental\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline\n", "from ragas.testset.extractor import KeyphraseExtractor\n", "\n", "print (\"Packages import complete\")\n", "print (\"Getting the Embedding model from Huggingface\")\n", "# Using one of the best performing embedding models on Hugging Face to generate a quality dataset.\n", "# Need GPU\n", "model_name = \"Snowflake/snowflake-arctic-embed-l\"\n", "embedding_model = HuggingFaceEmbeddings(model_name=model_name)\n", "print (\"Embedding model loaded\")\n", "\n", "print (\"Splitting the documents into chunks\")\n", "text_splitter = RecursiveCharacterTextSplitter(\n", " chunk_size = 1024,\n", " chunk_overlap = 100,\n", " length_function = len,\n", ")\n", "chunked_docs = text_splitter.split_documents(documents)\n", "\n", "print (\"Creating the document store for ragas and loading LLM models\")\n", "generator_llm = ChatOpenAI(model=\"gpt-4o\")\n", "critic_llm = ChatOpenAI(model=\"gpt-4o\")\n", "\n", "keyphrase_extractor = KeyphraseExtractor(llm=generator_llm)\n", "docstore = InMemoryDocumentStore(splitter=text_splitter,extractor=keyphrase_extractor, embeddings=embedding_model)\n", "\n", "print (\"Creating the testset generator\")\n", "generator = TestsetGenerator.from_langchain( # Default uses TokenTextSplitter\n", " generator_llm=generator_llm,\n", + "
critic_llm=critic_llm,\n", + " embeddings=embedding_model,\n", + " docstore=docstore # Document store uses the splitter and keyphrase extractor configured above\n", + ")\n", + "\n", + "distributions = {\n", + " simple: 0.5,\n", + " multi_context: 0.3,\n", + " reasoning: 0.2\n", + "}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "test_size = 50\n", "\n", "testset = generator.generate_with_langchain_docs(\n", " documents, \n", " test_size, \n", " distributions, \n", " with_debugging_logs=True\n", ") # Default RunConfig(max_retries=15, max_wait=90)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "testset_df = testset.to_pandas()\n", "testset_df" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "testset_df.to_csv('task5-ai-safety-sdg.csv', index=False)\n", "test_questions = testset_df[\"question\"].values.tolist()\n", "test_groundtruths = testset_df[\"ground_truth\"].values.tolist()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Create RAG chain to generate answers for the above questions in the dataset" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "> Note that we are using Qdrant Cloud, where the PDF documents are processed and saved for us to consume. For the RAG pipeline we use the same embedding model originally used to populate the Qdrant vectorstore." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from langchain_qdrant import QdrantVectorStore\n", "from langchain_core.documents import Document\n", "from qdrant_client import QdrantClient\n", "from qdrant_client.http.models import Distance, VectorParams\n", "\n", "dimension = 1024\n", "collection_name = \"ai-safety-sr-arctic-embed-l-recursive\"\n", "qdrant_server = \"https://500cb0e8-ea08-4662-b4f2-3eca11e635da.europe-west3-0.gcp.cloud.qdrant.io:6333\"\n", "qdrant_client = QdrantClient(url=qdrant_server,api_key=os.environ[\"QDRANT_API_KEY\"])\n", "qdrant_client.create_collection(\n", " collection_name=collection_name,\n", " vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),\n", ")\n", "\n", "vector_store = QdrantVectorStore(\n", " client=qdrant_client,\n", " collection_name=collection_name,\n", " embedding=embedding_model,\n", ")\n", "\n", "retriever = vector_store.as_retriever()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from langchain.prompts import ChatPromptTemplate\n", "\n", "RAG_PROMPT = \"\"\"\\\n", "Given a provided context and question, you must answer the question based only on context.\n", "\n", "If you cannot answer the question based on the context - you must say \"I don't know\".\n", "\n", "Context: {context}\n", "Question: {question}\n", "\"\"\"\n", "\n", "rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from langchain_openai import ChatOpenAI\n", "\n", "# Using the same model used in the app.\n", "chat_model_name = \"gpt-4o\"\n", "llm = ChatOpenAI(model=chat_model_name)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from operator import itemgetter\n", "from 
langchain_core.runnables import RunnablePassthrough, RunnableParallel\n", + "from langchain.schema import StrOutputParser\n", + "\n", + "rag_chain_generate_answers = (\n", + " {\"context\": itemgetter(\"question\") | retriever, \"question\": itemgetter(\"question\")}\n", + " | rag_prompt | llm | StrOutputParser()\n", + ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "rag_chain_generate_answers.invoke({\"question\" : \"What steps can organizations take to minimize bias in AI models?\"})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Create RAG Chain with config" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We are going to replicate the exact implementation used in the hosted RAG app but with different configurations to evaluate and compare." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Utility function to create a rag chain with config\n", "from langchain_experimental.text_splitter import SemanticChunker\n", "from enum import Enum\n", "from typing import List\n", "from langchain_community.document_loaders import PyMuPDFLoader\n", "from langchain_core.documents import Document\n", "import asyncio\n", "from langchain_qdrant import QdrantVectorStore\n", "from langchain_core.documents import Document\n", "from qdrant_client import QdrantClient\n", "from qdrant_client.http.models import Distance, VectorParams\n", "from langchain.retrievers.contextual_compression import ContextualCompressionRetriever\n", "from langchain.retrievers.document_compressors import LLMChainExtractor\n", "from langchain_openai import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", "from operator import itemgetter\n", "from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n", "from langchain.schema import StrOutputParser\n", "from langchain.chains.combine_documents import create_stuff_documents_chain\n", "from langchain.prompts import MessagesPlaceholder\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.chains.history_aware_retriever import create_history_aware_retriever\n", "from langchain.chains.retrieval import create_retrieval_chain\n", "from langchain_core.runnables.history import RunnableWithMessageHistory\n", "from langchain_core.chat_history import BaseChatMessageHistory\n", "from langchain_community.chat_message_histories import ChatMessageHistory\n", "\n", "BOR_FILE_PATH = \"https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf\"\n", "NIST_FILE_PATH = \"https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf\"\n", "documents_to_preload = [\n", " BOR_FILE_PATH,\n", " NIST_FILE_PATH\n", "]\n", "store = {}\n", "\n", "class PDFLoaderWrapper():\n", " class LoaderType(str, Enum):\n", " PYMUPDF = \"pymupdf\"\n", "\n", " def __init__(self, file_path: str | List[str] , loader_type: LoaderType = LoaderType.PYMUPDF):\n", " self.file_path = file_path if isinstance(file_path, list) else [file_path]\n", " self.loader_type = loader_type\n", "\n", " async def aload(self) -> List[Document]:\n", " all_docs = []\n", " for file_path in self.file_path:\n", " if self.loader_type == self.LoaderType.PYMUPDF:\n", " try:\n", " loader = PyMuPDFLoader(file_path)\n", " docs = await loader.aload()\n", " all_docs.extend(docs)\n", " except 
Exception as e:\n", + " print(f\"Error loading file {file_path}: {e}\")\n", + " continue\n", + " return all_docs\n", + "\n", + "async def get_contextual_compressed_retriever(retriever):\n", + "\n", + " base_retriever = retriever\n", + " compressor_llm = ChatOpenAI(temperature=0, model_name=\"gpt-4o\", max_tokens=1500)\n", + " compressor = LLMChainExtractor.from_llm(compressor_llm)\n", + "\n", + " # Combine the retriever with the compressor\n", + " compression_retriever = ContextualCompressionRetriever(\n", + " base_compressor=compressor,\n", + " base_retriever=base_retriever\n", + " )\n", + " return compression_retriever\n", + "\n", + "def create_history_aware_retriever_self(chat_model, retriever):\n", + " contextualize_q_system_prompt = (\n", + " \"Given a chat history and the latest user question which might reference context in the chat history, \"\n", + " \"formulate a standalone question which can be understood without the chat history. Do NOT answer the question, \"\n", + " \"just reformulate it if needed and otherwise return it as is.\"\n", + " )\n", + " contextualize_q_prompt = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\"system\", contextualize_q_system_prompt),\n", + " MessagesPlaceholder(\"chat_history\"),\n", + " (\"human\", \"{input}\"),\n", + " ]\n", + " )\n", + " return create_history_aware_retriever(chat_model, retriever, contextualize_q_prompt)\n", + "\n", + "def create_qa_chain(chat_model):\n", + " qa_system_prompt = (\n", + " \"You are a helpful assistant named 'Shield' and your task is to answer any questions related to AI Safety for the given context.\"\n", + " \"Use the following pieces of retrieved context to answer the question.\"\n", + " # \"If any questions asked outside AI Safety context, just say that you are a specialist in AI Safety and can't answer that.\"\n", + " # f\"When introducing you, just say that you are an AI assistant powered by embedding model {embedding_model_name} and chat model {chat_model_name} and your knowledge is limited to 'Blueprint for an AI Bill of Rights' and 'NIST AI Standards' documents.\"\n", + " \"If you don't know the answer, just say that you don't know.\\n\\n\"\n", + " \"{context}\"\n", + " )\n", + " qa_prompt = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\"system\", qa_system_prompt),\n", + " MessagesPlaceholder(\"chat_history\"),\n", + " (\"human\", \"{input}\"),\n", + " ]\n", + " )\n", + " return create_stuff_documents_chain(chat_model, qa_prompt)\n", + "\n", + "def create_conversational_rag_chain(chat_model, retriever):\n", + " history_aware_retriever = create_history_aware_retriever_self(chat_model, retriever)\n", + " question_answer_chain = create_qa_chain(chat_model)\n", + " return create_retrieval_chain(history_aware_retriever, question_answer_chain)\n", + "\n", + "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n", + " if session_id not in store:\n", + " store[session_id] = ChatMessageHistory()\n", + " return store[session_id]\n", + "\n", + "\n", + "pdf_loader = PDFLoaderWrapper(\n", + " documents_to_preload, PDFLoaderWrapper.LoaderType.PYMUPDF\n", + ")\n", + "documents = await pdf_loader.aload()\n", + "\n", + "async def create_history_aware_rag_chain_with_params(huggingface_embedding, text_splitter, collection_name, compress: bool = False, conversational: bool = False):\n", + "\n", + " chunked_docs = text_splitter.split_documents(documents)\n", + " dimension = 1024\n", + " qdrant_server = os.environ[\"QDRANT_URL\"]\n", + " qdrant_client = 
QdrantClient(url=qdrant_server,api_key=os.environ[\"QDRANT_API_KEY\"])\n", + "\n", + " # Below fails if collection already exists so make sure to delete the collection first\n", + " qdrant_client.create_collection(\n", + " collection_name=collection_name,\n", + " vectors_config=VectorParams(size=dimension, distance=Distance.COSINE),\n", + " )\n", + "\n", + " vector_store = QdrantVectorStore(\n", + " client=qdrant_client,\n", + " collection_name=collection_name,\n", + " embedding=huggingface_embedding,\n", + " )\n", + " vector_store.add_documents(chunked_docs)\n", + "\n", + " retriever = vector_store.as_retriever(search_type=\"similarity_score_threshold\",\n", + " search_kwargs={'k':10,'score_threshold': 0.8})\n", + " \n", + " # Using the same model used in the app.\n", + " chat_model_name = \"gpt-4o\"\n", + " llm = ChatOpenAI(model=chat_model_name,temperature=0) \n", + "\n", + " # Initialize both to None so the selection logic below works when a flag is off\n", + " contextual_compressed_retriever = None\n", + " conversational_rag_chain = None\n", + "\n", + " if compress:\n", + " contextual_compressed_retriever = await get_contextual_compressed_retriever(retriever)\n", + " \n", + " if conversational:\n", + " history_ai_safety_rag_chain = create_conversational_rag_chain(\n", + " llm, \n", + " contextual_compressed_retriever if contextual_compressed_retriever else retriever)\n", + "\n", + " conversational_rag_chain = RunnableWithMessageHistory(\n", + " history_ai_safety_rag_chain,\n", + " get_session_history,\n", + " input_messages_key=\"input\",\n", + " history_messages_key=\"chat_history\",\n", + " output_messages_key=\"answer\",\n", + " )\n", + " else:\n", + " RAG_PROMPT = \"\"\"\\\n", + " Given a provided context and question, you must answer the question based only on context.\n", + "\n", + " If you cannot answer the question based on the context - you must say \"I don't know\".\n", + "\n", + " Context: {context}\n", + " Question: {question}\n", + " \"\"\"\n", + "\n", + " rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)\n", + " ai_safety_rag_chain = (\n", + " # Both chains are invoked with an \"input\" key, so read it consistently\n", + " {\"context\": itemgetter(\"input\") | retriever, \"question\": itemgetter(\"input\")}\n", + " | rag_prompt | llm | StrOutputParser()\n", + " )\n", + "\n", + " ret = contextual_compressed_retriever if contextual_compressed_retriever else retriever\n", + " chain = conversational_rag_chain if conversational_rag_chain else ai_safety_rag_chain\n", + " return chain, ret\n", + "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# RAGAS Evaluation for Embedding Model" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "\n", "# Load the embedding models\n", "base_embedding_model = HuggingFaceEmbeddings(model_name=\"Snowflake/snowflake-arctic-embed-l\")\n", "# Fine-tuned model published to the Hub in Task 4\n", "fine_tuned_embedding_model = HuggingFaceEmbeddings(model_name=\"jeevanions/finetuned_arctic-embedd-l\")\n", "\n", "# Common splitter to keep the variables to a minimum\n", "recursive_text_splitter = RecursiveCharacterTextSplitter(\n", " chunk_size = 1024,\n", " chunk_overlap = 100,\n", " length_function = len,\n", ")\n", "recursive_chunked_docs = recursive_text_splitter.split_documents(documents)\n", "\n", "# Create two RAG chains with different embeddings\n", "rag_chain_base, retriever_base = await create_history_aware_rag_chain_with_params(\n", " base_embedding_model,\n", " recursive_text_splitter,\n", " \"aichatty-snowflake-arctic-embed-l-recursive-base\"\n", ")\n", "\n", "rag_chain_fine_tuned, retriever_fine_tuned = await create_history_aware_rag_chain_with_params(\n", + "
fine_tuned_embedding_model,\n", + " recursive_text_splitter,\n", + " \"aichatty-snowflake-arctic-embed-l-recursive-ft\"\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First Base model chain\n", + "\n", + "from datasets import Dataset\n", + "import time\n", + "import uuid\n", + "from ragas import evaluate\n", + "from ragas.metrics import (\n", + " faithfulness,\n", + " answer_relevancy,\n", + " answer_correctness,\n", + " context_recall,\n", + " context_precision,\n", + ")\n", + "\n", + "answers = []\n", + "contexts = []\n", + "\n", + "for question in test_questions:\n", + " store = {}\n", + " session_id = str(uuid.uuid4())\n", + "\n", + " response = rag_chain_base.invoke({\"input\" : question}, config={\"configurable\": {\"session_id\": session_id}})\n", + " answers.append(response[\"answer\"])\n", + " contexts.append([context.page_content for context in response[\"context\"]])\n", + "\n", + "base_chain_response_dataset = Dataset.from_dict({\n", + " \"question\" : test_questions,\n", + " \"answer\" : answers,\n", + " \"contexts\" : contexts,\n", + " \"ground_truth\" : test_groundtruths\n", + "})\n", + "\n", + "metrics = [\n", + " faithfulness,\n", + " answer_relevancy,\n", + " context_recall,\n", + " context_precision,\n", + " answer_correctness,\n", + "]\n", + "base_chain_eval_results = evaluate(base_chain_response_dataset, metrics)\n", + "\n", + "base_chain_eval_results_df = base_chain_eval_results.to_pandas()\n", + "base_chain_eval_results_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# For Fine Tuned model chain\n", + "\n", + "answers = []\n", + "contexts = []\n", + "\n", + "for question in test_questions:\n", + " store = {}\n", + " session_id = str(uuid.uuid4())\n", + "\n", + " response = rag_chain_fine_tuned.invoke({\"input\" : question}, config={\"configurable\": {\"session_id\": session_id}})\n", + " answers.append(response[\"answer\"])\n", + " contexts.append([context.page_content for context in response[\"context\"]])\n", + "\n", + "ft_chain_response_dataset = Dataset.from_dict({\n", + " \"question\" : test_questions,\n", + " \"answer\" : answers,\n", + " \"contexts\" : contexts,\n", + " \"ground_truth\" : test_groundtruths\n", + "})\n", + "\n", + "ft_chain_eval_results = evaluate(ft_chain_response_dataset, metrics)\n", + "\n", + "ft_chain_eval_results_df = ft_chain_eval_results.to_pandas()\n", + "ft_chain_eval_results_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Load your dataframes here\n", + "\n", + "# Merge on the 'question' column or another common identifier\n", + "merged_df = pd.merge(base_chain_eval_results_df, ft_chain_eval_results_df, on='question', suffixes=('_base', '_finetuned'))\n", + "\n", + "# Now, let's calculate the improvement in relevant metrics, e.g., answer_correctness\n", + "merged_df['improvement_answer_correctness'] = merged_df['answer_correctness_finetuned'] - merged_df['answer_correctness_base']\n", + "\n", + "# Plotting the comparison of correctness between the two models\n", + "plt.figure(figsize=(10, 6))\n", + "plt.plot(merged_df['question'], merged_df['answer_correctness_base'], label='Base Model', marker='o')\n", + "plt.plot(merged_df['question'], merged_df['answer_correctness_finetuned'], label='Fine-tuned Model', marker='x')\n", + 
"plt.xlabel('Questions')\n", + "plt.ylabel('Answer Correctness')\n", + "plt.title('Comparison of Base Model and Fine-tuned Model on Answer Correctness')\n", + "plt.xticks(rotation=90)\n", + "plt.legend()\n", + "plt.tight_layout()\n", + "plt.show()\n", + "\n", + "# Plotting the improvement\n", + "plt.figure(figsize=(10, 6))\n", + "plt.bar(merged_df['question'], merged_df['improvement_answer_correctness'], color='green')\n", + "plt.xlabel('Questions')\n", + "plt.ylabel('Improvement in Answer Correctness')\n", + "plt.title('Improvement in Answer Correctness after Fine-tuning')\n", + "plt.xticks(rotation=90)\n", + "plt.tight_layout()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RAGAS Evaluation for chunking strategy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "\n", + "# Using fine_tuned_embedding_model from before\n", + "\n", + "# First Splitter RecursiveCharacterTextSplitter\n", + "recursive_text_splitter = RecursiveCharacterTextSplitter(\n", + " chunk_size = 1024,\n", + " chunk_overlap = 100,\n", + " length_function = len,\n", + ")\n", + "recursive_chunked_docs = recursive_text_splitter.split_documents(documents)\n", + "\n", + "# Create two rag chaings with different embeddings\n", + "conversational_rag_chain_base, contextual_compressed_retriever_base, retriever_base = await create_history_aware_rag_chain_with_params(\n", + " base_embedding_model,recursive_text_splitter,\"aichatty-snowflake-arctic-embed-l-recursive-base\"\n", + ")\n", + "\n", + "conversational_rag_chain_fine_tuned, contextual_compressed_retriever_fine_tuned, retriever_fine_tuned = await create_history_aware_rag_chain_with_params(\n", + " fine_tuned_embedding_model,recursive_text_splitter,\"aichatty-snowflake-arctic-embed-l-recursive-ft\"\n", + ")\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/Task 5/task5-table-improvement.png b/Tasks/Task 5/task5-table-improvement.png new file mode 100644 index 0000000000000000000000000000000000000000..1a5daf8b7bb8916d54f8a40fc9cf3dc285a8f3eb Binary files /dev/null and b/Tasks/Task 5/task5-table-improvement.png differ diff --git a/Tasks/Task 6/task6.md b/Tasks/Task 6/task6.md new file mode 100644 index 0000000000000000000000000000000000000000..9fdf540f408a8e9466396656e4479d088b858774 --- /dev/null +++ b/Tasks/Task 6/task6.md @@ -0,0 +1,25 @@ +### 1. **What is the story that you will give to the CEO to tell the whole company at the launch next month?** + +The story for the CEO should emphasize how the company's investment in AI technology is yielding tangible results and preparing the organization for the future of AI governance. + + +**CEO Narrative:** + +"Over the past months, our dedicated team of engineers and AI specialists have worked on a groundbreaking Retrieval-Augmented Generation (RAG) application. Weā€™ve tested the system rigorously across 50 internal stakeholders, collecting feedback and refining the performance metrics based on cutting-edge frameworks like RAGAS. 
This initiative places us at the forefront of ethical AI adoption, ensuring our systems align with upcoming regulatory frameworks such as the 2024 NIST AI Risk Management Framework and the AI Bill of Rights. + +We've created a scalable and adaptable AI system that not only answers complex questions related to AI safety but also ensures that our internal teams have the tools to address customer concerns, especially in today's rapidly changing AI landscape. By next month, we'll be ready to roll out the solution across the enterprise, showing our commitment to innovation, security, and accountability." + + +### 2. **How might you incorporate relevant White House briefing information into future versions?** + +In future versions of the RAG application, it is critical to keep it aligned with national regulations like the executive order on Safe, Secure, and Trustworthy AI. Here's how: + +- **Compliance & Transparency:** Ensure that future updates integrate guidelines from the White House's executive orders on AI governance. This can be achieved by: + - **Embedding legal updates** into the knowledge base of the RAG system. This will allow the system to reflect the latest compliance requirements and help internal stakeholders understand how AI aligns with regulations. + - Implement an **alert system** within the RAG tool to notify users whenever regulatory changes impact AI practices. + +- **Periodic Review:** Set a quarterly update schedule where AI regulations, such as the 270-day update mentioned, are reviewed, and the RAG application is updated accordingly. This ensures that the company stays proactive regarding AI safety and governance. + +At the moment, these guidelines are loaded into the system offline; in the future we will roll out features that keep the system constantly updated on any new legislation. + +This approach will position the company not only as an industry leader but also as a responsible organization that prioritizes safe and ethical AI use. \ No newline at end of file diff --git a/Tasks/deliverables.md b/Tasks/deliverables.md new file mode 100644 index 0000000000000000000000000000000000000000..9e96ea1fd427aeb01fc4290546a465331e05d59c --- /dev/null +++ b/Tasks/deliverables.md @@ -0,0 +1,153 @@ +## Task 1 - Dealing with the data + +***Deliverable 1*** Describe the default chunking strategy that you will use. + +We started off using `SemanticChunker`, based on our understanding that it would provide quality retrieval of the knowledge to enable the LLM to generate the response for a given question. + +***Deliverable 2*** Articulate a chunking strategy that you would also like to test out. + +I would like to explore `RecursiveCharacterTextSplitter` with some advanced RAG techniques like Contextual Compression or Parent Document Retriever (see the sketch at the end of this section). + +***Deliverable 3*** Describe how and why you made these decisions + +From past experience, and from doing this midterm project, I understood one thing: "Start with simple changes and iterate over time. Any small change in the RAG setup drastically impacts the performance and quality of the output." So I wanted to keep things simple, smart, and easy to change. + +> Since loading huge PDFs and populating the vector store is time consuming, I did the preprocessing up front so the vector store is kept ready to be consumed. Refer to the notebook in the folder [Task1](/ai-safety-chatty/Tasks/Task%201/). +> To decide on the chunking strategy, I worked in a separate notebook to try different options. Refer to [Task1](/ai-safety-chatty/Tasks/Task%201/settingup-vectorstore-chunking-strategy.ipynb). +> Refer to [Task1-Detailed](/ai-safety-chatty/Tasks/Task%201/Task1.md) for a detailed walkthrough of this task.
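+
+For illustration, here is a minimal sketch of the two strategies side by side. It is a sketch only: the `SemanticChunker` parameters mirror the ones in `app.py`, the recursive parameters mirror the Task 5 notebook, and `documents` is assumed to be the list returned by `PDFLoaderWrapper.aload()`.
+
+```python
+# Sketch: the default strategy vs. the candidate to test, under the repo's settings.
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_experimental.text_splitter import SemanticChunker
+from langchain_huggingface import HuggingFaceEmbeddings
+
+embeddings = HuggingFaceEmbeddings(model_name="Snowflake/snowflake-arctic-embed-l")
+
+# Default: split where the embedding similarity between sentence groups drops.
+semantic_splitter = SemanticChunker(
+    embeddings,
+    buffer_size=3,
+    breakpoint_threshold_type="percentile",
+    breakpoint_threshold_amount=90,
+)
+
+# Candidate to test: fixed-size chunks with overlap; cheap and predictable.
+recursive_splitter = RecursiveCharacterTextSplitter(
+    chunk_size=1024,
+    chunk_overlap=100,
+    length_function=len,
+)
+
+semantic_chunks = semantic_splitter.split_documents(documents)
+recursive_chunks = recursive_splitter.split_documents(documents)
+```
+
+Each output can be pushed into its own Qdrant collection (as in the Task 5 notebook) so the two strategies can be compared with RAGAS on identical questions.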
+## Task 2 - Building a Quick End-to-End Prototype + +***Deliverable 1*** Build a prototype and deploy it to a Hugging Face Space; include the public URL link to your Space and create a short (< 2 min) Loom video demonstrating some initial testing inputs and outputs. + +HF URL: +Loom Video: + +***Deliverable 2*** How did you choose your stack, and why did you select each tool the way you did? + +1. **Qdrant**: + - **Reason for Selection**: Provides efficient, scalable, and fast vector search for embedding retrieval, crucial for the RAG framework. + +2. **PyMuPDFLoader**: + - **Reason for Selection**: Lightweight and fast PDF parsing, ideal for loading structured and dense documents like the AI Bill of Rights. + +3. **RecursiveCharacterTextSplitter**: + - **Reason for Selection**: Allows flexible chunking while preserving the semantic context, improving retrieval precision. + +4. **SemanticChunker**: + - **Reason for Selection**: Enables semantically rich text chunking, leading to better coherence and improved retrieval results. + +5. **Snowflake-Arctic-Embed-L Embedding Model**: + - **Reason for Selection**: A smaller, efficient model providing a good balance between speed and accuracy for embedding text chunks in RAG systems. One of the highly ranked models on the MTEB leaderboard. + +6. **Context Enrichment and Contextual Compression**: + - **Reason for Selection**: Enhances the retrieval process by providing more targeted, concise, and context-rich answers. + +7. **Tracing**: Used LangSmith for tracing, which works natively with the other frameworks in the stack and helps us understand issues in the RAG chain. + +8. **RAGAS**: Used to evaluate our RAG system with different configurations. This improves the performance of the RAG application. + +This stack was designed with a focus on balancing performance, scalability, and precision to build an effective Retrieval-Augmented Generation (RAG) application. + +Each tool in this stack was chosen to ensure **speed**, **scalability**, and **accuracy** while dealing with structured and unstructured documents. By balancing performance with precision (e.g., fast document loading via PyMuPDFLoader, efficient chunking strategies, and a small but powerful embedding model), this stack provides a robust framework for building ethical and useful AI applications. + +> Refer to additional notes in [Task2](/ai-safety-chatty/Tasks/Task%202/Task2.md) + +## Task 3 - Creating a Golden Test DataSet + +***Deliverable 1*** Assess your pipeline using the RAGAS framework including key metrics faithfulness, answer relevancy, context precision, and context recall. Provide a table of your output results. + +![Result](/ai-safety-chatty/Tasks/Task%203/task3-del1.png) +![Condensed Result](/ai-safety-chatty/Tasks/Task%203/task3-del11.png) + +***Deliverable 2*** What conclusions can you draw about performance and effectiveness of your pipeline with this information? + +Some observations from the results: + +- **Faithfulness**: Mostly high faithfulness scores, indicating that the generated answers are generally true to the source material. But there are some low scores (e.g., 0.233333), which show that the model may occasionally provide unfaithful or incomplete answers. + +- **Answer Relevancy**: The model seems to perform well in answer relevancy, with most scores being near 1. 
This suggests that even when faithfulness is low, the answers provided are still on-topic and relevant to the user's question. + +- **Context Recall & Precision**: There are several instances where **context recall** is 0.0, indicating that the context was not helpful in answering the question. However, when context recall is high, **context precision** is often perfect (1.0), showing that when the context is relevant, it is precise and accurate. + +- **Answer Correctness**: This metric shows a range of results. Although many answers are correct, a few are only partially correct, suggesting room for improvement in the correctness of generated answers. + +The pipeline performs well in generating relevant answers, but some improvements can be made to enhance the faithfulness and correctness of those answers. + +The **context recall** metric has room for improvement. There are several cases where relevant context is missing or inadequate, which can impact the overall effectiveness of the pipeline. + +## Task 4 - Generate synthetic fine-tuning data and complete fine-tuning of the open-source embedding model + +***Deliverable 1*** Swap out your existing embedding model for the new fine-tuned version. Provide a link to your fine-tuned embedding model on the Hugging Face Hub. + +HF Model Link for finetuned model: https://huggingface.co/jeevanions/finetuned_arctic-embedd-l + +***Deliverable 2*** How did you choose the embedding model for this application? + +The embedding model `snowflake-arctic-embed-l` is ranked 27 on the MTEB leaderboard, with an embedding dimension of 1024 and 334 million parameters. Despite its small size, it seriously contends with the top players. It is also easier to download and train with fewer GPU resource constraints: low cost but efficient. + +## Task 5 - Assessing Performance + +### 1. **Test the fine-tuned embedding model using the RAGAS framework to quantify any improvements. Provide results in a table.** + +Based on the comparison of the fine-tuned model against the baseline model using the RAGAS framework, here are key metrics evaluated: + +| Metric | Fine-Tuned Model | Baseline Model | Improvement | +|---------------------|------------------|----------------|-------------| +| Faithfulness | 0.5826 | 0.5011 | +8.15% | +| Answer Relevancy | 0.9422 | 0.8765 | +6.57% | +| Context Recall | 0.2716 | 0.2283 | +4.33% | +| Context Precision | 0.4460 | 0.3907 | +5.53% | +| Answer Correctness | 0.6179 | 0.5541 | +6.38% | + +![Comparison](/ai-safety-chatty/Tasks/Task%205/Task5-ComparisonBaseFineTuned.png) +![Improvement](/ai-safety-chatty/Tasks/Task%205/Task5-ComparisonBaseFineTunedImprovemant.png) + +### 2. **Test the two chunking strategies using the RAGAS framework to quantify any improvements. Provide results in a table.** + +For chunking strategies (Recursive Character vs. Semantic Chunker), the following metrics were tested: + +| Metric | Recursive Chunking | Semantic Chunking | Improvement | +|---------------------|--------------------|-------------------|-------------| +| Faithfulness | 0.5901 | 0.5826 | -0.75% | +| Answer Relevancy | 0.9500 | 0.9422 | -0.78% | +| Context Recall | 0.3000 | 0.2716 | -2.84% | +| Context Precision | 0.4590 | 0.4460 | -1.30% | +| Answer Correctness | 0.6220 | 0.6179 | -0.41% | + +![Comparison](/ai-safety-chatty/Tasks/Task%205/Task5-SemanticVsRecurstive.png) + +> I expected the Semantic chunker to perform better, but noticed that the retrieved documents contain fewer words per sentence. 
Also, there were some duplicate documents fetched. I would need to spend more time investigating why this happened. For now, with less complexity, the RecursiveCharacterTextSplitter works well, and implementing advanced RAG techniques on top does improve performance. + +### 3. **Which one is the best to test with internal stakeholders next week, and why?** + +The fine-tuned embedding model paired with **Recursive Character Chunking** is the optimal choice for testing with internal stakeholders. This combination has shown slight improvements in key metrics like **context recall** and **context precision** over semantic chunking. Moreover, the fine-tuned model demonstrates enhanced **faithfulness** and **answer relevancy**, making it more reliable for enterprise-level queries, especially in handling dense documents like the AI Bill of Rights and NIST RMF. While the difference is not dramatic, the Recursive Character Chunking ensures better handling of varied document structures, making it the best candidate for real-world testing. + +> For detailed reports and the notebooks used, refer to the folder [Task5 workout](/ai-safety-chatty/Tasks/Task%205/) + +## Task 6 - Managing Your Boss and User Expectations + +### 1. **What is the story that you will give to the CEO to tell the whole company at the launch next month?** + +The story for the CEO should emphasize how the company's investment in AI technology is yielding tangible results and preparing the organization for the future of AI governance. + +**CEO Narrative:** + +"Over the past months, our dedicated team of engineers and AI specialists has worked on a groundbreaking Retrieval-Augmented Generation (RAG) application. We've tested the system rigorously across 50 internal stakeholders, collecting feedback and refining the performance metrics based on cutting-edge frameworks like RAGAS. This initiative places us at the forefront of ethical AI adoption, ensuring our systems align with upcoming regulatory frameworks such as the 2024 NIST AI Risk Management Framework and the AI Bill of Rights. + +We've created a scalable and adaptable AI system that not only answers complex questions related to AI safety but also ensures that our internal teams have the tools to address customer concerns, especially in today's rapidly changing AI landscape. By next month, we'll be ready to roll out the solution across the enterprise, showing our commitment to innovation, security, and accountability." + + +### 2. **How might you incorporate relevant White House briefing information into future versions?** + +In future versions of the RAG application, it is critical to keep it aligned with national regulations like the executive order on Safe, Secure, and Trustworthy AI. Here's how: + +- **Compliance & Transparency:** Ensure that future updates integrate guidelines from the White House's executive orders on AI governance. This can be achieved by: + - **Embedding legal updates** into the knowledge base of the RAG system. This will allow the system to reflect the latest compliance requirements and help internal stakeholders understand how AI aligns with regulations. + - Implement an **alert system** within the RAG tool to notify users whenever regulatory changes impact AI practices. + +- **Periodic Review:** Set a quarterly update schedule where AI regulations, such as the 270-day update mentioned, are reviewed, and the RAG application is updated accordingly (see the sketch below). This ensures that the company stays proactive regarding AI safety and governance.
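+
+As a rough illustration, embedding new legal updates could be as small as the sketch below. It reuses the app's existing stack and environment variables; the regulation URL is a placeholder, and `ingest_regulation` is a hypothetical helper, not part of the current codebase.
+
+```python
+# Hypothetical refresh job: fold a newly published regulation into the
+# existing Qdrant collection so the deployed RAG app can answer from it.
+import os
+from langchain_community.document_loaders import PyMuPDFLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain_qdrant import QdrantVectorStore
+from qdrant_client import QdrantClient
+
+NEW_REGULATION_URL = "https://example.gov/new-ai-guidance.pdf"  # placeholder
+
+def ingest_regulation(url: str) -> int:
+    docs = PyMuPDFLoader(url).load()
+    chunks = RecursiveCharacterTextSplitter(
+        chunk_size=1024, chunk_overlap=100
+    ).split_documents(docs)
+    vector_store = QdrantVectorStore(
+        client=QdrantClient(
+            url=os.environ["QDRANT_URL"], api_key=os.environ["QDRANT_API_KEY"]
+        ),
+        collection_name=os.environ["COLLECTION_NAME"],
+        embedding=HuggingFaceEmbeddings(model_name="Snowflake/snowflake-arctic-embed-l"),
+    )
+    vector_store.add_documents(chunks)  # existing chunks stay in place
+    return len(chunks)
+```
+
+Run quarterly (or triggered by the alert system above), a job like this keeps the knowledge base current without re-indexing the documents that are already loaded.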
+ +At the moment, these guidelines are loaded into the system offline; in the future we will roll out features that keep the system constantly updated on any new legislation. + +This approach will position the company not only as an industry leader but also as a responsible organization that prioritizes safe and ethical AI use. \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..60f47646ac9558a3c88dd50c6f38ce4c379b5a22 --- /dev/null +++ b/app.py @@ -0,0 +1,249 @@ +import os +from typing import List +import uuid +import chainlit as cl +from chainlit.types import AskFileResponse +from langchain.memory import ConversationBufferMemory +from langchain_core.chat_history import BaseChatMessageHistory +from langchain_community.chat_message_histories import ChatMessageHistory +from langchain_community.document_loaders import PyMuPDFLoader, TextLoader +from langchain.prompts import MessagesPlaceholder +from langchain.prompts import ChatPromptTemplate +from langchain.chains.history_aware_retriever import create_history_aware_retriever +from langchain.chains.retrieval import create_retrieval_chain +from langchain.chains.combine_documents import create_stuff_documents_chain +from langchain_experimental.text_splitter import SemanticChunker +from langchain_qdrant import QdrantVectorStore +from langchain_core.documents import Document +from qdrant_client import QdrantClient +from qdrant_client.http.models import Distance, VectorParams +from langchain_openai import ChatOpenAI +from langchain_core.runnables.history import RunnableWithMessageHistory +# from chainlit.input_widget import Select, Switch, Slider +from dotenv import load_dotenv +from langchain_huggingface import HuggingFaceEmbeddings +from langchain.retrievers.contextual_compression import ContextualCompressionRetriever +from langchain.retrievers.document_compressors import LLMChainExtractor + + +load_dotenv() + +BOR_FILE_PATH = "https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf" +NIST_FILE_PATH = "https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.600-1.pdf" +SMALL_DOC = "https://arxiv.org/pdf/1908.10084" # 11 pages Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks +documents_to_preload = [ + BOR_FILE_PATH, + NIST_FILE_PATH + # SMALL_DOC +] +collection_name = "ai-safety" + +welcome_message = """ +Welcome to the chatbot for all your AI Safety related queries. +Now preloading the documents below: + 1. Blueprint for an AI Bill of Rights + 2. NIST AI Standards +Please wait for a moment to load the documents. 
+""" +chat_model_name = "gpt-4o" +embedding_model_name = "Snowflake/snowflake-arctic-embed-l" +chat_model = ChatOpenAI(model=chat_model_name, temperature=0) + +async def connect_to_qdrant(): + embedding_model = HuggingFaceEmbeddings(model_name=embedding_model_name) + qdrant_url = os.environ["QDRANT_URL"] + qdrant_api_key = os.environ["QDRANT_API_KEY"] + collection_name = os.environ["COLLECTION_NAME"] + qdrant_client = QdrantClient(url=qdrant_url,api_key=qdrant_api_key) + vector_store = QdrantVectorStore( + client=qdrant_client, + collection_name=collection_name, + embedding=embedding_model, + ) + return vector_store.as_retriever(search_type="similarity_score_threshold",search_kwargs={'k':10,'score_threshold': 0.8}) + +async def get_contextual_compressed_retriever(retriver): + + base_retriever = retriver + compressor_llm = ChatOpenAI(temperature=0, model_name="gpt-4o", max_tokens=4000) + compressor = LLMChainExtractor.from_llm(compressor_llm) + + #Combine the retriever with the compressor + compression_retriever = ContextualCompressionRetriever( + base_compressor=compressor, + base_retriever=base_retriever + ) + return compression_retriever + + +def initialize_vectorstore( + collection_name: str, + embedding_model, + dimension, + distance_metric: Distance = Distance.COSINE, +): + client = QdrantClient(":memory:") + client.create_collection( + collection_name=collection_name, + vectors_config=VectorParams(size=dimension, distance=distance_metric), + ) + + vector_store = QdrantVectorStore( + client=client, + collection_name=collection_name, + embedding=embedding_model, + ) + return vector_store + +def get_text_splitter(strategy, embedding_model): + if strategy == "semantic": + return SemanticChunker( + embedding_model, + buffer_size=3, + breakpoint_threshold_type="percentile", + breakpoint_threshold_amount=90, + ) + +def process_file(file: AskFileResponse, text_splitter): + if file.type == "text/plain": + Loader = TextLoader + elif file.type == "application/pdf": + Loader = PyMuPDFLoader + + loader = Loader(file.path) + documents = loader.load() + title = documents[0].metadata.get("title") + docs = text_splitter.split_documents(documents) + for i, doc in enumerate(docs): + doc.metadata["source"] = f"source_{i}" + doc.metadata["title"] = title + return docs + +def populate_vectorstore(vector_store, docs: List[Document]): + vector_store.add_documents(docs) + return vector_store + +def create_history_aware_retriever_self(chat_model, retriever): + contextualize_q_system_prompt = ( + "Given a chat history and the latest user question which might reference context in the chat history, " + "formulate a standalone question which can be understood without the chat history. Do NOT answer the question, " + "just reformulate it if needed and otherwise return it as is." + ) + contextualize_q_prompt = ChatPromptTemplate.from_messages( + [ + ("system", contextualize_q_system_prompt), + MessagesPlaceholder("chat_history"), + ("human", "{input}"), + ] + ) + return create_history_aware_retriever(chat_model, retriever, contextualize_q_prompt) + +def create_qa_chain(chat_model): + qa_system_prompt = ( + "You are an helpful assistant named 'Shield' and your task is to answer any questions related to AI Safety for the given context." + "Use the following pieces of retrieved context to answer the question." + # "If any questions asked outside AI Safety context, just say that you are a specialist in AI Safety and can't answer that." 
+ # f"When introducing you, just say that you are an AI assistant powered by embedding model {embedding_model_name} and chat model {chat_model_name} and your knowledge is limited to 'Blueprint for an AI Bill of Rights' and 'NIST AI Standards' documents." + "If you don't know the answer, just say that you don't know.\n\n" + "{context}" + ) + qa_prompt = ChatPromptTemplate.from_messages( + [ + ("system", qa_system_prompt), + MessagesPlaceholder("chat_history"), + ("human", "{input}"), + ] + ) + return create_stuff_documents_chain(chat_model, qa_prompt) + + +def create_rag_chain(chat_model, retriever): + history_aware_retriever = create_history_aware_retriever_self(chat_model, retriever) + question_answer_chain = create_qa_chain(chat_model) + return create_retrieval_chain(history_aware_retriever, question_answer_chain) + + +def create_session_id(): + session_id = str(uuid.uuid4()) + return session_id + + +@cl.on_chat_start +async def start(): + msg = cl.Message(content=welcome_message) + await msg.send() + + # Create a session id + session_id = create_session_id() + cl.user_session.set("session_id", session_id) + + retriever = await connect_to_qdrant() + contextual_compressed_retriever = await get_contextual_compressed_retriever(retriever) + + rag_chain = create_rag_chain(chat_model, contextual_compressed_retriever) + + store = {} + + def get_session_history(session_id: str) -> BaseChatMessageHistory: + if session_id not in store: + store[session_id] = ChatMessageHistory() + return store[session_id] + + conversational_rag_chain = RunnableWithMessageHistory( + rag_chain, + get_session_history, + input_messages_key="input", + history_messages_key="chat_history", + output_messages_key="answer", + ) + + # Let the user know that the system is ready + msg.content = msg.content + "\nReady to answer your questions!" 
+ await msg.update() + + cl.user_session.set("conversational_rag_chain", conversational_rag_chain) + + +@cl.on_message +async def main(message: cl.Message): + session_id = cl.user_session.get("session_id") + conversational_rag_chain = cl.user_session.get("conversational_rag_chain") + + response = await conversational_rag_chain.ainvoke( + {"input": message.content}, + config={"configurable": {"session_id": session_id}, + "callbacks":[cl.AsyncLangchainCallbackHandler()]}, + ) + answer = response["answer"] + + source_documents = response["context"] + text_elements = [] + unique_pages = set() + + if source_documents: + + for source_idx, source_doc in enumerate(source_documents): + source_name = f"source_{source_idx+1}" + page_number = source_doc.metadata['page'] + #page_number = source_doc.metadata.get('page', "NA") # NA or any default value + page = f"Page {page_number}" + text_element_content = source_doc.page_content + text_element_content = text_element_content if text_element_content != "" else "No Content" + #text_elements.append(cl.Text(content=text_element_content, name=source_name)) + if page not in unique_pages: + unique_pages.add(page) + text_elements.append(cl.Text(content=text_element_content, name=page)) + #text_elements.append(cl.Text(content=text_element_content, name=page)) + source_names = [text_el.name for text_el in text_elements] + + if source_names: + answer += f"\n\nSources: {', '.join(source_names)}" + else: + answer += "\n\nNo sources found" + + await cl.Message(content=answer, elements=text_elements).send() + +if __name__ == "__main__": + from chainlit.cli import run_chainlit + + run_chainlit(__file__) diff --git a/chainlit.md b/chainlit.md new file mode 100644 index 0000000000000000000000000000000000000000..4046caf7446c40a7c040317b5e0d057f871fa749 --- /dev/null +++ b/chainlit.md @@ -0,0 +1,3 @@ +# Welcome to the AI Safety Chatbot + +With this application, you can chat about the preloaded 'Blueprint for an AI Bill of Rights' and 'NIST AI Standards' documents! 
diff --git a/embedding_model.py b/embedding_model.py new file mode 100644 index 0000000000000000000000000000000000000000..af41b7898b24f131a0497e5647f9a66c00d3168a --- /dev/null +++ b/embedding_model.py @@ -0,0 +1,58 @@ + +import tiktoken +import os +from langchain_openai import OpenAIEmbeddings +from langchain_community.embeddings import HuggingFaceBgeEmbeddings +import torch +from transformers import AutoModel, AutoTokenizer +from langchain_huggingface import HuggingFaceEmbeddings + +# def get_embeddings_model_bge_base_en_v1_5(): +# model_name = "BAAI/bge-base-en-v1.5" +# model_kwargs = {'device': 'cpu'} +# encode_kwargs = {'normalize_embeddings': False} +# embedding_model = HuggingFaceBgeEmbeddings( +# model_name=model_name, +# model_kwargs=model_kwargs, +# encode_kwargs=encode_kwargs +# ) +# return embedding_model + +# def get_embeddings_model_bge_en_icl(): +# model_name = "BAAI/bge-en-icl" +# model_kwargs = {'device': 'cpu'} +# encode_kwargs = {'normalize_embeddings': False} +# embedding_model = HuggingFaceBgeEmbeddings( +# model_name=model_name, +# model_kwargs=model_kwargs, +# encode_kwargs=encode_kwargs +# ) +# return embedding_model , 4096 + +# def get_embeddings_model_bge_large_en(): +# model_name = "BAAI/bge-large-en" +# model_kwargs = {'device': 'cpu'} +# encode_kwargs = {'normalize_embeddings': False} +# embedding_model = HuggingFaceBgeEmbeddings( +# model_name=model_name, +# model_kwargs=model_kwargs, +# encode_kwargs=encode_kwargs +# ) +# return embedding_model + +def get_embeddings_openai_text_3_large(): + embedding_model = OpenAIEmbeddings(model="text-embedding-3-large") + dimension = 3072 + return embedding_model,dimension + +# def get_embeddings_snowflake_arctic_embed_l(): +# current_dir = os.path.dirname(os.path.realpath(__file__)) +# model_name = "Snowflake/snowflake-arctic-embed-l" +# tokenizer = AutoTokenizer.from_pretrained(f"{current_dir}/cache/tokenizer/{model_name}") +# model = AutoModel.from_pretrained(f"{current_dir}/cache/model/{model_name}") +# return model,1024 + +def get_embeddings_snowflake_arctic_embed_l(): + embedding_model = HuggingFaceEmbeddings(model_name="Snowflake/snowflake-arctic-embed-l") + return embedding_model,1024 diff --git a/output.txt b/output.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d4943c5e668cf1a56cb57cb75f2aa506cb564af --- /dev/null +++ b/output.txt @@ -0,0 +1,39 @@ +'Companies can ensure that AI does not violate data privacy laws by conducting appropriate diligence on the +training data used, assessing intellectual property and privacy risks, and ensuring that the use of proprietary or +sensitive data is consistent with applicable laws. This includes implementing processes for monitoring AI-generated content for +privacy risks, addressing any potential instances of personally identifiable information (PII) or sensitive data exposure, and +utilizing privacy-enhancing technologies such as anonymization and differential privacy. Periodic monitoring and documentation of +the mapping of AI technology to legal risks are also essential to mitigate potential infringements.' + + +'Companies can ensure that AI does not violate data privacy laws by implementing a series of strategies and techniques as outlined in the provided documents:\n\n1. **Leveraging Privacy Output Filters**: Implementing privacy output filters can help prevent the AI from generating outputs that include sensitive or personally identifiable information (PII).\n\n2. 
**Removing PII**: Actively removing any PII from data used in AI applications can prevent potential harm or misuse.\n\n3. **Providing Options to Withdraw Consent**: Providing human subjects with options to withdraw their participation or revoke their consent for the use of their data, both present and future, ensures that individuals maintain control over their personal information.\n\n4. **Using Privacy-Enhancing Technologies**: Employing techniques such as anonymization and differential privacy can help minimize the risks associated with linking AI-generated data to individuals.\n\n5. **Conducting Risk Assessments and Audits**: Regularly performing risk assessments and implementing auditing mechanisms can help identify and mitigate potential privacy risks associated with AI systems.\n\n6. **Ongoing Monitoring and Documentation**: Establishing dashboards for ongoing monitoring and maintaining thorough documentation procedures specific to model assessments can help ensure compliance with data privacy laws over time.\n\nBy adopting these measures, companies can better manage the privacy risks associated with AI and ensure they stay compliant with relevant data privacy laws.' + +"To ensure AI does not violate data privacy laws, companies can adopt several strategies and techniques as highlighted in the provided context:\n\n1. **Leveraging Privacy Output Filters**: Companies can implement privacy output filters to ensure that any personally identifiable information (PII) is removed, thereby preventing potential harm or misuse.\n\n2. **Providing Options to Withdraw Consent**: It's crucial to give human subjects options to withdraw their participation or revoke their consent for the present or future use of their data in AI applications.\n\n3. **Using Privacy-Enhancing Technologies**: Techniques such as anonymization and differential privacy can be employed to minimize the risks associated with linking AI-generated data to individuals.\n\n4. **Conducting Privacy Risk Assessments**: Regularly examining and documenting the privacy risks associated with AI systems helps in identifying potential areas of concern and addressing them proactively.\n\n5. **Implementing Innovative Risk Mitigation Solutions**: These solutions can include risk assessments, auditing mechanisms, assessment of organizational procedures, and the use of dashboards for ongoing monitoring. Such strategies help mitigate risks to safety, efficacy, and compliance with legal responsibilities.\n\nBy integrating these practices, companies can better ensure that their AI systems comply with data privacy laws and protect individual privacy rights." + +Context: +'human subjects; Leveraging privacy output filters; Removing any personally \r\nidentifiable information (PII) to prevent potential harm or misuse.\r\nData Privacy; Human AI \r\nConfiguration; Information \r\nIntegrity; Information Security; \r\nDangerous, Violent, or Hateful \r\nContent\r\nMS-2.2-003 Provide human subjects with options to withdraw participation or revoke their \r\nconsent for present or future use of their data in GAI applications. 
\r\nData Privacy; Human-AI \r\nConfiguration; Information \r\nIntegrity\r\nMS-2.2-004\r\nUse techniques such as anonymization, differential privacy or other privacy\x02enhancing technologies to minimize the risks associated with linking AI-generated + +Data quality; Model architecture (e.g., convolutional neural network, \r\ntransformers, etc.); Optimization objectives; Training algorithms; RLHF \r\napproaches; Fine-tuning or retrieval-augmented generation approaches; \r\nEvaluation data; Ethical considerations; Legal and regulatory requirements.\r\nInformation Integrity; Harmful Bias \r\nand Homogenization\r\nAI Actor Tasks: AI Deployment, AI Impact Assessment, Domain Experts, End-Users, Operation and Monitoring, TEVV\r\nMEASURE 2.10: Privacy risk of the AI system ā€“ as identified in the MAP function ā€“ is examined and documented.\r\nAction ID Suggested Action GAI Risks\r\nMS-2.10-001' + +From large companies to start-ups, industry is providing innovative solutions that allow \r\norganizations to mitigate risks to the safety and efficacy of AI systems, both before \r\ndeployment and through monitoring over time.17 These innovative solutions include risk \r\nassessments, auditing mechanisms, assessment of organizational procedures, dashboards to allow for ongoing \r\nmonitoring, documentation procedures specific to model assessments, and many other strategies that aim to \r\nmitigate risks posed by the use of AI to companiesā€™ reputation, legal responsibilities, and other product safety \r\nand effectiveness concerns. + +Applying The Blueprint for an AI Bill of Rights\r\nSENSITIVE DATA: Data and metadata are sensitive if they pertain to an individual in a sensitive domain \r\n(defined below); are generated by technologies used in a sensitive domain; can be used to infer data from a \r\nsensitive domain or sensitive data about an individual (such as disability-related data, genomic data, biometric \r\ndata, behavioral data, geolocation data, data related to interaction with the criminal justice system, relationship \r\nhistory and legal status such as custody and divorce information, and home, work, or school environmental')]} + + + + +'Companies can take several actions to ensure that their AI systems do not violate data privacy laws. According to the provided context, these include:\n\n1. **Conduct Periodic Monitoring**: Implement periodic monitoring of AI-generated content for privacy risks and address any possible instances of personally identifiable information (PII) or sensitive data exposure. (MP-4.1-001)\n\n2. **Implement Response Processes**: Establish processes for responding to potential intellectual property infringement claims or other rights, which may also encompass privacy violations. (MP-4.1-002)\n\n3. **Connect Policies to Governance**: Link new AI policies, procedures, and processes to existing model, data, software development, and IT governance, as well as to legal, compliance, and risk management activities. (MP-4.1-003)\n\n4. **Document Training Data Curation Policies**: Ensure that training data curation policies are well-documented according to applicable laws and policies, which helps in managing privacy risks. (MP-4.1-004)\n\n5. **Establish Data Policies**: Formulate policies for the collection, retention, and minimum quality of data, considering risks such as the leak of personally identifiable information, among others. (MP-4.1-005)\n\n6. 
**Define Use of Third-Party Data**: Implement policies and practices that define how third-party intellectual property and training data will be used, stored, and protected. (MP-4.1-006)\n\n7. **Re-evaluate Models**: Continuously re-evaluate models that were fine-tuned or enhanced on top of third-party models to ensure they comply with privacy standards. (MP-4.1-007)\n\n8. **AI Red-Teaming**: Conduct AI red-teaming to assess issues such as outputting training data samples, reverse engineering, and membership inference risks, which can reveal personal or sensitive information. (MS-2.10-001)\n\n9. **Engage with End-Users and Stakeholders**: Directly engage with end-users and other stakeholders to understand their expectations and concerns regarding content provenance and privacy. (MS-2.10-002)\n\nBy following these suggested actions, companies can better manage and mitigate the risks associated with data privacy in their AI systems.' + +'Companies can ensure that AI does not violate data privacy laws by implementing a variety of practices and measures, as suggested in the provided context:\n\n1. **Periodic Monitoring**:\n - Conduct periodic monitoring of AI-generated content for privacy risks and address any possible instances of personally identifiable information (PII) or sensitive data exposure.\n\n2. **Training Data Curation**:\n - Document training data curation policies according to applicable laws and policies to ensure transparency and compliance.\n\n3. **Policies for Data Collection and Retention**:\n - Establish and document policies for the collection, retention, and minimum quality of data. These policies should consider risks such as the disclosure of inappropriate information, the use of illegal or dangerous content, and the leak of PII.\n\n4. **Data Privacy Best Practices**:\n - Follow privacy and security best practices designed to ensure that data and metadata do not leak beyond the specific consented use case. This can include using privacy-enhancing technologies like cryptography, fine-grained permissions, and access control mechanisms.\n\n5. **Risk Identification and Mitigation**:\n - Proactively identify and manage harms related to data privacy. This includes determining not to process data when the privacy risks outweigh the benefits, or implementing measures to mitigate acceptable risks.\n\n6. **Compliance with Existing Regulations**:\n - Ensure that the AI systems comply with existing data privacy regulations and standards, such as the General Data Protection Regulation (GDPR) or the California Consumer Privacy Act (CCPA).\n\n7. **User Awareness**:\n - Increase transparency by disclosing specific data sources on which models were trained, thus limiting the risk of unintentional exposure of PII.\n\nBy adopting these approaches, companies can better manage the risks associated with data privacy in AI systems and ensure compliance with relevant laws and regulations.' + + +'To ensure AI does not violate data privacy laws, companies can adopt several strategies and practices as suggested in the provided documents:\n\n1. **Conduct Periodic Monitoring**:\n - Regularly monitor AI-generated content for privacy risks and address any possible instances of personally identifiable information (PII) or sensitive data exposure. ([Document: NIST.AI.600-1.pdf, page 29, Action ID MP-4.1-001])\n\n2. 
**Implement Robust Data Policies**:\n - Establish policies for the collection, retention, and minimum quality of data, considering various risks including the disclosure of inappropriate information and the leak of PII. ([Document: NIST.AI.600-1.pdf, page 29, Action ID MP-4.1-005])\n\n3. **Document Training Data Curation**:\n - Document training data curation policies according to applicable laws and policies, to ensure transparency and accountability. ([Document: NIST.AI.600-1.pdf, page 29, Action ID MP-4.1-004])\n\n4. **Engage Stakeholders**:\n - Engage directly with end-users and other stakeholders to understand their expectations and concerns regarding content provenance and data privacy. ([Document: NIST.AI.600-1.pdf, page 38, Action ID MS-2.10-002])\n\n5. **Privacy by Design and Default**:\n - Design and build automated systems with privacy protected by default. This includes assessing privacy risks throughout the development lifecycle and implementing appropriate technical and policy mitigation measures. ([Document: Blueprint for an AI Bill of Rights, page 32])\n\n6. **Follow Privacy and Security Best Practices**:\n - Follow privacy and security best practices such as privacy-enhancing cryptography, fine-grained permissions, access control mechanisms, and conventional system security protocols to ensure data and metadata do not leak beyond the specific consented use case. ([Document: Blueprint for an AI Bill of Rights, page 32])\n\n7. **Risk Identification and Mitigation**:\n - Proactively identify potential harms and manage them to avoid, mitigate, and respond appropriately to identified risks. This may include determining not to process data when privacy risks outweigh the benefits or implementing measures to mitigate acceptable risks. ([Document: Blueprint for an AI Bill of Rights, page 32])\n\n8. **AI Red-Teaming**:\n - Conduct AI red-teaming to assess issues such as outputting of training data samples, reverse engineering, model extraction, membership inference risks, and revealing sensitive information. ([Document: NIST.AI.600-1.pdf, page 38, Action ID MS-2.10-001])\n\nBy implementing these strategies, companies can better ensure that their AI systems comply with data privacy laws and protect user data effectively.' + +"Companies can ensure AI does not violate data privacy laws by implementing several best practices and strategies, as highlighted in the context provided:\n\n1. **Periodic Monitoring**:\n - Conduct regular monitoring of AI-generated content for privacy risks and address any instances where personally identifiable information (PII) or sensitive data might be exposed.\n\n2. **Integrate Privacy into Governance**:\n - Connect new AI policies, procedures, and processes to existing governance structures such as model, data, software development, IT governance, legal, compliance, and risk management activities.\n\n3. **Data Curation Policies**:\n - Document and adhere to training data curation policies according to applicable laws and ensure these policies are transparent and well-documented.\n\n4. **Data Collection and Retention Policies**:\n - Establish policies for the collection, retention, and minimum quality of data. This includes mitigating risks associated with the disclosure of inappropriate information, use of illegal or dangerous content, offensive capabilities, and training data imbalances that could lead to harmful biases.\n\n5. **Privacy by Design and by Default**:\n - Ensure AI systems are designed with privacy protected by default. 
Assess privacy risks throughout the development life cycle and implement appropriate technical and policy measures to mitigate these risks.\n\n6. **Privacy-Enhancing Technologies**:\n - Utilize privacy-enhancing cryptography, fine-grained permissions, access control mechanisms, and conventional security protocols to protect data.\n\n7. **Consent and Transparency**:\n - Ensure transparency in data usage and acquire informed consent from individuals whose data is being used. This includes being clear about the data collection, usage, sharing, and storage practices.\n\n8. **Ethical Review**:\n - Conduct ethical reviews, especially for sensitive data, to assess the implications of data usage and to ensure that decisions based on such data do not limit rights, opportunities, or access unfairly. This includes periodic reviews by independent ethics committees or similar bodies.\n\n9. **Compliance with Legal and Ethical Standards**:\n - Ensure all AI activities comply with applicable data privacy laws and standards, such as those outlined in the Fair Information Practice Principles (FIPPs) and other relevant regulations.\n\n10. **User Engagement**:\n - Engage with end-users and other stakeholders to understand their expectations and concerns regarding data privacy and content provenance.\n\nBy integrating these practices into their AI development and deployment processes, companies can better ensure that their AI systems comply with data privacy laws and protect individuals' privacy rights." + + +'Companies can ensure that AI does not violate data privacy laws by implementing several strategies and practices as mentioned in the provided context:\n\n1. +**Periodic Monitoring**: Conduct regular monitoring of AI-generated content to identify and address any potential instances of personally identifiable information (PII) or sensitive data exposure. \n\n2. +**Integration with Existing Policies**: Connect new AI policies, procedures, and processes with existing model, data, software development, and IT governance, as well as legal, compliance, and risk management activities.\n\n3. +**Training Data Curation Policies**: Document training data curation policies in accordance with applicable laws and policies. This includes policies for the collection, retention, and minimum quality of data to mitigate risks such as the disclosure of inappropriate information, use of illegal or dangerous content, offensive cyber capabilities, data imbalances leading to harmful biases, and leaks of PII.\n\n4. **Diligence on Training Data**: Conduct appropriate diligence on the use of training data to assess intellectual property and privacy risks, ensuring that the use of proprietary or sensitive data is consistent with applicable laws.\n\n5. +**User Experience Research**: Conduct user experience research to confirm that individuals understand what data is being collected about them and how it will be used, ensuring that this collection matches their expectations and desires.\n\n6. **Scope Limits on Data Collection**: Limit data collection to specific, narrow goals to avoid "mission creep." Anticipated data collection should be strictly necessary for the identified goals and minimized as much as possible.\n\n7. **Risk Identification and Mitigation**: Proactively identify and manage privacy risks to avoid, mitigate, and respond appropriately to identified risks. This includes determining not to process data when privacy risks outweigh the benefits or implementing measures to mitigate acceptable risks.\n\n8. 
**Privacy-Preserving Security**: Follow privacy and security best practices to ensure that data and metadata do not leak beyond the specific consented use case. This can include using privacy-enhancing cryptography, privacy-enhancing technologies, fine-grained permissions, and access control mechanisms.\n\n9. **Consent and Privacy by Design**: Seek user permission and respect user decisions regarding data collection, use, access, transfer, and deletion to the greatest extent possible. Implement privacy by design safeguards where consent is not feasible, ensuring that systems do not employ user experience and design decisions that obfuscate user choice or burden users with privacy-invasive defaults.\n\n10. **Enhanced Protections for Sensitive Data**: Implement enhanced protections and restrictions for data and inferences related to sensitive domains such as health, work, education, criminal justice, and finance. Ensure that data pertaining to youth is protected, and any use in sensitive domains is subject to ethical review and use prohibitions.\n\n11. **Surveillance and Monitoring**: Ensure that surveillance technologies are subject to heightened oversight, including pre-deployment assessment of potential harms and scope limits to protect privacy and civil liberties. Avoid continuous surveillance and monitoring in contexts where it could limit rights, opportunities, or access.\n\nBy adopting these measures, companies can better ensure that their AI systems comply with data privacy laws and protect the privacy of individuals.'
\ No newline at end of file
diff --git a/pdfloader.py b/pdfloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..d676a2364fa4dac0162874de17360e33daafcb38
--- /dev/null
+++ b/pdfloader.py
@@ -0,0 +1,27 @@
+from enum import Enum
+from typing import List
+
+from langchain_community.document_loaders import PyMuPDFLoader
+from langchain_core.documents import Document
+
+
+class PDFLoaderWrapper:
+    """Thin wrapper that loads one or more PDFs asynchronously."""
+
+    class LoaderType(str, Enum):
+        PYMUPDF = "pymupdf"
+
+    def __init__(self, file_path: str | List[str], loader_type: LoaderType = LoaderType.PYMUPDF):
+        # Normalize a single path into a list so aload() handles both cases uniformly.
+        self.file_path = file_path if isinstance(file_path, list) else [file_path]
+        self.loader_type = loader_type
+
+    async def aload(self) -> List[Document]:
+        all_docs = []
+        for file_path in self.file_path:
+            if self.loader_type == self.LoaderType.PYMUPDF:
+                try:
+                    loader = PyMuPDFLoader(file_path)
+                    docs = await loader.aload()
+                    all_docs.extend(docs)
+                except Exception as e:
+                    # Skip unreadable files instead of failing the whole batch.
+                    print(f"Error loading file {file_path}: {e}")
+                    continue
+        return all_docs
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ca52471400c431a407fe35948117953971e1ff31
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,17 @@
+numpy==1.26.4
+chainlit==0.7.700  # 1.1.402
+openai==1.44.1
+qdrant-client==1.11.2
+langchain==0.3.0
+langchain-community==0.3.0
+langchain-text-splitters==0.3.0
+langchain_experimental
+langchain_qdrant
+langchain_openai
+pypdf==4.3.1
+PyMuPDF==1.24.10
+pymupdf4llm
+sentence_transformers
+langchain_huggingface
+ragas
+langsmith
\ No newline at end of file
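For reference, a minimal usage sketch for `PDFLoaderWrapper`. The local PDF paths below are assumptions for illustration; substitute wherever the two source documents live in your checkout.

```python
# Minimal usage sketch for PDFLoaderWrapper.
# The file paths are illustrative assumptions, not paths from this repo.
import asyncio

from pdfloader import PDFLoaderWrapper

async def main() -> None:
    loader = PDFLoaderWrapper([
        "data/Blueprint-for-an-AI-Bill-of-Rights.pdf",  # assumed local copy
        "data/NIST.AI.600-1.pdf",                       # assumed local copy
    ])
    docs = await loader.aload()
    # PyMuPDFLoader yields one Document per page, each carrying a
    # "page" entry in its metadata that the app uses for citations.
    print(f"Loaded {len(docs)} page-level documents")

if __name__ == "__main__":
    asyncio.run(main())
```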